/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/maxpool_builder.h"

#include "ops_test.h"
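
// Unit tests for MaxPoolBuilder::Build() and GetPrimitive() using the pad-based parameter set
// (kernelSize, stride, pad, activationType, roundMode and global).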

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class MaxPoolPadBuilderTest : public OpsTest {
public:
    void SetUp();
    void TearDown();

    void SetPad(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetRoundMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetGlobal(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetPadParam();

public:
    MaxPoolBuilder m_builder;
    std::vector<uint32_t> m_inputs{0};
    std::vector<uint32_t> m_outputs{1};
    std::vector<uint32_t> m_params{2, 3, 4, 5, 6, 7};
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
    std::vector<int32_t> m_kenelsize_dim{2};
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_pad_dim{4};
    std::vector<int32_t> m_param_dim{};
};
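
// Tensor layout used throughout these tests: input at index 0, output at index 1, and the six
// MaxPool parameters (kernelSize, stride, pad, activation, roundMode, global) at indices 2 to 7,
// matching m_inputs, m_outputs and m_params above.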

void MaxPoolPadBuilderTest::SetUp() {}

void MaxPoolPadBuilderTest::TearDown() {}

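// Each Set* helper below builds a parameter tensor with TransToNNTensor, attaches a
// heap-allocated value buffer via SetBuffer, and appends the tensor to m_allTensors.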
void MaxPoolPadBuilderTest::SetRoundMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int32_t* roundModeValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, roundModeValue);
    tensor->SetBuffer(roundModeValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
}

void MaxPoolPadBuilderTest::SetGlobal(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    bool* globalValue = new (std::nothrow) bool(false);
    EXPECT_NE(nullptr, globalValue);
    tensor->SetBuffer(globalValue, sizeof(bool));
    m_allTensors.emplace_back(tensor);
}

void MaxPoolPadBuilderTest::SetPad(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    int32_t padNum{4};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int64_t* padValue = new (std::nothrow) int64_t[padNum]{0, 0, 0, 0};
    EXPECT_NE(nullptr, padValue);

    tensor->SetBuffer(padValue, sizeof(int64_t) * padNum);
    m_allTensors.emplace_back(tensor);
}

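// Registers the complete, valid parameter set; the positive Build and GetPrimitive cases call
// this before invoking m_builder.Build.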
void MaxPoolPadBuilderTest::SetPadParam()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
}

/**
 * @tc.name: maxpool_build_pad_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_002
 * @tc.desc: Verify that calling the build function twice is forbidden
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_003
 * @tc.desc: Verify the missing input of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_003, TestSize.Level1)
{
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_004
 * @tc.desc: Verify the missing output of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_004, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_005
 * @tc.desc: Verify the inputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_005, TestSize.Level1)
{
    m_inputs = {8};
    m_outputs = {1};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_006
 * @tc.desc: Verify the outputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_006, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {8};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_007
 * @tc.desc: Verify the invalid kernelSize of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_007, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    int32_t kernelsNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
        OH_NN_MAX_POOL_KERNEL_SIZE);
    int32_t* valueKernelSize = new (std::nothrow) int32_t[kernelsNum]{1, 1};
    EXPECT_NE(nullptr, valueKernelSize);

    tensor->SetBuffer(valueKernelSize, sizeof(int32_t) * kernelsNum);
    m_allTensors.emplace_back(tensor);

    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    m_paramsIndex = m_params;
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_008
 * @tc.desc: Verify the invalid stride of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_008, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    m_paramsIndex = m_params;

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    int32_t strideNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1};
    EXPECT_NE(nullptr, strideValue);

    tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum);
    m_allTensors.emplace_back(tensor);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_009
 * @tc.desc: Verify the invalid pad of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    int32_t padNum{4};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0};
    EXPECT_NE(nullptr, padValue);

    tensor->SetBuffer(padValue, sizeof(int32_t) * padNum);
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_010
 * @tc.desc: Verify the invalid activation of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_010, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    int32_t* activationValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, activationValue);

    tensor->SetBuffer(activationValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_011
 * @tc.desc: Verify the invalid roundMode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_011, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ROUND_MODE);
    int64_t* roundModeValue = new (std::nothrow) int64_t(0);
    EXPECT_NE(nullptr, roundModeValue);

    tensor->SetBuffer(roundModeValue, sizeof(int64_t));
    m_allTensors.emplace_back(tensor);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_012
 * @tc.desc: Verify the invalid global of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_MAX_POOL_GLOBAL);
    int32_t* globalValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, globalValue);

    tensor->SetBuffer(globalValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_013
 * @tc.desc: Verify the invalid activation scalar length of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_013, TestSize.Level1)
{
    m_param_dim = {2};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_014
 * @tc.desc: Verify the build function when the kernelSize value is not set
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_014, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr,
        OH_NN_MAX_POOL_KERNEL_SIZE);
    m_allTensors.emplace_back(tensor);

    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_015
 * @tc.desc: Verify the build function when the stride value is not set
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_015, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    m_allTensors.emplace_back(tensor);

    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_016
 * @tc.desc: Verify the build function when the pad value is not set
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_016, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    m_allTensors.emplace_back(tensor);

    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_017
 * @tc.desc: Verify the build function when the activation value is not set
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_017, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    m_allTensors.emplace_back(tensor);

    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_018
 * @tc.desc: Verify the build function when the roundMode value is not set
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_018, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ROUND_MODE);
    m_allTensors.emplace_back(tensor);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_019
 * @tc.desc: Verify the build function when the global value is not set
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_build_pad_019, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_MAX_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_MAX_POOL_GLOBAL);
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_getprimitive_pad_001
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);

    std::vector<int64_t> expectKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get());
    std::vector<int64_t> kernelSizeValueTest{1, 1};
    EXPECT_EQ(kernelSizeValueTest, expectKernelSize);

    std::vector<int64_t> expectStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get());
    std::vector<int64_t> strideValueTest{1, 1};
    EXPECT_EQ(strideValueTest, expectStrides);

    std::vector<int64_t> expectPadValue = mindspore::lite::MindIR_MaxPoolFusion_GetPad(primitive.get());
    std::vector<int64_t> padValueValueTest{0, 0, 0, 0};
    EXPECT_EQ(padValueValueTest, expectPadValue);

    int8_t activationValue = 0;
    int expectActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get());
    EXPECT_EQ(activationValue, expectActivation);
    mindspore::lite::RoundMode roundModeValue = mindspore::lite::ROUND_MODE_FLOOR;
    auto expectRoundMode = mindspore::lite::MindIR_MaxPoolFusion_GetRoundMode(primitive.get());
    EXPECT_EQ(roundModeValue, expectRoundMode);
    bool globalValue = false;
    bool expectGlobal = mindspore::lite::MindIR_MaxPoolFusion_GetGlobal(primitive.get());
    EXPECT_EQ(globalValue, expectGlobal);
}

/**
 * @tc.name: maxpool_getprimitive_pad_002
 * @tc.desc: Verify the GetPrimitive function when the build function is not called
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolPadBuilderTest, maxpool_getprimitive_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParam();
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS