/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/maxpool_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class MaxPoolBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

    void SetPadMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetRoundMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetGlobal(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetParam();

public:
    MaxPoolBuilder m_builder;
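    // Tensor index layout used throughout these tests: input tensor at index 0, output tensor at index 1,
    // and the six op params (kernelSize, stride, padMode, activation, roundMode, global) at indices 2-7.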
    std::vector<uint32_t> m_inputs{0};
    std::vector<uint32_t> m_outputs{1};
    std::vector<uint32_t> m_params{2, 3, 4, 5, 6, 7};
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
    std::vector<int32_t> m_kenelsize_dim{2};
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_param_dim{};
};

void MaxPoolBuilderTest::SetUp() {}

void MaxPoolBuilderTest::TearDown() {}

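// Creates the roundMode param tensor holding an int32 value of 0 and appends it to m_allTensors.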
void MaxPoolBuilderTest::SetRoundMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int32_t* roundModeValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, roundModeValue);
    tensor->SetBuffer(roundModeValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
}

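// Creates the global param tensor holding a bool value of false and appends it to m_allTensors.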
void MaxPoolBuilderTest::SetGlobal(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    bool* globalValue = new (std::nothrow) bool(false);
    EXPECT_NE(nullptr, globalValue);
    tensor->SetBuffer(globalValue, sizeof(bool));
    m_allTensors.emplace_back(tensor);
}

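// Creates the padMode param tensor holding an int8 value of 0 and appends it to m_allTensors.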
void MaxPoolBuilderTest::SetPadMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int8_t* padModeValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, padModeValue);
    tensor->SetBuffer(padModeValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
}

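// Registers all six maxpool params with valid data types; individual tests below override single params
// to exercise the failure paths of Build.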
void MaxPoolBuilderTest::SetParam()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
}

/**
 * @tc.name: maxpool_build_pad_mode_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_002
 * @tc.desc: Verify that a second call to the build function is forbidden
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_003
 * @tc.desc: Verify the missing input of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_003, TestSize.Level1)
{
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_004
 * @tc.desc: Verify the missing output of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_004, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_005
 * @tc.desc: Verify the inputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_005, TestSize.Level1)
{
    m_inputs = {8};
    m_outputs = {1};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_006
 * @tc.desc: Verify the outputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_006, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {8};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_007
 * @tc.desc: Verify the invalid kernelSize of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_007, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

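    // The kernelSize param is created as OH_NN_INT32 instead of the OH_NN_INT64 used by SetParam,
    // so Build is expected to reject it.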
    int32_t kernelsNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
        OH_NN_MAX_POOL_KERNEL_SIZE);
    int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1};
    EXPECT_NE(nullptr, kernelSizeValue);
    tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum);
    m_allTensors.emplace_back(tensor);

    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_008
 * @tc.desc: Verify the invalid stride of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_008, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
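    // The stride param is created as OH_NN_INT32 instead of the OH_NN_INT64 used by SetParam,
    // so Build is expected to reject it.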
    int32_t strideNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1};
    EXPECT_NE(nullptr, strideValue);

    tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum);
    m_allTensors.emplace_back(tensor);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_009
 * @tc.desc: Verify the invalid padmode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
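    // An OH_NN_MAX_POOL_PAD tensor holding an int32 scalar is supplied in place of the pad-mode param,
    // so Build is expected to reject it.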
    int32_t *padValueTest = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, padValueTest);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD);
    tensor->SetBuffer(padValueTest, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_010
 * @tc.desc: Verify the invalid activation of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_010, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);

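    // The activation param is created as OH_NN_INT32 instead of the OH_NN_INT8 used by SetParam,
    // so Build is expected to reject it.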
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    int32_t* activationValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, activationValue);

    tensor->SetBuffer(activationValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_011
 * @tc.desc: Verify the invalid roundMode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_011, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
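    // The roundMode param is created as OH_NN_INT64 instead of the OH_NN_INT32 used by SetParam,
    // so Build is expected to reject it.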
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ROUND_MODE);
    int64_t* roundModeValue = new (std::nothrow) int64_t(0);
    EXPECT_NE(nullptr, roundModeValue);

    tensor->SetBuffer(roundModeValue, sizeof(int64_t));
    m_allTensors.emplace_back(tensor);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_MAX_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_012
 * @tc.desc: Verify the invalid global of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_MAX_POOL_ROUND_MODE);
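    // The global param is created as OH_NN_INT32 instead of the OH_NN_BOOL used by SetParam,
    // so Build is expected to reject it.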
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_MAX_POOL_GLOBAL);
    int32_t* globalValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, globalValue);

    tensor->SetBuffer(globalValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_013
 * @tc.desc: Verify the scalar length of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_013, TestSize.Level1)
{
    m_param_dim = {2};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
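    // The activation param is given shape {2} and two values, but scalar params must hold a single value,
    // so Build is expected to reject it.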
    int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_014
 * @tc.desc: Verify the build function with a param type that does not belong to maxpool
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_014, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
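    // The activation value is supplied in a tensor tagged OH_NN_DIV_ACTIVATIONTYPE, which is not a maxpool
    // param type, so Build is expected to reject it.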
    int8_t* activationValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_DIV_ACTIVATIONTYPE);
    tensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_015
 * @tc.desc: Verify the invalid padmode value of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_015, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
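    // The padMode value is set to 6, which is not a valid pad mode, so Build is expected to reject it.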
    int8_t *padValueTest = new (std::nothrow) int8_t(6);
    EXPECT_NE(nullptr, padValueTest);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);
    tensor->SetBuffer(padValueTest, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_build_pad_mode_016
 * @tc.desc: Verify the invalid activation value of the build function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_build_pad_mode_016, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_MAX_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_MAX_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_MAX_POOL_PAD_MODE);

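    // The activation value is set to 6, which is not a valid activation type, so Build is expected to reject it.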
    int8_t* activationValue = new (std::nothrow) int8_t(6);
    EXPECT_NE(nullptr, activationValue);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_MAX_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);

    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: maxpool_getprimitive_pad_mode_001
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);

    std::vector<int64_t> returnKernelSize = mindspore::lite::MindIR_MaxPoolFusion_GetKernelSize(primitive.get());
    std::vector<int64_t> kernelSizeValueTest{1, 1};
    EXPECT_EQ(kernelSizeValueTest, returnKernelSize);

    std::vector<int64_t> returnStrides = mindspore::lite::MindIR_MaxPoolFusion_GetStrides(primitive.get());
    std::vector<int64_t> strideValueTest{1, 1};
    EXPECT_EQ(strideValueTest, returnStrides);
    int returnPadMode = mindspore::lite::MindIR_MaxPoolFusion_GetPadMode(primitive.get());
    EXPECT_EQ(1, returnPadMode);
    int returnActivation = mindspore::lite::MindIR_MaxPoolFusion_GetActivationType(primitive.get());
    EXPECT_EQ(0, returnActivation);

    mindspore::lite::RoundMode roundModeValue = mindspore::lite::ROUND_MODE_FLOOR;
    auto expectRoundMode = mindspore::lite::MindIR_MaxPoolFusion_GetRoundMode(primitive.get());
    EXPECT_EQ(roundModeValue, expectRoundMode);
    bool globalValue = false;
    bool expectGlobal = mindspore::lite::MindIR_MaxPoolFusion_GetGlobal(primitive.get());
    EXPECT_EQ(globalValue, expectGlobal);
}

/**
 * @tc.name: maxpool_getprimitive_pad_mode_002
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(MaxPoolBuilderTest, maxpool_getprimitive_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParam();
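    // Build is intentionally not called here, so GetPrimitive should return the empty (nullptr) primitive.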
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS