/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/avgpool_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class AvgPoolBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

    void SetPadMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetRoundMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetGlobal(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetParams();

public:
    AvgPoolBuilder m_builder;
    std::vector<uint32_t> m_inputs{0};
    std::vector<uint32_t> m_outputs{1};
    std::vector<uint32_t> m_params{2, 3, 4, 5, 6, 7};
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
    std::vector<int32_t> m_kenelsize_dim{2};
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_param_dim{};
};

void AvgPoolBuilderTest::SetUp() {}

void AvgPoolBuilderTest::TearDown() {}

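// Each helper below builds one scalar parameter tensor: it converts the type/dimension information
// into an NNTensor, attaches a heap-allocated value via SetBuffer(), and appends the tensor to
// m_allTensors so that Build() can look it up through the parameter indices.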
void AvgPoolBuilderTest::SetRoundMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int32_t* roundModeValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, roundModeValue);
    tensor->SetBuffer(roundModeValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
}

void AvgPoolBuilderTest::SetGlobal(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    bool* globalValue = new (std::nothrow) bool(false);
    EXPECT_NE(nullptr, globalValue);
    tensor->SetBuffer(globalValue, sizeof(bool));
    m_allTensors.emplace_back(tensor);
}

void AvgPoolBuilderTest::SetPadMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int8_t* padModeValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, padModeValue);
    tensor->SetBuffer(padModeValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
}

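// Registers the full, valid AvgPool parameter set (kernel size, stride, pad mode, activation,
// round mode, global) in the order matched by the parameter indices 2-7 in m_params.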
void AvgPoolBuilderTest::SetParams()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
}

/**
 * @tc.name: avgpool_build_pad_mode_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_002
 * @tc.desc: Verify that a second call to the build function is forbidden
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_003
 * @tc.desc: Verify the missing input of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_003, TestSize.Level1)
{
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_004
 * @tc.desc: Verify the missing output of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_004, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_005
 * @tc.desc: Verify the inputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_005, TestSize.Level1)
{
    m_inputs = {8};
    m_outputs = {1};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_006
 * @tc.desc: Verify the outputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_006, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {8};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_007
 * @tc.desc: Verify the invalid kernelSize of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_007, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

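    // The kernel-size tensor is deliberately created as OH_NN_INT32 instead of the OH_NN_INT64
    // used by the valid SetParams() path, so Build() is expected to reject it.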
    int32_t kernelsNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
        OH_NN_AVG_POOL_KERNEL_SIZE);
    int32_t* kernelSizeValue = new (std::nothrow) int32_t[kernelsNum]{1, 1};
    EXPECT_NE(nullptr, kernelSizeValue);

    tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * kernelsNum);
    m_allTensors.emplace_back(tensor);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_008
 * @tc.desc: Verify the invalid stride of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_008, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);

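    // The stride tensor uses OH_NN_INT32 rather than the OH_NN_INT64 used by the valid SetParams()
    // path, so Build() should report an invalid parameter.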
    int32_t strideNum{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    int32_t* strideValue = new (std::nothrow) int32_t[strideNum]{1, 1};
    EXPECT_NE(nullptr, strideValue);

    tensor->SetBuffer(strideValue, sizeof(int32_t) * strideNum);
    m_allTensors.emplace_back(tensor);

    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_009
 * @tc.desc: Verify the invalid padmode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);

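    // The pad-mode tensor is created as OH_NN_INT32 instead of the OH_NN_INT8 used in the valid
    // case, so Build() should return OH_NN_INVALID_PARAMETER.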
    int32_t *padValueTest = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, padValueTest);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    tensor->SetBuffer(padValueTest, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);

    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_010
 * @tc.desc: Verify the invalid activation type of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_010, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    int32_t* activationValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, activationValue);

    tensor->SetBuffer(activationValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_011
 * @tc.desc: Verify the invalid roundMode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_011, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ROUND_MODE);
    int64_t* roundModeValue = new (std::nothrow) int64_t(0);
    EXPECT_NE(nullptr, roundModeValue);

    tensor->SetBuffer(roundModeValue, sizeof(int64_t));
    m_allTensors.emplace_back(tensor);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_012
 * @tc.desc: Verify the invalid global of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_AVG_POOL_GLOBAL);
    int32_t* globalValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, globalValue);

    tensor->SetBuffer(globalValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_013
 * @tc.desc: Verify the invalid scalar length of the activation of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_013, TestSize.Level1)
{
    m_param_dim = {2};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_014
 * @tc.desc: Verify the build function with a param that does not belong to AvgPool
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_014, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    int8_t* activationValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_DIV_ACTIVATIONTYPE);
    tensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_015
 * @tc.desc: Verify the invalid padmode value of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_015, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    int8_t *padValueTest = new (std::nothrow) int8_t(6);
    EXPECT_NE(nullptr, padValueTest);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);
    tensor->SetBuffer(padValueTest, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_mode_016
 * @tc.desc: Verify the invalid activation value of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_build_pad_mode_016, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_PAD_MODE);

    int8_t* activationValue = new (std::nothrow) int8_t(6);
    EXPECT_NE(nullptr, activationValue);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);

    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_getprimitive_pad_mode_001
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);

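    // After a successful Build(), each attribute read back from the MindIR AvgPoolFusion primitive
    // is checked against the value written by the parameter helpers in SetParams().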
    std::vector<int64_t> returnKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get());
    std::vector<int64_t> kernelSizeValueTest{1, 1};
    EXPECT_EQ(kernelSizeValueTest, returnKernelSize);

    std::vector<int64_t> returnStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get());
    std::vector<int64_t> strideValueTest{1, 1};
    EXPECT_EQ(strideValueTest, returnStrides);
    int returnPadMode = mindspore::lite::MindIR_AvgPoolFusion_GetPadMode(primitive.get());
    EXPECT_EQ(1, returnPadMode);
    int returnActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get());
    EXPECT_EQ(0, returnActivation);

    mindspore::lite::RoundMode roundModeValue = mindspore::lite::ROUND_MODE_FLOOR;
    auto expectRoundMode = mindspore::lite::MindIR_AvgPoolFusion_GetRoundMode(primitive.get());
    EXPECT_EQ(roundModeValue, expectRoundMode);
    bool globalValue = false;
    bool expectGlobal = mindspore::lite::MindIR_AvgPoolFusion_GetGlobal(primitive.get());
    EXPECT_EQ(globalValue, expectGlobal);
}

/**
 * @tc.name: avgpool_getprimitive_pad_mode_002
 * @tc.desc: Verify the behavior of the GetPrimitive function without calling build
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolBuilderTest, avgpool_getprimitive_pad_mode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParams();
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS