/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/avgpool_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class AvgPoolPadBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

    void SetPad(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetRoundMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetGlobal(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetPadParams();

public:
    AvgPoolBuilder m_builder;
    std::vector<int32_t> m_input_dim{1, 3, 3, 1};
    std::vector<int32_t> m_output_dim{1, 2, 2, 1};
    std::vector<int32_t> m_kenelsize_dim{2};
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_pad_dim{4};
    std::vector<int32_t> m_param_dim{};
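    // Tensor index layout used by these tests: input tensor at index 0, output tensor at index 1,
    // and the six parameter tensors (kernelSize, stride, pad, activation, roundMode, global) at indices 2-7.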
    std::vector<uint32_t> m_inputs{0};
    std::vector<uint32_t> m_outputs{1};
    std::vector<uint32_t> m_params{2, 3, 4, 5, 6, 7};
};

void AvgPoolPadBuilderTest::SetUp() {}

void AvgPoolPadBuilderTest::TearDown() {}

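// Creates the roundMode parameter tensor (int32 scalar, value 0) and appends it to m_allTensors.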
void AvgPoolPadBuilderTest::SetRoundMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int32_t* roundModeValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, roundModeValue);
    tensor->SetBuffer(roundModeValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
}

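// Creates the global parameter tensor (bool scalar, value false) and appends it to m_allTensors.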
void AvgPoolPadBuilderTest::SetGlobal(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    bool* globalValue = new (std::nothrow) bool(false);
    EXPECT_NE(nullptr, globalValue);
    tensor->SetBuffer(globalValue, sizeof(bool));
    m_allTensors.emplace_back(tensor);
}

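// Creates the pad parameter tensor (four int64 values, all zero) and appends it to m_allTensors.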
void AvgPoolPadBuilderTest::SetPad(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    int32_t padNum{4};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int64_t* padValue = new (std::nothrow) int64_t[padNum] {0, 0, 0, 0};
    EXPECT_NE(nullptr, padValue);
    tensor->SetBuffer(padValue, sizeof(int64_t) * padNum);
    m_allTensors.emplace_back(tensor);
}

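// Registers all six AvgPool parameter tensors with the data types the builder expects.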
void AvgPoolPadBuilderTest::SetPadParams()
{
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
}

/**
 * @tc.name: avgpool_build_pad_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetPadParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_002
 * @tc.desc: Verify that calling the build function twice is forbidden
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetPadParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_003
 * @tc.desc: Verify the missing input of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_003, TestSize.Level1)
{
    m_inputs = {};
    m_outputs = {0};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_004
 * @tc.desc: Verify the missing output of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_004, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {};
    m_params = {1, 2, 3, 4, 5, 6};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_005
 * @tc.desc: Verify the inputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_005, TestSize.Level1)
{
    m_inputs = {8};
    m_outputs = {1};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_006
 * @tc.desc: Verify the outputIndex out of bounds of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_006, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {8};
    m_params = {2, 3, 4, 5, 6, 7};
    m_paramsIndex = m_params;

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_007
 * @tc.desc: Verify the invalid kernelSize of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_007, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    int32_t numKernels{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_kenelsize_dim, nullptr,
        OH_NN_AVG_POOL_KERNEL_SIZE);
    int32_t* kernelSizeValue = new (std::nothrow) int32_t[numKernels]{1, 1};
    EXPECT_NE(nullptr, kernelSizeValue);
    tensor->SetBuffer(kernelSizeValue, sizeof(int32_t) * numKernels);
    m_allTensors.emplace_back(tensor);

    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_008
 * @tc.desc: Verify the invalid stride of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_008, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);

    int32_t numStride{2};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    int32_t* strideValue = new (std::nothrow) int32_t[numStride]{1, 1};
    EXPECT_NE(nullptr, strideValue);
    tensor->SetBuffer(strideValue, sizeof(int32_t) * numStride);
    m_allTensors.emplace_back(tensor);

    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_009
 * @tc.desc: Verify the invalid pad of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    int32_t padNum{4};
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    int32_t* padValue = new (std::nothrow) int32_t[padNum]{0, 0, 0, 0};
    EXPECT_NE(nullptr, padValue);

    tensor->SetBuffer(padValue, sizeof(int32_t) * padNum);
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_010
 * @tc.desc: Verify the invalid activation of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_010, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    int32_t* activationValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, activationValue);

    tensor->SetBuffer(activationValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_011
 * @tc.desc: Verify the invalid roundMode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_011, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ROUND_MODE);
    int64_t* roundModeValue = new (std::nothrow) int64_t(0);
    EXPECT_NE(nullptr, roundModeValue);

    tensor->SetBuffer(roundModeValue, sizeof(int64_t));
    m_allTensors.emplace_back(tensor);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_012
 * @tc.desc: Verify the invalid global of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_AVG_POOL_GLOBAL);
    int32_t* globalValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, globalValue);

    tensor->SetBuffer(globalValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_013
 * @tc.desc: Verify the invalid activation scalar length of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_013, TestSize.Level1)
{
    m_param_dim = {2};
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    int8_t* activationValue = new (std::nothrow) int8_t[2]{1, 2};
    EXPECT_NE(nullptr, activationValue);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_014
 * @tc.desc: Verify the avgpool without set kernelsize of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_014, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_kenelsize_dim, nullptr,
        OH_NN_AVG_POOL_KERNEL_SIZE);
    m_allTensors.emplace_back(tensor);

    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_015
 * @tc.desc: Verify the avgpool without set stride of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_015, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    m_allTensors.emplace_back(tensor);

    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_016
 * @tc.desc: Verify the avgpool without set pad of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_016, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    m_allTensors.emplace_back(tensor);

    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_017
 * @tc.desc: Verify the avgpool without set activation of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_017, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ACTIVATION_TYPE);
    m_allTensors.emplace_back(tensor);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_018
 * @tc.desc: Verify the avgpool without set roundMode of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_018, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT64, m_param_dim, nullptr,
        OH_NN_AVG_POOL_ROUND_MODE);
    m_allTensors.emplace_back(tensor);
    SetGlobal(OH_NN_BOOL, m_param_dim, nullptr, OH_NN_AVG_POOL_GLOBAL);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_build_pad_019
 * @tc.desc: Verify the avgpool without set global of the build function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_build_pad_019, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetKernelSize(OH_NN_INT64, m_kenelsize_dim, nullptr, OH_NN_AVG_POOL_KERNEL_SIZE);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_AVG_POOL_STRIDE);
    SetPad(OH_NN_INT64, m_pad_dim, nullptr, OH_NN_AVG_POOL_PAD);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_AVG_POOL_ACTIVATION_TYPE);
    SetRoundMode(OH_NN_INT32, m_param_dim, nullptr, OH_NN_AVG_POOL_ROUND_MODE);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_AVG_POOL_GLOBAL);
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: avgpool_getprimitive_pad_001
 * @tc.desc: Verify the behavior of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);

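    // The expected values below mirror the parameters registered by SetPadParams: the OpsTest helpers
    // presumably fill kernelSize and stride with {1, 1} and activation with 0, while this file's setters
    // use a zero pad, roundMode 0 (FLOOR) and global false.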
    std::vector<int64_t> expectKernelSize = mindspore::lite::MindIR_AvgPoolFusion_GetKernelSize(primitive.get());
    std::vector<int64_t> kernelSizeValueTest{1, 1};
    EXPECT_EQ(kernelSizeValueTest, expectKernelSize);
    std::vector<int64_t> expectStrides = mindspore::lite::MindIR_AvgPoolFusion_GetStrides(primitive.get());
    std::vector<int64_t> strideValueTest{1, 1};
    EXPECT_EQ(strideValueTest, expectStrides);
    std::vector<int64_t> expectPadValue = mindspore::lite::MindIR_AvgPoolFusion_GetPad(primitive.get());
    std::vector<int64_t> padValueTest{0, 0, 0, 0};
    EXPECT_EQ(padValueTest, expectPadValue);

    int8_t activationValue = 0;
    int expectActivation = mindspore::lite::MindIR_AvgPoolFusion_GetActivationType(primitive.get());
    EXPECT_EQ(activationValue, expectActivation);
    mindspore::lite::RoundMode roundModeValue = mindspore::lite::ROUND_MODE_FLOOR;
    auto expectRoundMode = mindspore::lite::MindIR_AvgPoolFusion_GetRoundMode(primitive.get());
    EXPECT_EQ(roundModeValue, expectRoundMode);
    bool globalValue = false;
    bool expectGlobal = mindspore::lite::MindIR_AvgPoolFusion_GetGlobal(primitive.get());
    EXPECT_EQ(globalValue, expectGlobal);
}

/**
 * @tc.name: avgpool_getprimitive_pad_002
 * @tc.desc: Verify that GetPrimitive returns an empty primitive when the build function has not been called
 * @tc.type: FUNC
 */
HWTEST_F(AvgPoolPadBuilderTest, avgpool_getprimitive_pad_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    SetPadParams();
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS