/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/depthwise_conv2d_native_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class DepthwiseConv2DNativePadModeBuilderTest : public OpsTest {
public:
    void SetUp();
    void TearDown();

    void SetDepthwiseConv2dInput();
    void SetPadMode(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetParam();

public:
    DepthwiseConv2DNativeBuilder m_builder;
    std::vector<uint32_t> m_inputs{0, 1, 2};
    std::vector<uint32_t> m_outputs{3};
    std::vector<uint32_t> m_params{4, 5, 6, 7};
    std::vector<int32_t> m_output_dim{1, 4, 4, 2};
    std::vector<int32_t> m_stride_dim{2};
    std::vector<int32_t> m_dilation_dim{2};
    std::vector<int32_t> m_param_dim{};
};

void DepthwiseConv2DNativePadModeBuilderTest::SetUp() {}

void DepthwiseConv2DNativePadModeBuilderTest::TearDown() {}

void DepthwiseConv2DNativePadModeBuilderTest::SetPadMode(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
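    // Builds the pad-mode parameter tensor and attaches a single int8 value of 0 as its buffer.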
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int8_t* padModeValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, padModeValue);
    tensor->SetBuffer(padModeValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
}

void DepthwiseConv2DNativePadModeBuilderTest::SetParam()
{
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
}

void DepthwiseConv2DNativePadModeBuilderTest::SetDepthwiseConv2dInput()
{
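    // Registers the three operation inputs in index order: the feature-map tensor,
    // the weight tensor, and the bias tensor.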
    int32_t weightNum = 8;
    int32_t biasNum = 2;
    std::vector<int32_t> m_input_dim{1, 3, 3, 2};
    std::vector<int32_t> weightDim = {2, 2, 2, 1};
    std::vector<int32_t> biasDim = {2};
    std::shared_ptr<NNTensor> inputTensor;
    inputTensor = TransToNNTensor(OH_NN_FLOAT32, m_input_dim, nullptr, OH_NN_TENSOR);
    m_allTensors.emplace_back(inputTensor);

    inputTensor = TransToNNTensor(OH_NN_FLOAT32, weightDim, nullptr, OH_NN_TENSOR);
    float* weightValue = new (std::nothrow) float[8]{1, 0, 0, 1, 0, 1, 1, 0};
    EXPECT_NE(nullptr, weightValue);

    inputTensor->SetBuffer(weightValue, weightNum * sizeof(float));
    m_allTensors.emplace_back(inputTensor);

    inputTensor = TransToNNTensor(OH_NN_FLOAT32, biasDim, nullptr, OH_NN_TENSOR);
    float* biasValue = new (std::nothrow) float[2]{0, 0};
    EXPECT_NE(nullptr, biasValue);

    inputTensor->SetBuffer(biasValue, biasNum * sizeof(float));
    m_allTensors.emplace_back(inputTensor);
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_002
 * @tc.desc: Verify that a repeated call to the build function is forbidden
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_003
 * @tc.desc: Verify the build function with missing inputs
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_003, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {1};
    m_params = {2, 3, 4, 5};
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_004
 * @tc.desc: Verify the build function with a missing output
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_004, TestSize.Level1)
{
    m_inputs = {0, 1, 2};
    m_outputs = {};
    m_params = {3, 4, 5, 6};
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_005
 * @tc.desc: Verify the build function with an out-of-bounds inputIndex
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_005, TestSize.Level1)
{
    m_inputs = {0, 1, 9};
    m_outputs = {3};
    m_params = {4, 5, 6, 7};
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_006
 * @tc.desc: Verify the build function with an out-of-bounds outputIndex
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_006, TestSize.Level1)
{
    m_inputs = {0, 1, 2};
    m_outputs = {9};
    m_params = {4, 5, 6, 7};
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_007
 * @tc.desc: Verify the build function with an invalid stride data type
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_007, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;
    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

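    // The stride tensor is created as INT32 rather than the INT64 used in the valid case,
    // so Build() should report an invalid parameter.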
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_stride_dim, nullptr,
        OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
    int32_t* strideValue = new (std::nothrow) int32_t[2]{1, 1};
    EXPECT_NE(nullptr, strideValue);

    tensor->SetBuffer(strideValue, 2 * sizeof(int32_t));
    m_allTensors.emplace_back(tensor);

    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_008
 * @tc.desc: Verify the build function with an invalid dilation data type
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_008, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
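    // The dilation tensor is created as INT32 rather than the INT64 used in the valid case,
    // so Build() should report an invalid parameter.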
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_dilation_dim, nullptr,
        OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);
    int32_t* dilationValue = new (std::nothrow) int32_t[2]{1, 1};
    EXPECT_NE(nullptr, dilationValue);

    tensor->SetBuffer(dilationValue, 2 * sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_009
 * @tc.desc: Verify the build function with an invalid padMode data type
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_009, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);

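    // The pad-mode tensor is created as INT32 rather than the INT8 used in the valid case,
    // so Build() should report an invalid parameter.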
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);
    int32_t* padModeValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, padModeValue);
    tensor->SetBuffer(padModeValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_010
 * @tc.desc: Verify the build function with an invalid activation data type
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_010, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);

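    // The activation tensor is created as INT32 rather than the INT8 used in the valid case,
    // so Build() should report an invalid parameter.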
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr,
        OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
    int32_t* activationValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, activationValue);
    tensor->SetBuffer(activationValue, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_011
 * @tc.desc: Verify the build function with a non-scalar activation tensor
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_011, TestSize.Level1)
{
    std::vector<int32_t> activationDim = {2};
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);
    SetPadMode(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);

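    // The activation type is expected to be a scalar; a two-element tensor should be rejected.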
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, activationDim, nullptr,
        OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
    int8_t* activationValue = new (std::nothrow) int8_t[2]{0, 0};
    EXPECT_NE(nullptr, activationValue);

    tensor->SetBuffer(activationValue, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_build_padmode_012
 * @tc.desc: Verify the build function with an out-of-range padMode value
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_build_padmode_012, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetStride(OH_NN_INT64, m_stride_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES);
    SetDilation(OH_NN_INT64, m_dilation_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION);

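    // A pad-mode value of 10 is outside the supported range, so Build() should report an invalid parameter.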
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE);
    int8_t* padModeValue = new (std::nothrow) int8_t(10);
    EXPECT_NE(nullptr, padModeValue);

    tensor->SetBuffer(padModeValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    SetActivation(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: depthwiseconv2d_getprimitive_padmode_001
 * @tc.desc: Verify the success of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_getprimitive_padmode_001, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));

    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);

    std::vector<int64_t> returnStrides = mindspore::lite::MindIR_Conv2DFusion_GetStride(primitive.get());
    std::vector<int64_t> strideValueTest{1, 1};
    EXPECT_EQ(strideValueTest, returnStrides);
    std::vector<int64_t> returnDilation = mindspore::lite::MindIR_Conv2DFusion_GetDilation(primitive.get());
    std::vector<int64_t> dilationValueTest{1, 1};
    EXPECT_EQ(dilationValueTest, returnDilation);

    int returnPadMode = mindspore::lite::MindIR_Conv2DFusion_GetPadMode(primitive.get());
    EXPECT_EQ(1, returnPadMode);
    int returnActivation = mindspore::lite::MindIR_Conv2DFusion_GetActivationType(primitive.get());
    EXPECT_EQ(0, returnActivation);
}

/**
 * @tc.name: depthwiseconv2d_getprimitive_padmode_002
 * @tc.desc: Verify the nullptr return of the GetPrimitive function when Build has not been called
 * @tc.type: FUNC
 */
HWTEST_F(DepthwiseConv2DNativePadModeBuilderTest, depthwiseconv2d_getprimitive_padmode_002, TestSize.Level1)
{
    m_paramsIndex = m_params;
    m_inputsIndex = m_inputs;

    SetDepthwiseConv2dInput();
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SetParam();
    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS