/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/div_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
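// Test fixture exercising DivBuilder::Build and DivBuilder::GetPrimitive.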
class DivFusionTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

    void SaveParamsTensor(const std::vector<uint32_t>& m_params, OH_NN_DataType dataType,
        const std::vector<int32_t>& dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);

public:
    DivBuilder m_builder;
    std::vector<uint32_t> m_inputs{0, 1};
    std::vector<uint32_t> m_outputs{2};
    std::vector<uint32_t> m_params{3};
    std::vector<int32_t> m_input_dim{3, 3};
    std::vector<int32_t> m_output_dim{3, 3};
    std::vector<int32_t> m_param_dim{};
};

void DivFusionTest::SetUp() {}

void DivFusionTest::TearDown() {}

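// Helper: records the parameter indices, creates the activation parameter tensor
// with the given type and dims, attaches a zero-valued int8 buffer to it, and
// appends it to m_allTensors.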
void DivFusionTest::SaveParamsTensor(const std::vector<uint32_t>& m_params, OH_NN_DataType dataType,
    const std::vector<int32_t>& dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    m_paramsIndex = m_params;
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
    int8_t* activationValue = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, activationValue);
    tensor->SetBuffer(activationValue, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
}

/**
 * @tc.name: div_build_001
 * @tc.desc: Verify the success of the build function
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_001, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_002
 * @tc.desc: Verify that a second call to the build function is forbidden
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_002, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_003
 * @tc.desc: Verify that the build function fails when an input tensor is missing
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_003, TestSize.Level1)
{
    m_inputs = {0};
    m_outputs = {1};
    m_params = {2};

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_004
 * @tc.desc: Verify that the build function fails when the output tensor is missing
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_004, TestSize.Level1)
{
    m_inputs = {0, 1};
    m_outputs = {};
    m_params = {2};

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_005
 * @tc.desc: Verify that the build function fails when an input index is out of bounds
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_005, TestSize.Level1)
{
    m_inputs = {0, 6};
    m_outputs = {2};
    m_params = {3};

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_006
 * @tc.desc: Verify that the build function fails when the output index is out of bounds
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_006, TestSize.Level1)
{
    m_inputs = {0, 1};
    m_outputs = {6};
    m_params = {3};

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_007
 * @tc.desc: Verify that the build function fails when the activation parameter has an invalid data type
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_007, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    m_paramsIndex = m_params;

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT32, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    int32_t* activationValueTest = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, activationValueTest);

    tensor->SetBuffer(activationValueTest, sizeof(int32_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_008
 * @tc.desc: Verify that the build function fails when the activation parameter is not a scalar
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_008, TestSize.Level1)
{
    m_param_dim = {2};
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    m_paramsIndex = m_params;
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    int8_t* activationValueTest = new (std::nothrow) int8_t[2]{0, 0};
    EXPECT_NE(nullptr, activationValueTest);

    tensor->SetBuffer(activationValueTest, 2 * sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_009
 * @tc.desc: Verify that the build function fails for an invalid activation value
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_009, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    m_paramsIndex = m_params;

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    int8_t* activationValueTest = new (std::nothrow) int8_t(10);
    EXPECT_NE(nullptr, activationValueTest);

    tensor->SetBuffer(activationValueTest, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_010
 * @tc.desc: Verify that the build function fails when a parameter not supported by Div is passed
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_010, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    m_paramsIndex = m_params;
    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr, OH_NN_ADD_ACTIVATIONTYPE);
    int8_t* activationValueTest = new (std::nothrow) int8_t(0);
    EXPECT_NE(nullptr, activationValueTest);

    tensor->SetBuffer(activationValueTest, sizeof(int8_t));
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_build_011
 * @tc.desc: Verify that the build function fails when no buffer is set for the activation parameter
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_build_011, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);

    std::shared_ptr<NNTensor> tensor = TransToNNTensor(OH_NN_INT8, m_param_dim, nullptr,
        OH_NN_DIV_ACTIVATIONTYPE);
    m_allTensors.emplace_back(tensor);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
}

/**
 * @tc.name: div_getprimitive_001
 * @tc.desc: Verify the success of the GetPrimitive function
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_getprimitive_001, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);
    EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors));

    LiteGraphTensorPtr primitive = m_builder.GetPrimitive();
    LiteGraphTensorPtr expectPrimitive = {nullptr, DestroyLiteGraphPrimitive};
    EXPECT_NE(expectPrimitive, primitive);

    int8_t activationValueTest = 0;
    int8_t returnValue = mindspore::lite::MindIR_DivFusion_GetActivationType(primitive.get());
    EXPECT_EQ(returnValue, activationValueTest);
}

/**
 * @tc.name: div_getprimitive_002
 * @tc.desc: Verify that the GetPrimitive function returns nullptr when the build function has not been called
 * @tc.type: FUNC
 */
HWTEST_F(DivFusionTest, div_getprimitive_002, TestSize.Level1)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_input_dim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_output_dim, nullptr);
    SaveParamsTensor(m_params, OH_NN_INT8, m_param_dim, nullptr, OH_NN_DIV_ACTIVATIONTYPE);

    LiteGraphTensorPtr primitive = {nullptr, DestroyLiteGraphPrimitive};
    LiteGraphTensorPtr expectPrimitive = m_builder.GetPrimitive();
    EXPECT_EQ(primitive, expectPrimitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS