/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/pad_builder.h"

#include "ops_test.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Ops;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
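// Test fixture for PadBuilder: prepares the input/output tensors and the
// constantValue/paddingMode parameter tensors shared by the cases below.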
class PadBuilderTest : public OpsTest {
public:
    void SetUp() override;
    void TearDown() override;

protected:
    void SetConstValueTensor(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
    void SetPaddingModeTensor(OH_NN_DataType dataType,
        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);

protected:
    PadBuilder m_pad;
    std::vector<uint32_t> m_inputs {0, 1};   // input tensor and paddings tensor
    std::vector<uint32_t> m_outputs {2};     // output tensor
    std::vector<uint32_t> m_params {3, 4};   // constantValue and paddingMode parameters
    std::vector<int32_t> m_inputDim {1, 1, 2, 3};
    std::vector<int32_t> m_outputDim {1, 2, 7, 7};
    std::vector<int32_t> m_paramDim {};
};

void PadBuilderTest::SetUp() {}

void PadBuilderTest::TearDown() {}

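// Helper: builds the constantValue parameter tensor with a scalar float value of 2.0
// and appends it to m_allTensors.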
void PadBuilderTest::SetConstValueTensor(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> constantValueTensor = TransToNNTensor(dataType, dim, quantParam, type);
    float* constantValue = new (std::nothrow) float(2.0);
    EXPECT_NE(nullptr, constantValue);
    constantValueTensor->SetBuffer(constantValue, sizeof(float));
    m_allTensors.emplace_back(constantValueTensor);
}

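// Helper: builds the paddingMode parameter tensor with a scalar int32 value of 0
// (constant padding) and appends it to m_allTensors.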
void PadBuilderTest::SetPaddingModeTensor(OH_NN_DataType dataType,
    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
{
    std::shared_ptr<NNTensor> paddingModeValueTensor = TransToNNTensor(dataType, dim, quantParam, type);
    int32_t* paddingModeValue = new (std::nothrow) int32_t(0);
    EXPECT_NE(nullptr, paddingModeValue);
    paddingModeValueTensor->SetBuffer(paddingModeValue, sizeof(int32_t));
    m_allTensors.emplace_back(paddingModeValueTensor);
}

/**
 * @tc.name: pad_build_001
 * @tc.desc: Verify that the build function returns a successful message.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_001, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
}

/**
 * @tc.name: pad_build_002
 * @tc.desc: Verify that the build function returns a failed message when m_isBuild is true.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_002, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: pad_build_003
 * @tc.desc: Verify that the build function returns a failed message with invalid inputs.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_003, TestSize.Level0)
{
    m_inputs = {0, 1, 2};
    m_outputs = {3};
    m_params = {4, 5};

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_build_004
 * @tc.desc: Verify that the build function returns a failed message with invalid outputs.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_004, TestSize.Level0)
{
    m_outputs = {2, 3};
    m_params = {4, 5};

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_build_005
 * @tc.desc: Verify that the build function returns a failed message with an empty allTensors.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_005, TestSize.Level0)
{
    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputs, m_outputs, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_build_006
 * @tc.desc: Verify that the build function returns a failed message without an output tensor.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_006, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputs, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_build_007
 * @tc.desc: Verify that the build function returns a failed message with an invalid dataType for constantValue.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_007, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    std::shared_ptr<NNTensor> constantValueTensor = TransToNNTensor(OH_NN_INT32, m_paramDim,
        nullptr, OH_NN_PAD_CONSTANT_VALUE);
    int32_t constantValue = 0;
    constantValueTensor->SetBuffer(&constantValue, sizeof(constantValue));
    m_allTensors.emplace_back(constantValueTensor);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    constantValueTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: pad_build_008
 * @tc.desc: Verify that the build function returns a failed message with an invalid dataType for paddingMode.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_008, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);

    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    std::shared_ptr<NNTensor> paddingModeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim,
        nullptr, OH_NN_PAD_PADDING_MODE);
    int64_t paddingMode = 0;
    paddingModeTensor->SetBuffer(&paddingMode, sizeof(int64_t));
    m_allTensors.emplace_back(paddingModeTensor);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    paddingModeTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: pad_build_009
 * @tc.desc: Verify that the build function returns a failed message with an invalid dimension for constantValue.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_009, TestSize.Level0)
{
    m_paramDim = {2};

    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);

    std::shared_ptr<NNTensor> constantValueTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim,
        nullptr, OH_NN_PAD_CONSTANT_VALUE);
    float constantValue[2] = {2.0, 2.0};
    constantValueTensor->SetBuffer(constantValue, 2 * sizeof(float));
    m_allTensors.emplace_back(constantValueTensor);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
    constantValueTensor->SetBuffer(nullptr, 0);
}

/**
 * @tc.name: pad_build_010
 * @tc.desc: Verify that the build function returns a failed message when passing an invalid parameter type
 *           for constantValue.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_010, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_build_011
 * @tc.desc: Verify that the build function returns a failed message when passing an invalid parameter type
 *           for paddingMode.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_011, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_ONE_HOT_AXIS);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_build_012
 * @tc.desc: Verify that the build function returns a failed message when no buffer is set for constantValue.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_012, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    std::shared_ptr<NNTensor> constantValueTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim,
        nullptr, OH_NN_PAD_CONSTANT_VALUE);
    m_allTensors.emplace_back(constantValueTensor);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_build_013
 * @tc.desc: Verify that the build function returns a failed message when no buffer is set for paddingMode.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_build_013, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    std::shared_ptr<NNTensor> paddingModeTensor = TransToNNTensor(OH_NN_INT32, m_paramDim,
        nullptr, OH_NN_PAD_PADDING_MODE);
    m_allTensors.emplace_back(paddingModeTensor);

    OH_NN_ReturnCode ret = m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: pad_getprimitive_001
 * @tc.desc: Verify that the getPrimitive function returns a successful message.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_getprimitive_001, TestSize.Level0)
{
    SaveInputTensor(m_inputs, OH_NN_FLOAT32, m_inputDim, nullptr);
    SaveOutputTensor(m_outputs, OH_NN_FLOAT32, m_outputDim, nullptr);
    SetConstValueTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_PAD_CONSTANT_VALUE);
    SetPaddingModeTensor(OH_NN_INT32, m_paramDim, nullptr, OH_NN_PAD_PADDING_MODE);

    float constantValue = 2.0;
    mindspore::lite::PaddingMode paddingModeValue = mindspore::lite::PADDING_MODE_CONSTANT;
    EXPECT_EQ(OH_NN_SUCCESS, m_pad.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors));
    LiteGraphPrimitvePtr primitive = m_pad.GetPrimitive();
    LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
    EXPECT_NE(expectPrimitive, primitive);

    auto returnValue = mindspore::lite::MindIR_PadFusion_GetConstantValue(primitive.get());
    EXPECT_EQ(returnValue, constantValue);
    auto returnPaddingMode = mindspore::lite::MindIR_PadFusion_GetPaddingMode(primitive.get());
    EXPECT_EQ(returnPaddingMode, paddingModeValue);
}

/**
 * @tc.name: pad_getprimitive_002
 * @tc.desc: Verify that the getPrimitive function returns a null primitive when build has not been called.
 * @tc.type: FUNC
 */
HWTEST_F(PadBuilderTest, pad_getprimitive_002, TestSize.Level0)
{
    PadBuilder pad;
    LiteGraphPrimitvePtr primitive = pad.GetPrimitive();
    LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive);
    EXPECT_EQ(expectPrimitive, primitive);
}
} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS