/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <gmock/gmock.h>

#include "nnexecutor.h"
#include "nncompiler.h"
#include "nnbackend.h"
#include "device.h"
#include "prepared_model.h"
#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h"
#include "common/utils.h"
#include "common/log.h"

using namespace testing;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
class NNExecutorTest : public testing::Test {
public:
    NNExecutorTest() = default;
    ~NNExecutorTest() = default;

public:
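    // Shared test data: a 3 x 3 shape and a 9-element float buffer, reused by
    // the SetInput/SetInputFromMemory and memory-creation cases below.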
    uint32_t m_index {0};
    const std::vector<int32_t> m_dim {3, 3};
    const std::vector<int32_t> m_dimOut {3, 3};
    const int32_t m_dimArry[2] {3, 3};
    uint32_t m_dimensionCount {2};
    float m_dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8};
};

class MockIDevice : public Device {
public:
    MOCK_METHOD1(GetDeviceName, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetVendorName, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetVersion, OH_NN_ReturnCode(std::string&));
    MOCK_METHOD1(GetDeviceType, OH_NN_ReturnCode(OH_NN_DeviceType&));
    MOCK_METHOD1(GetDeviceStatus, OH_NN_ReturnCode(DeviceStatus&));
    MOCK_METHOD2(GetSupportedOperation, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
        std::vector<bool>&));
    MOCK_METHOD1(IsFloat16PrecisionSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsPerformanceModeSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsPrioritySupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsDynamicInputSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD1(IsModelCacheSupported, OH_NN_ReturnCode(bool&));
    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&));
    MOCK_METHOD3(PrepareModel, OH_NN_ReturnCode(const void*,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&));
    MOCK_METHOD4(PrepareModelFromModelCache, OH_NN_ReturnCode(const std::vector<Buffer>&,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&,
        bool&));
    MOCK_METHOD3(PrepareOfflineModel, OH_NN_ReturnCode(std::shared_ptr<const mindspore::lite::LiteGraph>,
        const ModelConfig&,
        std::shared_ptr<PreparedModel>&));
    MOCK_METHOD1(AllocateBuffer, void*(size_t));
    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<TensorDesc>));
    MOCK_METHOD2(AllocateTensorBuffer, void*(size_t, std::shared_ptr<NNTensor>));
    MOCK_METHOD1(ReleaseBuffer, OH_NN_ReturnCode(const void*));
    MOCK_METHOD2(AllocateBuffer, OH_NN_ReturnCode(size_t, int&));
    MOCK_METHOD2(ReleaseBuffer, OH_NN_ReturnCode(int, size_t));
};
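
// The tests below stub AllocateTensorBuffer to return a dummy non-null address
// (0x1000) so that memory creation succeeds without touching a real device, e.g.:
//     EXPECT_CALL(*device, AllocateTensorBuffer(length, desc))
//         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));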

class MockIPreparedModel : public PreparedModel {
public:
    MOCK_METHOD1(ExportModelCache, OH_NN_ReturnCode(std::vector<Buffer>&));
    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<IOTensor>&,
        const std::vector<IOTensor>&,
        std::vector<std::vector<int32_t>>&,
        std::vector<bool>&));
    MOCK_METHOD4(Run, OH_NN_ReturnCode(const std::vector<NN_Tensor*>&,
        const std::vector<NN_Tensor*>&,
        std::vector<std::vector<int32_t>>&,
        std::vector<bool>&));
    MOCK_CONST_METHOD1(GetModelID, OH_NN_ReturnCode(uint32_t&));
    MOCK_METHOD2(GetInputDimRanges, OH_NN_ReturnCode(std::vector<std::vector<uint32_t>>&,
        std::vector<std::vector<uint32_t>>&));
};
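
// GetInputDimRanges is stubbed in two styles below: WillRepeatedly(Return(...))
// when only the return code matters, and WillOnce(Invoke(...)) with a lambda
// when a test also needs the output dimension vectors to be filled in.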

class MockTensorDesc : public TensorDesc {
public:
    MOCK_METHOD1(GetDataType, OH_NN_ReturnCode(OH_NN_DataType*));
    MOCK_METHOD1(SetDataType, OH_NN_ReturnCode(OH_NN_DataType));
    MOCK_METHOD1(GetFormat, OH_NN_ReturnCode(OH_NN_Format*));
    MOCK_METHOD1(SetFormat, OH_NN_ReturnCode(OH_NN_Format));
    MOCK_METHOD2(GetShape, OH_NN_ReturnCode(int32_t**, size_t*));
    MOCK_METHOD2(SetShape, OH_NN_ReturnCode(const int32_t*, size_t));
    MOCK_METHOD1(GetElementNum, OH_NN_ReturnCode(size_t*));
    MOCK_METHOD1(GetByteSize, OH_NN_ReturnCode(size_t*));
    MOCK_METHOD1(SetName, OH_NN_ReturnCode(const char*));
    MOCK_METHOD1(GetName, OH_NN_ReturnCode(const char**));
};

OH_NN_Tensor SetTensor(OH_NN_DataType dataType, uint32_t dimensionCount, const int32_t *dimensions,
    const OH_NN_QuantParam *quantParam, OH_NN_TensorType type)
{
    OH_NN_Tensor tensor;
    tensor.dataType = dataType;
    tensor.dimensionCount = dimensionCount;
    tensor.dimensions = dimensions;
    tensor.quantParam = quantParam;
    tensor.type = type;

    return tensor;
}
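
// Example (illustrative only): describe a 3 x 3 float32 tensor without quant
// params, matching the fixture data used by the SetInput cases below:
//     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);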

/**
 * @tc.name: nnexecutortest_construct_001
 * @tc.desc: Verify that the NNExecutor constructor succeeds and that input and output memories can be created.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_construct_001, TestSize.Level0)
{
    LOGE("NNExecutor nnexecutortest_construct_001");
    size_t m_backendID {0};
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();

    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    size_t length = 9 * sizeof(float);
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
    EXPECT_NE(nullptr, nnExecutor);

    OH_NN_Memory** memory = nullptr;
    void* const data = dataArry;
    OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
    OH_NN_Memory* mPtr = &memoryPtr;
    memory = &mPtr;

    OH_NN_ReturnCode retOutput = nnExecutor->CreateOutputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, retOutput);
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
    OH_NN_ReturnCode retinput = nnExecutor->CreateInputMemory(m_index, length, memory);
    EXPECT_EQ(OH_NN_SUCCESS, retinput);

    delete nnExecutor;

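    // The mock intentionally outlives the test; suppress gmock's
    // destructor-time leak check for it.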
    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_001
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_OPERATION_FORBIDDEN when the prepared model fails to report input dim ranges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_001, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_002
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when minInputDims is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_002, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t max = 10;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, nullptr, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_003
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when maxInputDims is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_003, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t *minInputDims = &min;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, nullptr, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_004
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when shapeLength is nullptr.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_004, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, nullptr);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_005
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the prepared model reports success but leaves the dim ranges empty.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_005, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_005");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_006
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_SUCCESS when the prepared model reports valid input dim ranges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_006, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_006");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Fill the reference parameters in place
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // simulate a successful query
        }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_SUCCESS, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_007
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the reported min and max dim range counts differ.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_007, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_007");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2}, {1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Fill the reference parameters in place
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // simulate a successful query
        }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getinputdimrange_008
 * @tc.desc: Verify that GetInputDimRange returns OH_NN_INVALID_PARAMETER when the reported min and max shapes of an input differ in rank.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_008, TestSize.Level0)
{
    LOGE("GetInputDimRange nnexecutortest_getinputdimrange_008");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Fill the reference parameters in place
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // simulate a successful query
        }));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    size_t min = 1;
    size_t max = 10;
    size_t *minInputDims = &min;
    size_t *maxInputDIms = &max;
    size_t shapeLength = 0;
    OH_NN_ReturnCode ret = nnExecutor->GetInputDimRange(index, &minInputDims, &maxInputDIms, &shapeLength);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getoutputshape_001
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when no output tensor descriptions exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_001, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_002
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output TensorDesc at the index is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_002, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_003
 * @tc.desc: Verify that GetOutputShape returns OH_NN_INVALID_PARAMETER when the output TensorDesc has no shape set.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_003, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    pair1.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    int32_t** dimensions = &ptr;
    uint32_t dimensionCount = 2;
    uint32_t* shapeNum = &dimensionCount;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputshape_004
 * @tc.desc: Verify that GetOutputShape returns OH_NN_SUCCESS for a valid output TensorDesc with a shape set.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_004, TestSize.Level0)
{
    LOGE("GetOutputShape nnexecutortest_getoutputshape_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();

    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    int32_t expectDim2[2] = {3, 3};
    int32_t* ptr2 = expectDim2;
    int32_t** dimensions = &ptr2;
    uint32_t* shapeNum = &dimensionCount;
    *dimensions = nullptr;
    OH_NN_ReturnCode ret = nnExecutor->GetOutputShape(m_index, dimensions, shapeNum);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
}

/**
 * @tc.name: nnexecutortest_getinputnum_001
 * @tc.desc: Verify that GetInputNum returns 0 when no input tensor descriptions exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getinputnum_001, TestSize.Level0)
{
    LOGE("GetInputNum nnexecutortest_getinputnum_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t ret = nnExecutor->GetInputNum();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_getoutputnum_001
 * @tc.desc: Verify that GetOutputNum returns 0 when no output tensor descriptions exist.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getoutputnum_001, TestSize.Level0)
{
    LOGE("GetOutputNum nnexecutortest_getoutputnum_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t ret = nnExecutor->GetOutputNum();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_001
 * @tc.desc: Verify that CreateInputTensorDesc returns nullptr for an out-of-range index.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_001, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_002
 * @tc.desc: Verify that CreateInputTensorDesc returns nullptr when the stored input TensorDesc is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_002, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createinputtensordesc_003
 * @tc.desc: Verify that CreateInputTensorDesc returns a valid NN_TensorDesc for a valid index.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_003, TestSize.Level0)
{
    LOGE("CreateInputTensorDesc nnexecutortest_createinputtensordesc_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 0;
    NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
    EXPECT_NE(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_001
 * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr for an out-of-range index.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_001, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_002
 * @tc.desc: Verify that CreateOutputTensorDesc returns nullptr when the stored output TensorDesc is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_002, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_EQ(nullptr, ret);
}

/**
 * @tc.name: nnexecutortest_createoutputtensordesc_003
 * @tc.desc: Verify that CreateOutputTensorDesc returns a valid NN_TensorDesc for a valid index.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_003, TestSize.Level0)
{
    LOGE("CreateOutputTensorDesc nnexecutortest_createoutputtensordesc_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t index = 1;
    NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
    EXPECT_NE(nullptr, ret);
}

void MyOnRunDone(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount)
{
    LOGE("MyOnRunDone");
    // Handle the result here, for example:
    if (errCode != OH_NN_SUCCESS) {
        // Handle the failure
        LOGE("Neural network execution failed with error code: %d", errCode);
    } else {
        // On success, consume outputTensor[] and outputCount; outputTensor
        // typically points at the memory holding the network's output data.
    }
    // If userData owns resources that need cleanup, release them here.
}

/**
 * @tc.name: nnexecutortest_setonrundone_001
 * @tc.desc: Verify that SetOnRunDone returns OH_NN_OPERATION_FORBIDDEN.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setonrundone_001, TestSize.Level0)
{
    LOGE("SetOnRunDone nnexecutortest_setonrundone_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_ReturnCode ret = nnExecutor->SetOnRunDone(MyOnRunDone);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

void MyOnServiceDied(void *userData)
{
    LOGE("MyOnServiceDied");
}

/**
 * @tc.name: nnexecutortest_setonservicedied_001
 * @tc.desc: Verify that SetOnServiceDied returns OH_NN_OPERATION_FORBIDDEN.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setonservicedied_001, TestSize.Level0)
{
    LOGE("SetOnServiceDied nnexecutortest_setonservicedied_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_ReturnCode ret = nnExecutor->SetOnServiceDied(MyOnServiceDied);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_001
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the input tensor array is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_001, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t inputSize = 1;
    size_t outputSize = 1;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_002
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when inputSize is 0.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_002, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t inputSize = 0;
    size_t outputSize = 1;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(nullptr, inputSize, nullptr, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}

/**
 * @tc.name: nnexecutortest_runsync_003
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when querying the input dim ranges is forbidden.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_003, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Fill the reference parameters in place
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_OPERATION_FORBIDDEN; // simulate a forbidden query
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
    testing::Mock::AllowLeak(device.get());
}

/**
 * @tc.name: nnexecutortest_runsync_004
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the reported dim range count does not match the input count.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_004, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_004");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Fill the reference parameters in place
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // simulate a successful query
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_runsync_005
 * @tc.desc: Verify that RunSync returns OH_NN_INVALID_PARAMETER when the reported min and max dim range counts differ.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_005, TestSize.Level0)
{
    LOGE("RunSync nnexecutortest_runsync_005");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}, {1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Fill the reference parameters in place
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_SUCCESS; // simulate a successful query
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    size_t inputSize = 2;
    size_t outputSize = 2;
    OH_NN_ReturnCode ret = nnExecutor->RunSync(&tensor, inputSize, &tensor, outputSize);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_runasync_001
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when the input tensor array is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_001, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    void* buffer = m_dataArry;
    size_t inputSize = 1;
    size_t outputSize = 1;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runasync_002
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when inputSize is 0.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_002, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    void* buffer = m_dataArry;
    size_t inputSize = 0;
    size_t outputSize = 1;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(nullptr, inputSize, nullptr, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
}

/**
 * @tc.name: nnexecutortest_runasync_003
 * @tc.desc: Verify that RunAsync returns OH_NN_OPERATION_FORBIDDEN when querying the input dim ranges is forbidden.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_runasync_003, TestSize.Level0)
{
    LOGE("RunAsync nnexecutortest_runasync_003");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};

    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();

    std::vector<std::vector<uint32_t>> minDims = {{1, 2, 3}};
    std::vector<std::vector<uint32_t>> maxDims = {{4, 5, 6}};
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillOnce(Invoke([&minDims, &maxDims](std::vector<std::vector<uint32_t>>& minInputDims,
            std::vector<std::vector<uint32_t>>& maxInputDims) {
            // Fill the reference parameters in place
            minInputDims = minDims;
            maxInputDims = maxDims;
            return OH_NN_OPERATION_FORBIDDEN; // simulate a forbidden query
        }));

    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
    int32_t expectDim[2] = {3, 3};
    int32_t* ptr = expectDim;
    uint32_t dimensionCount = 2;
    tensorDesr->SetShape(ptr, dimensionCount);
    pair1.first = tensorDesr;
    pair2.first = tensorDesr;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);
    m_outputTensorDescs.emplace_back(pair1);
    m_outputTensorDescs.emplace_back(pair2);
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    size_t backendID = 1;
    std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
    TensorDesc desc;
    TensorDesc* tensorDesc = &desc;

    std::unique_ptr<NNBackend> hdiDevice = std::make_unique<NNBackend>(device, backendID);
    NN_Tensor* tensor = reinterpret_cast<NN_Tensor*>(hdiDevice->CreateTensor(tensorDesc));

    void* buffer = m_dataArry;
    size_t inputSize = 2;
    size_t outputSize = 2;
    int32_t timeout = 10;
    OH_NN_ReturnCode ret = nnExecutor->RunAsync(&tensor, inputSize, &tensor, outputSize, timeout, buffer);
    EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_getbackendid_001
 * @tc.desc: Verify that GetBackendID returns the backend ID passed at construction.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_getbackendid_001, TestSize.Level0)
{
    LOGE("GetBackendID nnexecutortest_getbackendid_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);

    size_t ret = nnExecutor->GetBackendID();
    EXPECT_EQ(0, ret);
}

/**
 * @tc.name: nnexecutortest_setinput_001
 * @tc.desc: Verify that SetInput returns OH_NN_FAILED when the prepared model fails to report input dim ranges.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinput_001, TestSize.Level0)
{
    LOGE("SetInput nnexecutortest_setinput_001");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_FAILED));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
    EXPECT_EQ(OH_NN_FAILED, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinput_002
 * @tc.desc: Verify that SetInput returns OH_NN_INVALID_PARAMETER when the stored input TensorDesc is null.
 * @tc.type: FUNC
 */
HWTEST_F(NNExecutorTest, nnexecutortest_setinput_002, TestSize.Level0)
{
    LOGE("SetInput nnexecutortest_setinput_002");
    size_t m_backendID {0};
    std::shared_ptr<Device> m_device {nullptr};
    std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
    EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
        .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;

    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
    std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
    m_inputTensorDescs.emplace_back(pair1);
    m_inputTensorDescs.emplace_back(pair2);

    NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);

    OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
    void* buffer = m_dataArry;
    size_t length = 9 * sizeof(float);

    OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);

    testing::Mock::AllowLeak(mockIPreparedMode.get());
}

/**
 * @tc.name: nnexecutortest_setinput_003
 * @tc.desc: Verify that SetInput returns OH_NN_INVALID_PARAMETER when the tensor does not match the stored input TensorDesc.
 * @tc.type: FUNC
 */
1260 HWTEST_F(NNExecutorTest, nnexecutortest_setinput_003, TestSize.Level0)
1261 {
1262 LOGE("SetInput nnexecutortest_setinput_003");
1263 size_t m_backendID {0};
1264 std::shared_ptr<Device> m_device {nullptr};
1265 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1266 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1267 .WillRepeatedly(::testing::Return(OH_NN_OPERATION_FORBIDDEN));
1268 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1269 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1270
1271 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1272 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1273 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1274 int32_t expectDim[2] = {3, 3};
1275 int32_t* ptr = expectDim;
1276 uint32_t dimensionCount = 2;
1277 tensorDesc->SetShape(ptr, dimensionCount);
1278 pair1.first = tensorDesc;
1279 pair2.first = tensorDesc;
1280 m_inputTensorDescs.emplace_back(pair1);
1281 m_inputTensorDescs.emplace_back(pair2);
1282
1283 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1284 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1285
1286 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1287 void* buffer = m_dataArry;
1288 size_t length = 9 * sizeof(float);
1289
1290 OH_NN_ReturnCode ret = nnExecutor->SetInput(m_index, tensor, buffer, length);
1291 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1292
1293 testing::Mock::AllowLeak(mockIPreparedMode.get());
1294 }
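
// The shaped two-pair descriptor setup above recurs throughout the tests below.
// A minimal shared helper along these lines (a sketch, assuming only the
// TensorDesc::SetShape signature already used in this file) could replace the
// repeated blocks; the tests keep the inline form so each case stays self-contained.
inline std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> BuildShapedDescPairs()
{
    std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
    int32_t shape[2] = {3, 3};
    tensorDesc->SetShape(shape, 2);
    // Both pairs share one descriptor, matching the inline setup in the tests.
    return {{tensorDesc, OH_NN_TENSOR}, {tensorDesc, OH_NN_TENSOR}};
}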
1295
1296 /**
1297 * @tc.name: nnexecutortest_setinputfrommemory_001
1298 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails and no input descriptions are cached.
1299 * @tc.type: FUNC
1300 */
1301 HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_001, TestSize.Level0)
1302 {
1303 LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_001");
1304 size_t m_backendID {0};
1305 std::shared_ptr<Device> m_device {nullptr};
1306 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1307 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1308 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
1309 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1310 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1311 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1312 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1313
1314 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1315 void* const data = m_dataArry;
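// OH_NN_Memory pairs the mapped data pointer with its length in bytes (3 x 3 floats here).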
1316 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1317
1318 OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
1319 EXPECT_EQ(OH_NN_FAILED, ret);
1320
1321 testing::Mock::AllowLeak(mockIPreparedMode.get());
1322 }
1323
1324 /**
1325 * @tc.name: nnexecutortest_setinputfrommemory_002
1326 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails and the cached input descriptions are empty pairs.
1327 * @tc.type: FUNC
1328 */
1329 HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_002, TestSize.Level0)
1330 {
1331 LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_002");
1332 size_t m_backendID {0};
1333 std::shared_ptr<Device> m_device {nullptr};
1334 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1335 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1336 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
1337 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1338 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1339
1340 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1341 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1342 m_inputTensorDescs.emplace_back(pair1);
1343 m_inputTensorDescs.emplace_back(pair2);
1344 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1345 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1346
1347 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1348 void* const data = m_dataArry;
1349 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1350
1351 OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
1352 EXPECT_EQ(OH_NN_FAILED, ret);
1353
1354 testing::Mock::AllowLeak(mockIPreparedMode.get());
1355 }
1356
1357 /**
1358 * @tc.name: nnexecutortest_setinputfrommemory_003
1359 * @tc.desc: Verify that SetInputFromMemory returns OH_NN_FAILED when GetInputDimRanges fails even with a 3 x 3 input description cached.
1360 * @tc.type: FUNC
1361 */
1362 HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_003, TestSize.Level0)
1363 {
1364 LOGE("SetInputFromMemory nnexecutortest_setinputfrommemory_003");
1365 size_t m_backendID {0};
1366 std::shared_ptr<Device> m_device {nullptr};
1367 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
1368 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
1369 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
1370 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1371 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1372
1373 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1374 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1375 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1376 int32_t expectDim[2] = {3, 3};
1377 int32_t* ptr = expectDim;
1378 uint32_t dimensionCount = 2;
1379 tensorDesc->SetShape(ptr, dimensionCount);
1380 pair1.first = tensorDesc;
1381 pair2.first = tensorDesc;
1382 m_inputTensorDescs.emplace_back(pair1);
1383 m_inputTensorDescs.emplace_back(pair2);
1384 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1385 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
1386
1387 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
1388 void* const data = m_dataArry;
1389 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1390
1391 OH_NN_ReturnCode ret = nnExecutor->SetInputFromMemory(m_index, tensor, memory);
1392 EXPECT_EQ(OH_NN_FAILED, ret);
1393
1394 testing::Mock::AllowLeak(mockIPreparedMode.get());
1395 }
1396
1397 /**
1398 * @tc.name: nnexecutortest_setoutput_001
1399 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when no output descriptions are cached.
1400 * @tc.type: FUNC
1401 */
1402 HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_001, TestSize.Level0)
1403 {
1404 LOGE("SetOutput nnexecutortest_setoutput_001");
1405 size_t m_backendID {0};
1406 std::shared_ptr<Device> m_device {nullptr};
1407 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1408 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1409 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1410 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1411 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1412
1413 size_t length = 9 * sizeof(float);
1414 void* buffer = m_dataArry;
1415
1416 OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
1417 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1418 }
1419
1420 /**
1421 * @tc.name: nnexecutortest_setoutput_002
1422 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the cached output descriptions are empty pairs.
1423 * @tc.type: FUNC
1424 */
1425 HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_002, TestSize.Level0)
1426 {
1427 LOGE("SetOutput nnexecutortest_setoutput_002");
1428 size_t m_backendID {0};
1429 std::shared_ptr<Device> m_device {nullptr};
1430 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1431 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1432 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1433
1434 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1435 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1436 m_outputTensorDescs.emplace_back(pair1);
1437 m_outputTensorDescs.emplace_back(pair2);
1438 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1439 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1440
1441 size_t length = 9 * sizeof(float);
1442 void* buffer = m_dataArry;
1443
1444 OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
1445 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1446 }
1447
1448 /**
1449 * @tc.name: nnexecutortest_setoutput_003
1450 * @tc.desc: Verify that SetOutput returns OH_NN_INVALID_PARAMETER when the executor was constructed with a null prepared model and a 3 x 3 output description.
1451 * @tc.type: FUNC
1452 */
1453 HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_003, TestSize.Level0)
1454 {
1455 LOGE("SetOutput nnexecutortest_setoutput_003");
1456 size_t m_backendID {0};
1457 std::shared_ptr<Device> m_device {nullptr};
1458 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1459 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1460 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1461
1462 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1463 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1464 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1465 int32_t expectDim[2] = {3, 3};
1466 int32_t* ptr = expectDim;
1467 uint32_t dimensionCount = 2;
1468 tensorDesc->SetShape(ptr, dimensionCount);
1469 pair1.first = tensorDesc;
1470 pair2.first = tensorDesc;
1471 m_outputTensorDescs.emplace_back(pair1);
1472 m_outputTensorDescs.emplace_back(pair2);
1473 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1474 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1475
1476 size_t length = 9 * sizeof(float);
1477 void* buffer = m_dataArry;
1478
1479 OH_NN_ReturnCode ret = nnExecutor->SetOutput(m_index, buffer, length);
1480 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1481 }
1482
1483 /**
1484 * @tc.name: nnexecutortest_setoutputfrommemory_001
1485 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when no output descriptions are cached.
1486 * @tc.type: FUNC
1487 */
1488 HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_001, TestSize.Level0)
1489 {
1490 LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_001");
1491 size_t m_backendID {0};
1492 std::shared_ptr<Device> m_device {nullptr};
1493 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1494 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1495 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1496 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1497 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1498
1499 void* const data = m_dataArry;
1500 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1501
1502 OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
1503 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1504 }
1505
1506 /**
1507 * @tc.name: nnexecutortest_setoutputfrommemory_002
1508 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the cached output descriptions are empty pairs.
1509 * @tc.type: FUNC
1510 */
1511 HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_002, TestSize.Level0)
1512 {
1513 LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_002");
1514 size_t m_backendID {0};
1515 std::shared_ptr<Device> m_device {nullptr};
1516 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1517 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1518 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1519
1520 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1521 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1522 m_outputTensorDescs.emplace_back(pair1);
1523 m_outputTensorDescs.emplace_back(pair2);
1524 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1525 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1526
1527 void* const data = m_dataArry;
1528 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1529
1530 OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
1531 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1532 }
1533
1534 /**
1535 * @tc.name: nnexecutortest_setoutputfrommemory_003
1536 * @tc.desc: Verify that SetOutputFromMemory returns OH_NN_INVALID_PARAMETER when the executor was constructed with a null prepared model and a 3 x 3 output description.
1537 * @tc.type: FUNC
1538 */
1539 HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_003, TestSize.Level0)
1540 {
1541 LOGE("SetOutputFromMemory nnexecutortest_setoutputfrommemory_003");
1542 size_t m_backendID {0};
1543 std::shared_ptr<Device> m_device {nullptr};
1544 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1545 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1546 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1547
1548 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1549 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1550 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1551 int32_t expectDim[2] = {3, 3};
1552 int32_t* ptr = expectDim;
1553 uint32_t dimensionCount = 2;
1554 tensorDesc->SetShape(ptr, dimensionCount);
1555 pair1.first = tensorDesc;
1556 pair2.first = tensorDesc;
1557 m_outputTensorDescs.emplace_back(pair1);
1558 m_outputTensorDescs.emplace_back(pair2);
1559 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1560 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1561
1562 void* const data = m_dataArry;
1563 OH_NN_Memory memory = {data, 9 * sizeof(float)};
1564
1565 OH_NN_ReturnCode ret = nnExecutor->SetOutputFromMemory(m_index, memory);
1566 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1567 }
1568
1569 /**
1570 * @tc.name: nnexecutortest_createinputmemory_001
1571 * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when no input descriptions are cached.
1572 * @tc.type: FUNC
1573 */
1574 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_001, TestSize.Level0)
1575 {
1576 LOGE("CreateInputMemory nnexecutortest_createinputmemory_001");
1577 size_t m_backendID {0};
1578 std::shared_ptr<Device> m_device {nullptr};
1579 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1580 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1581 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1582 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1583 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1584
1585 OH_NN_Memory** memory = nullptr;
1586 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1587 void* const data = dataArry;
1588 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1589 OH_NN_Memory* ptr = &memoryPtr;
1590 memory = &ptr;
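// CreateInputMemory reports its result through an OH_NN_Memory** out-parameter; a pre-initialized pointer is handed in so the call site stays valid even if creation fails.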
1591 size_t length = 9 * sizeof(float);
1592
1593 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1594 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1595 }
1596
1597 /**
1598 * @tc.name: nnexecutortest_createinputmemory_002
1599 * @tc.desc: Verify that CreateInputMemory returns OH_NN_INVALID_PARAMETER when the cached input descriptions are empty pairs.
1600 * @tc.type: FUNC
1601 */
1602 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_002, TestSize.Level0)
1603 {
1604 LOGE("CreateInputMemory nnexecutortest_createinputmemory_002");
1605 size_t m_backendID {0};
1606 std::shared_ptr<Device> m_device {nullptr};
1607 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1608 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1609 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1610
1611 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1612 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1613 m_inputTensorDescs.emplace_back(pair1);
1614 m_inputTensorDescs.emplace_back(pair2);
1615 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1616 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1617
1618 OH_NN_Memory** memory = nullptr;
1619 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1620 void* const data = dataArry;
1621 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1622 OH_NN_Memory* ptr = &memoryPtr;
1623 memory = &ptr;
1624 size_t length = 9 * sizeof(float);
1625
1626 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1627 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1628 }
1629
1630 /**
1631 * @tc.name: nnexecutortest_createinputmemory_003
1632 * @tc.desc: Verify that CreateInputMemory returns OH_NN_MEMORY_ERROR when the device fails to allocate a tensor buffer.
1633 * @tc.type: FUNC
1634 */
1635 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_003, TestSize.Level0)
1636 {
1637 LOGE("CreateInputMemory nnexecutortest_createinputmemory_003");
1638 size_t m_backendID {0};
1639 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1640
1641 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1642 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1643 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1644
1645 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1646 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1647 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1648 int32_t expectDim[2] = {3, 3};
1649 int32_t* ptr = expectDim;
1650 uint32_t dimensionCount = 2;
1651 tensorDesc->SetShape(ptr, dimensionCount);
1652 pair1.first = tensorDesc;
1653 pair2.first = tensorDesc;
1654 m_inputTensorDescs.emplace_back(pair1);
1655 m_inputTensorDescs.emplace_back(pair2);
1656
1657 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1658 size_t length = 9 * sizeof(float);
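// Stub the device allocator to fail: a null buffer should surface from CreateInputMemory as OH_NN_MEMORY_ERROR.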
1659 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
1660 .WillRepeatedly(::testing::Return(nullptr));
1661
1662 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1663 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1664
1665 OH_NN_Memory** memory = nullptr;
1666 void* const data = dataArry;
1667 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1668 OH_NN_Memory* mPtr = &memoryPtr;
1669 memory = &mPtr;
1670
1671 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1672 EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
1673
1674 testing::Mock::AllowLeak(device.get());
1675 }
1676
1677 /**
1678 * @tc.name: nnexecutortest_createinputmemory_004
1679 * @tc.desc: Verify that CreateInputMemory returns OH_NN_SUCCESS when the device allocates a tensor buffer.
1680 * @tc.type: FUNC
1681 */
1682 HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_004, TestSize.Level0)
1683 {
1684 LOGE("CreateInputMemory nnexecutortest_createinputmemory_004");
1685 size_t m_backendID {0};
1686 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1687
1688 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1689 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1690 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1691
1692 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1693 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1694 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1695 int32_t expectDim[2] = {3, 3};
1696 int32_t* ptr = expectDim;
1697 uint32_t dimensionCount = 2;
1698 tensorDesc->SetShape(ptr, dimensionCount);
1699 pair1.first = tensorDesc;
1700 pair2.first = tensorDesc;
1701 m_inputTensorDescs.emplace_back(pair1);
1702 m_inputTensorDescs.emplace_back(pair2);
1703
1704 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1705 size_t length = 9 * sizeof(float);
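// Stub the device allocator with a fake non-null address (0x1000); the test only inspects the return code, so the buffer is never dereferenced.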
1706 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
1707 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
1708
1709 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1710 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1711
1712 OH_NN_Memory** memory = nullptr;
1713 void* const data = dataArry;
1714 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1715 OH_NN_Memory* mPtr = &memoryPtr;
1716 memory = &mPtr;
1717
1718 OH_NN_ReturnCode ret = nnExecutor->CreateInputMemory(m_index, length, memory);
1719 EXPECT_EQ(OH_NN_SUCCESS, ret);
1720
1721 testing::Mock::AllowLeak(device.get());
1722 }
1723
1724 /**
1725 * @tc.name: nnexecutortest_destroyinputmemory_001
1726 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when no input descriptions are cached.
1727 * @tc.type: FUNC
1728 */
1729 HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_001, TestSize.Level0)
1730 {
1731 LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_001");
1732 size_t m_backendID {0};
1733 std::shared_ptr<Device> m_device {nullptr};
1734 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1735 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1736 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1737 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1738 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1739
1740 size_t length = 9 * sizeof(float);
1741 OH_NN_Memory** memory = nullptr;
1742 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1743 void* const data = dataArry;
1744 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1745 OH_NN_Memory* ptr = &memoryPtr;
1746 memory = &ptr;
1747
1748 nnExecutor->CreateInputMemory(m_index, length, memory);
1749 OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
1750 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1751 }
1752
1753 /**
1754 * @tc.name: nnexecutortest_destroyinputmemory_002
1755 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_INVALID_PARAMETER when the cached input descriptions are empty pairs.
1756 * @tc.type: FUNC
1757 */
1758 HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_002, TestSize.Level0)
1759 {
1760 LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_002");
1761 size_t m_backendID {0};
1762 std::shared_ptr<Device> m_device {nullptr};
1763 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1764 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1765 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1766
1767 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1768 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1769 m_inputTensorDescs.emplace_back(pair1);
1770 m_inputTensorDescs.emplace_back(pair2);
1771 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1772 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1773
1774 size_t length = 9 * sizeof(float);
1775 OH_NN_Memory** memory = nullptr;
1776 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1777 void* const data = dataArry;
1778 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1779 OH_NN_Memory* ptr = &memoryPtr;
1780 memory = &ptr;
1781
1782 nnExecutor->CreateInputMemory(m_index, length, memory);
1783 OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
1784 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1785 }
1786
1787 /**
1788 * @tc.name: nnexecutortest_destroyinputmemory_003
1789 * @tc.desc: Verify that DestroyInputMemory returns OH_NN_SUCCESS for a memory object created via CreateInputMemory.
1790 * @tc.type: FUNC
1791 */
1792 HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_003, TestSize.Level0)
1793 {
1794 LOGE("DestroyInputMemory nnexecutortest_destroyinputmemory_003");
1795 size_t m_backendID {0};
1796 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1797 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1798 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1799 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1800
1801 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1802 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1803 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1804 int32_t expectDim[2] = {3, 3};
1805 int32_t* ptr = expectDim;
1806 uint32_t dimensionCount = 2;
1807 tensorDesc->SetShape(ptr, dimensionCount);
1808 pair1.first = tensorDesc;
1809 pair2.first = tensorDesc;
1810 m_inputTensorDescs.emplace_back(pair1);
1811 m_inputTensorDescs.emplace_back(pair2);
1812
1813 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1814 size_t length = 9 * sizeof(float);
1815 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
1816 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
1817 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1818 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1819
1820 OH_NN_Memory** memory = nullptr;
1821 void* const data = dataArry;
1822 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1823 OH_NN_Memory* mPtr = &memoryPtr;
1824 memory = &mPtr;
1825
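// Create the memory through the executor first so DestroyInputMemory has a tracked entry to release.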
1826 nnExecutor->CreateInputMemory(m_index, length, memory);
1827 OH_NN_ReturnCode ret = nnExecutor->DestroyInputMemory(m_index, memory);
1828 EXPECT_EQ(OH_NN_SUCCESS, ret);
1829
1830 testing::Mock::AllowLeak(device.get());
1831 }
1832
1833 /**
1834 * @tc.name: nnexecutortest_createoutputmemory_001
1835 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_INVALID_PARAMETER when no output descriptions are cached.
1836 * @tc.type: FUNC
1837 */
1838 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_001, TestSize.Level0)
1839 {
1840 LOGE("CreateOutputMemory nnexecutortest_createoutputmemory_001");
1841 size_t m_backendID {0};
1842 std::shared_ptr<Device> m_device {nullptr};
1843 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1844 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1845 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1846 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1847 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1848
1849 size_t length = 9 * sizeof(float);
1850 OH_NN_Memory** memory = nullptr;
1851 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1852 void* const data = dataArry;
1853 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1854 OH_NN_Memory* ptr = &memoryPtr;
1855 memory = &ptr;
1856
1857 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1858 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1859 }
1860
1861 /**
1862 * @tc.name: nnexecutortest_createoutputmemory_002
1863 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_INVALID_PARAMETER when the cached output descriptions are empty pairs.
1864 * @tc.type: FUNC
1865 */
1866 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_002, TestSize.Level0)
1867 {
1868 LOGE("CreateInputMemory nnexecutortest_createoutputmemory_002");
1869 size_t m_backendID {0};
1870 std::shared_ptr<Device> m_device {nullptr};
1871 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1872 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1873 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1874
1875 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1876 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1877 m_outputTensorDescs.emplace_back(pair1);
1878 m_outputTensorDescs.emplace_back(pair2);
1879 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1880 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1881
1882 OH_NN_Memory** memory = nullptr;
1883 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1884 void* const data = dataArry;
1885 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1886 OH_NN_Memory* ptr = &memoryPtr;
1887 memory = &ptr;
1888 size_t length = 9 * sizeof(float);
1889
1890 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1891 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
1892 }
1893
1894 /**
1895 * @tc.name: nnexecutortest_createoutputmemory_003
1896 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_MEMORY_ERROR when the device fails to allocate a tensor buffer.
1897 * @tc.type: FUNC
1898 */
1899 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_003, TestSize.Level0)
1900 {
1901 LOGE("CreateInputMemory nnexecutortest_createoutputmemory_003");
1902 size_t m_backendID {0};
1903 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1904
1905 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1906 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1907 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1908
1909 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1910 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1911 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1912 int32_t expectDim[2] = {3, 3};
1913 int32_t* ptr = expectDim;
1914 uint32_t dimensionCount = 2;
1915 tensorDesc->SetShape(ptr, dimensionCount);
1916 pair1.first = tensorDesc;
1917 pair2.first = tensorDesc;
1918 m_outputTensorDescs.emplace_back(pair1);
1919 m_outputTensorDescs.emplace_back(pair2);
1920
1921 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1922 size_t length = 9 * sizeof(float);
1923 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
1924 .WillRepeatedly(::testing::Return(nullptr));
1925
1926 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1927 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1928
1929 OH_NN_Memory** memory = nullptr;
1930 void* const data = dataArry;
1931 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1932 OH_NN_Memory* mPtr = &memoryPtr;
1933 memory = &mPtr;
1934
1935 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1936 EXPECT_EQ(OH_NN_MEMORY_ERROR, ret);
1937
1938 testing::Mock::AllowLeak(device.get());
1939 }
1940
1941 /**
1942 * @tc.name: nnexecutortest_createoutputmemory_004
1943 * @tc.desc: Verify that CreateOutputMemory returns OH_NN_SUCCESS when the device allocates a tensor buffer.
1944 * @tc.type: FUNC
1945 */
1946 HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_004, TestSize.Level0)
1947 {
1948 LOGE("CreateInputMemory nnexecutortest_createoutputmemory_004");
1949 size_t m_backendID {0};
1950 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
1951
1952 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1953 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
1954 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
1955
1956 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
1957 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
1958 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
1959 int32_t expectDim[2] = {3, 3};
1960 int32_t* ptr = expectDim;
1961 uint32_t dimensionCount = 2;
1962 tensorDesc->SetShape(ptr, dimensionCount);
1963 pair1.first = tensorDesc;
1964 pair2.first = tensorDesc;
1965 m_outputTensorDescs.emplace_back(pair1);
1966 m_outputTensorDescs.emplace_back(pair2);
1967
1968 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
1969 size_t length = 9 * sizeof(float);
1970 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
1971 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
1972
1973 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
1974 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
1975
1976 OH_NN_Memory** memory = nullptr;
1977 void* const data = dataArry;
1978 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
1979 OH_NN_Memory* mPtr = &memoryPtr;
1980 memory = &mPtr;
1981
1982 OH_NN_ReturnCode ret = nnExecutor->CreateOutputMemory(m_index, length, memory);
1983 EXPECT_EQ(OH_NN_SUCCESS, ret);
1984
1985 testing::Mock::AllowLeak(device.get());
1986 }
1987
1988 /**
1989 * @tc.name: nnexecutortest_destroyoutputmemory_001
1990 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_INVALID_PARAMETER when no output descriptions are cached.
1991 * @tc.type: FUNC
1992 */
1993 HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_001, TestSize.Level0)
1994 {
1995 LOGE("DestroyOutputMemory nnexecutortest_destroyoutputmemory_001");
1996 size_t m_backendID {0};
1997 std::shared_ptr<Device> m_device {nullptr};
1998 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
1999 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2000 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2001 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2002 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
2003
2004 size_t length = 9 * sizeof(float);
2005 OH_NN_Memory** memory = nullptr;
2006 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
2007 void* const data = dataArry;
2008 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
2009 OH_NN_Memory* ptr = &memoryPtr;
2010 memory = &ptr;
2011
2012 nnExecutor->CreateOutputMemory(m_index, length, memory);
2013 OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
2014 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2015 }
2016
2017 /**
2018 * @tc.name: nnexecutortest_destroyoutputmemory_002
2019 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_INVALID_PARAMETER when the cached output descriptions are empty pairs.
2020 * @tc.type: FUNC
2021 */
2022 HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_002, TestSize.Level0)
2023 {
2024 LOGE("DestroyInputMemory nnexecutortest_destroyoutputmemory_002");
2025 size_t m_backendID {0};
2026 std::shared_ptr<Device> m_device {nullptr};
2027 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
2028 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2029 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2030
2031 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2032 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2033 m_outputTensorDescs.emplace_back(pair1);
2034 m_outputTensorDescs.emplace_back(pair2);
2035 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2036 m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
2037
2038 size_t length = 9 * sizeof(float);
2039 OH_NN_Memory** memory = nullptr;
2040 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
2041 void* const data = dataArry;
2042 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
2043 OH_NN_Memory* ptr = &memoryPtr;
2044 memory = &ptr;
2045
2046 nnExecutor->CreateOutputMemory(m_index, length, memory);
2047 OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
2048 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2049 }
2050
2051 /**
2052 * @tc.name: nnexecutortest_destroyoutputmemory_003
2053 * @tc.desc: Verify that DestroyOutputMemory returns OH_NN_SUCCESS for a memory object created via CreateOutputMemory.
2054 * @tc.type: FUNC
2055 */
2056 HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_003, TestSize.Level0)
2057 {
2058 LOGE("DestroyInputMemory nnexecutortest_destroyoutputmemory_003");
2059 size_t m_backendID {0};
2060 std::shared_ptr<MockIDevice> device = std::make_shared<MockIDevice>();
2061 std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
2062 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2063 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2064
2065 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2066 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2067 std::shared_ptr<TensorDesc> tensorDesc = std::make_shared<TensorDesc>();
2068 int32_t expectDim[2] = {3, 3};
2069 int32_t* ptr = expectDim;
2070 uint32_t dimensionCount = 2;
2071 tensorDesc->SetShape(ptr, dimensionCount);
2072 pair1.first = tensorDesc;
2073 pair2.first = tensorDesc;
2074 m_outputTensorDescs.emplace_back(pair1);
2075 m_outputTensorDescs.emplace_back(pair2);
2076
2077 float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
2078 size_t length = 9 * sizeof(float);
2079 EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
2080 .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
2081 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2082 m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
2083
2084 OH_NN_Memory** memory = nullptr;
2085 void* const data = dataArry;
2086 OH_NN_Memory memoryPtr = {data, 9 * sizeof(float)};
2087 OH_NN_Memory* mPtr = &memoryPtr;
2088 memory = &mPtr;
2089
2090 nnExecutor->CreateOutputMemory(m_index, length, memory);
2091 OH_NN_ReturnCode ret = nnExecutor->DestroyOutputMemory(m_index, memory);
2092 EXPECT_EQ(OH_NN_SUCCESS, ret);
2093
2094 testing::Mock::AllowLeak(device.get());
2095 }
2096
2097 /**
2098 * @tc.name: nnexecutortest_run_001
2099 * @tc.desc: Verify that Run returns OH_NN_SUCCESS when the executor caches no input or output descriptions.
2100 * @tc.type: FUNC
2101 */
2102 HWTEST_F(NNExecutorTest, nnexecutortest_run_001, TestSize.Level0)
2103 {
2104 LOGE("Run nnexecutortest_run_001");
2105 size_t m_backendID {0};
2106 std::shared_ptr<Device> m_device {nullptr};
2107 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2108 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2109 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2110 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2111 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2112 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2113 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2114
2115 size_t length = 9 * sizeof(float);
2116 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
2117 void* buffer = m_dataArry;
2118
2119 nnExecutor->SetInput(m_index, tensor, buffer, length);
2120 nnExecutor->SetOutput(m_index, buffer, length);
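// With no input or output descriptions cached there is nothing to validate, so Run is expected to return OH_NN_SUCCESS.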
2121 OH_NN_ReturnCode ret = nnExecutor->Run();
2122 EXPECT_EQ(OH_NN_SUCCESS, ret);
2123
2124 testing::Mock::AllowLeak(mockIPreparedMode.get());
2125 }
2126
2127 /**
2128 * @tc.name: nnexecutortest_run_002
2129 * @tc.desc: Verify that Run returns OH_NN_INVALID_PARAMETER when the cached input descriptions are empty pairs.
2130 * @tc.type: FUNC
2131 */
2132 HWTEST_F(NNExecutorTest, nnexecutortest_run_002, TestSize.Level0)
2133 {
2134 LOGE("Run nnexecutortest_run_002");
2135 size_t m_backendID {0};
2136 std::shared_ptr<Device> m_device {nullptr};
2137 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2138 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2139 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2140 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2141 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2142
2143 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2144 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2145 m_inputTensorDescs.emplace_back(pair1);
2146 m_inputTensorDescs.emplace_back(pair2);
2147 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2148 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2149
2150 size_t length = 9 * sizeof(float);
2151 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
2152 void* buffer = m_dataArry;
2153
2154 nnExecutor->SetInput(m_index, tensor, buffer, length);
2155 nnExecutor->SetOutput(m_index, buffer, length);
2156 OH_NN_ReturnCode ret = nnExecutor->Run();
2157 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2158
2159 testing::Mock::AllowLeak(mockIPreparedMode.get());
2160 }
2161
2162 /**
2163 * @tc.name: nnexecutortest_run_003
2164 * @tc.desc: Verify that Run returns OH_NN_INVALID_PARAMETER when the cached input and output descriptions are empty pairs.
2165 * @tc.type: FUNC
2166 */
2167 HWTEST_F(NNExecutorTest, nnexecutortest_run_003, TestSize.Level0)
2168 {
2169 LOGE("Run nnexecutortest_run_003");
2170 size_t m_backendID {0};
2171 std::shared_ptr<Device> m_device {nullptr};
2172 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2173 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2174 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2175 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2176 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2177
2178 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2179 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2180 m_inputTensorDescs.emplace_back(pair1);
2181 m_inputTensorDescs.emplace_back(pair2);
2182 m_outputTensorDescs.emplace_back(pair1);
2183 m_outputTensorDescs.emplace_back(pair2);
2184 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2185 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2186
2187 size_t length = 9 * sizeof(float);
2188 OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
2189 void* buffer = m_dataArry;
2190
2191 nnExecutor->SetInput(m_index, tensor, buffer, length);
2192 nnExecutor->SetOutput(m_index, buffer, length);
2193 OH_NN_ReturnCode ret = nnExecutor->Run();
2194 EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
2195
2196 testing::Mock::AllowLeak(mockIPreparedMode.get());
2197 }
2198
2199 /**
2200 * @tc.name: nnexecutortest_setextensionconfig_001
2201 * @tc.desc: Verify that SetExtensionConfig accepts the callingPid, hiaiModelId and isNeedModelLatency keys and that GetExecutorConfig then returns a non-null config.
2202 * @tc.type: FUNC
2203 */
2204 HWTEST_F(NNExecutorTest, nnexecutortest_setextensionconfig_001, TestSize.Level0)
2205 {
2206 LOGE("SetExtensionConfig nnexecutortest_setextensionconfig_001");
2207 size_t m_backendID {0};
2208 std::shared_ptr<Device> m_device {nullptr};
2209 std::shared_ptr<MockIPreparedModel> mockIPreparedMode = std::make_shared<MockIPreparedModel>();
2210 EXPECT_CALL(*((MockIPreparedModel *) mockIPreparedMode.get()), GetInputDimRanges(::testing::_, ::testing::_))
2211 .WillRepeatedly(::testing::Return(OH_NN_FAILED));
2212 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
2213 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
2214
2215 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
2216 std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
2217 m_inputTensorDescs.emplace_back(pair1);
2218 m_inputTensorDescs.emplace_back(pair2);
2219 m_outputTensorDescs.emplace_back(pair1);
2220 m_outputTensorDescs.emplace_back(pair2);
2221 NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
2222 m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
2223
2224 std::unordered_map<std::string, std::vector<char>> configMap;
2225 std::string callingPidStr = "callingPid";
2226 std::vector<char> vecCallingPid(callingPidStr.begin(), callingPidStr.end());
2227 configMap["callingPid"] = vecCallingPid;
2228
2229 std::string hiaiModelIdStr = "hiaiModelId";
2230 std::vector<char> vechiaiModelId(hiaiModelIdStr.begin(), hiaiModelIdStr.end());
2231 configMap["hiaiModelId"] = vechiaiModelId;
2232
2233 std::string vecNeedLatencyStr = "isNeedModelLatency";
2234 std::vector<char> vecNeedLatency(vecNeedLatencyStr.begin(), vecNeedLatencyStr.end());
2235 configMap["isNeedModelLatency"] = vecNeedLatency;
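// callingPid, hiaiModelId and isNeedModelLatency are the extension keys exercised here; the non-null GetExecutorConfig result below confirms the parsed config was stored.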
2236 OH_NN_ReturnCode retSetExtensionConfig = nnExecutor->SetExtensionConfig(configMap);
2237 EXPECT_EQ(OH_NN_SUCCESS, retSetExtensionConfig);
2238
2239 ExecutorConfig* retGetExecutorConfig = nnExecutor->GetExecutorConfig();
2240 EXPECT_NE(nullptr, retGetExecutorConfig);
2241
2242 testing::Mock::AllowLeak(mockIPreparedMode.get());
2243 }
2244 } // namespace UnitTest
2245 } // namespace NeuralNetworkRuntime
2246 } // namespace OHOS