1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "end_to_end_test.h"
17 
18 #include <cmath>
19 #include <cstdio>
20 #include <filesystem>
21 #include <string>
22 #include <vector>
23 
24 #include "securec.h"
25 
26 #include "common/log.h"
27 #include "interfaces/kits/c/neural_network_runtime/neural_network_runtime.h"
28 
29 namespace fs = std::filesystem;
30 
31 namespace OHOS {
32 namespace NeuralNetworkRuntime {
33 namespace SystemTest {
// Fill values for the two float addends used by most test cases.
const float INPUT_ONE = 1.23;
const float INPUT_TWO = 2.34;
// The model computes (addend1 + addend2) + addend2, i.e. 1.23 + 2.34 + 2.34.
const float EXPECTED_OUTPUT = 5.91;
// Quantized result for end_to_end_test_007: real value 4.0 with output scale 0.4 -> 10.
const int8_t EXPECTED_QUANT_OUTPUT = 10;
// Tolerance used when comparing float inference output against EXPECTED_OUTPUT.
const float EPSILON = 1e-4;
const uint32_t NO_DEVICE_COUNT = 0;
// All tensors in these tests are shaped {3, 2, 2}, i.e. 12 elements.
const int32_t ELEMENT_COUNT = 12;
// Byte length of one float addend buffer (12 elements * sizeof(float)).
const uint32_t ADDEND_DATA_LENGTH = ELEMENT_COUNT * sizeof(float);
// Directory and version used by the compilation-cache test (end_to_end_test_005).
const std::string CACHE_DIR = "/data/local/tmp/nnrt_st_cache";
const uint32_t CACHE_VERSION = 1;
// Number of inference iterations for the repeated-run test (end_to_end_test_008).
const int REPEAT_TIMES = 100;
45 
46 // End2EndTest build a model with two connected add operations.
BuildModel(const std::vector<CppTensor> & tensors)47 OH_NN_ReturnCode End2EndTest::BuildModel(const std::vector<CppTensor>& tensors)
48 {
49     m_model = OH_NNModel_Construct();
50     if (m_model == nullptr) {
51         LOGE("End2EndTest::BuildModel failed, error happens when creating OH_NNModel.");
52         return OH_NN_MEMORY_ERROR;
53     }
54 
55     OH_NN_ReturnCode status = AddTensors(tensors);
56     if (status != OH_NN_SUCCESS) {
57         LOGE("End2EndTest::BuildModel failed, error happens when adding tensors.");
58         return status;
59     }
60 
61     status = AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3});
62     if (status != OH_NN_SUCCESS) {
63         LOGE("End2EndTest::BuildModel failed, error happends when adding first Add operation into the model.");
64         return status;
65     }
66 
67     status = AddOperation(OH_NN_OPS_ADD, {2}, {3, 1}, {4});
68     if (status != OH_NN_SUCCESS) {
69         LOGE("End2EndTest::BuildModel failed, error happends when adding second Add operation into the model.");
70         return status;
71     }
72 
73     status = SpecifyInputAndOutput({0, 1}, {4});
74     if (status != OH_NN_SUCCESS) {
75         LOGE("End2EndTest::BuildModel failed, error happends when specifying the inputs and outputs.");
76         return status;
77     }
78 
79     status = OH_NNModel_Finish(m_model);
80     if (status != OH_NN_SUCCESS) {
81         LOGE("End2EndTest::BuildModel failed, error happends during constructing the model.");
82         return status;
83     }
84 
85     return status;
86 }
87 
IsExpectedOutput(const float * outputBuffer)88 OH_NN_ReturnCode End2EndTest::IsExpectedOutput(const float* outputBuffer)
89 {
90     if (outputBuffer == nullptr) {
91         LOGE("End2EndTest::IsExpectedOutput failed, pass nullptr to outputBuffer.");
92         return OH_NN_INVALID_PARAMETER;
93     }
94 
95     for (int i = 0; i < ELEMENT_COUNT; i++) {
96         LOGI("Comparing inference output with expected value, output index: %d, output value: %f, "
97              "expected value: %f.", i, outputBuffer[i], EXPECTED_OUTPUT);
98         if (std::abs(outputBuffer[i] - EXPECTED_OUTPUT) > EPSILON) {
99             return OH_NN_FAILED;
100         }
101     }
102     return OH_NN_SUCCESS;
103 }
104 
IsExpectedOutput(const OH_NN_Memory * outputMemory)105 OH_NN_ReturnCode End2EndTest::IsExpectedOutput(const OH_NN_Memory* outputMemory)
106 {
107     if (outputMemory == nullptr) {
108         LOGE("End2EndTest::IsExpectedOutput failed, pass nullptr to outputMemory.");
109         return OH_NN_INVALID_PARAMETER;
110     }
111 
112     if (outputMemory->length == 0) {
113         LOGE("End2EndTest::IsExpectedOutput failed, outputMemory is empty.");
114         return OH_NN_FAILED;
115     }
116 
117     float* output = static_cast<float*>(const_cast<void*>(outputMemory->data));
118     return IsExpectedOutput(output);
119 }
120 
121 /*
122  * @tc.name: end_to_end_test_001
123  * @tc.desc: Test End-to-End operation of Neural Network Runtime.
124  * @tc.type: FUNC
125  */
126 HWTEST_F(End2EndTest, end_to_end_test_001, testing::ext::TestSize.Level1)
127 {
128     // Prepare tensors
129     int8_t activationValue{0};
130     CppQuantParam quantParam{{}, {}, {}};
131     CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
132     CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
133     CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
134     CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
135     CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
136     std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};
137 
138     ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));
139 
140     m_compilation = OH_NNCompilation_Construct(m_model);
141     ASSERT_NE(nullptr, m_compilation);
142     OH_NNModel_Destroy(&m_model);
143     ASSERT_EQ(nullptr, m_model);
144 
145     ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
146     ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
147     size_t targetDevice = m_devices[0]; // Use the first device in system test.
148     ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
149     ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));
150 
151     m_executor = OH_NNExecutor_Construct(m_compilation);
152     ASSERT_NE(nullptr, m_executor);
153     OH_NNCompilation_Destroy(&m_compilation);
154     ASSERT_EQ(nullptr, m_compilation);
155 
156     // Set value of firstAddend
157     std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
158     ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));
159 
160     // Set value of secondAddend
161     std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
162     ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2},  (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));
163 
164     // Set output buffer of output
165     float outputBuffer[ELEMENT_COUNT];
166     ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));
167 
168     // Run inference and assert output value
169     ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
170     ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer));
171 
172     OH_NNExecutor_Destroy(&m_executor);
173     ASSERT_EQ(nullptr, m_executor);
174 }
175 
176 /*
177  * @tc.name: end_to_end_test_002
178  * @tc.desc: Test End-to-End operation of Neural Network Runtime using OH_NN_Memory
179  * @tc.type: FUNC
180  */
HWTEST_F(End2EndTest, end_to_end_test_002, testing::ext::TestSize.Level1)
{
    // Prepare tensors: two float addends, the activation-type parameter, the
    // intermediate result, and the output, all data tensors shaped {3, 2, 2}.
    int8_t activationValue{0};
    CppQuantParam quantParam{{}, {}, {}};
    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};

    ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));

    // The compilation takes over the model; the model handle is released here.
    m_compilation = OH_NNCompilation_Construct(m_model);
    ASSERT_NE(nullptr, m_compilation);
    OH_NNModel_Destroy(&m_model);
    ASSERT_EQ(nullptr, m_model);

    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
    size_t targetDevice = m_devices[0]; // Use the first device in system test.
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));

    m_executor = OH_NNExecutor_Construct(m_compilation);
    ASSERT_NE(nullptr, m_executor);
    OH_NNCompilation_Destroy(&m_compilation);
    ASSERT_EQ(nullptr, m_compilation);

    // Set value of firstAddend. SetInputFromMemory allocates executor-owned
    // shared memory, copies the data in, and returns the OH_NN_Memory handle.
    std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
    OH_NN_Memory* firstAddendMemory;
    ASSERT_EQ(OH_NN_SUCCESS,
        SetInputFromMemory(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH, &firstAddendMemory));

    // Set value of secondAddend
    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
    OH_NN_Memory* secondAddendMemory;
    ASSERT_EQ(OH_NN_SUCCESS,
        SetInputFromMemory(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH, &secondAddendMemory));

    // Set output buffer of output
    OH_NN_Memory* outputMemory;
    ASSERT_EQ(OH_NN_SUCCESS, SetOutputFromMemory(0, ADDEND_DATA_LENGTH, &outputMemory));

    // Run inference and assert output value
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
    ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputMemory));

    // All OH_NN_Memory handles must be destroyed before the executor itself.
    OH_NNExecutor_DestroyInputMemory(m_executor, 0, &firstAddendMemory);
    ASSERT_EQ(nullptr, firstAddendMemory);
    OH_NNExecutor_DestroyInputMemory(m_executor, 1, &secondAddendMemory);
    ASSERT_EQ(nullptr, secondAddendMemory);
    OH_NNExecutor_DestroyOutputMemory(m_executor, 0, &outputMemory);
    ASSERT_EQ(nullptr, outputMemory);

    OH_NNExecutor_Destroy(&m_executor);
    ASSERT_EQ(nullptr, m_executor);
}
241 
242 /*
243  * @tc.name: end_to_end_test_003
244  * @tc.desc: Test End-to-End operation of Neural Network Runtime with dynamic inputs.
245  * @tc.type: FUNC
246  */
HWTEST_F(End2EndTest, end_to_end_test_003, testing::ext::TestSize.Level1)
{
    // Prepare tensors. Unlike the other cases, addend1 carries constant data
    // baked into the model, so addend2 (tensor 1) is the only graph input.
    int8_t activationValue{0};
    CppQuantParam quantParam{{}, {}, {}};
    std::vector<float> value(ELEMENT_COUNT, INPUT_ONE);
    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, (void*)value.data(), ADDEND_DATA_LENGTH, quantParam, OH_NN_TENSOR};
    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};

    // Build the model inline rather than via BuildModel, because only
    // tensor 1 is declared as a model input here.
    m_model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, m_model);
    ASSERT_EQ(OH_NN_SUCCESS, AddTensors(tensors));
    ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {0, 1}, {3}));
    ASSERT_EQ(OH_NN_SUCCESS, AddOperation(OH_NN_OPS_ADD, {2}, {3, 1}, {4}));
    ASSERT_EQ(OH_NN_SUCCESS, SpecifyInputAndOutput({1}, {4}));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(m_model));

    m_compilation = OH_NNCompilation_Construct(m_model);
    ASSERT_NE(nullptr, m_compilation);
    OH_NNModel_Destroy(&m_model);
    ASSERT_EQ(nullptr, m_model);

    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
    size_t targetDevice = m_devices[0]; // Use the first device in system test.
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));

    m_executor = OH_NNExecutor_Construct(m_compilation);
    ASSERT_NE(nullptr, m_executor);
    OH_NNCompilation_Destroy(&m_compilation);
    ASSERT_EQ(nullptr, m_compilation);

    // Set value of secondAddend; executor input index 0 maps to tensor 1 here.
    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
    ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2},  (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));

    // Set output buffer of output
    float outputBuffer[ELEMENT_COUNT];
    ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));

    // Run inference and assert output value
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
    ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer));

    OH_NNExecutor_Destroy(&m_executor);
    ASSERT_EQ(nullptr, m_executor);
}
299 
300 /*
301  * @tc.name: end_to_end_test_004
 * @tc.desc: Test End-to-End operation of Neural Network Runtime with a dynamic input dimension.
303  * @tc.type: FUNC
304  */
HWTEST_F(End2EndTest, end_to_end_test_004, testing::ext::TestSize.Level1)
{
    // Prepare tensors. addend1 declares a dynamic first dimension (-1); the
    // concrete shape {3, 2, 2} is supplied later through SetInput.
    int8_t activationValue{0};
    CppQuantParam quantParam{{}, {}, {}};
    CppTensor addend1{OH_NN_FLOAT32, {-1, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};

    ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));

    m_compilation = OH_NNCompilation_Construct(m_model);
    ASSERT_NE(nullptr, m_compilation);
    OH_NNModel_Destroy(&m_model);
    ASSERT_EQ(nullptr, m_model);

    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
    size_t targetDevice = m_devices[0]; // Use the first device in system test.
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));

    m_executor = OH_NNExecutor_Construct(m_compilation);
    ASSERT_NE(nullptr, m_executor);
    OH_NNCompilation_Destroy(&m_compilation);
    ASSERT_EQ(nullptr, m_compilation);

    // Set value of firstAddend; this call fixes the dynamic dimension to 3.
    std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
    ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));

    // Set value of secondAddend
    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
    ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2},  (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));

    // Set output buffer of output
    float outputBuffer[ELEMENT_COUNT];
    ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));

    // Run inference and assert output value
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
    ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer));

    OH_NNExecutor_Destroy(&m_executor);
    ASSERT_EQ(nullptr, m_executor);
}
354 
355 /*
356  * @tc.name: end_to_end_test_005
357  * @tc.desc: Test End-to-End execution with cache setting and loading.
358  * @tc.type: FUNC
359  */
HWTEST_F(End2EndTest, end_to_end_test_005, testing::ext::TestSize.Level1)
{
    // Prepare tensors
    int8_t activationValue{0};
    CppQuantParam quantParam{{}, {}, {}};
    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};
    ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));

    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
    size_t targetDevice = m_devices[0]; // Use the first device in system test.

    // First compilation: builds the model and exports the cache files.
    OH_NNCompilation* compilationCacheExporter = OH_NNCompilation_Construct(m_model);
    ASSERT_NE(nullptr, compilationCacheExporter);

    // The cache directory must not pre-exist, so this test starts from a clean state.
    const fs::path cachePath{CACHE_DIR};
    ASSERT_EQ(false, fs::exists(cachePath));
    ASSERT_EQ(true, fs::create_directory(cachePath));

    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(compilationCacheExporter, targetDevice));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(compilationCacheExporter, CACHE_DIR.c_str(), CACHE_VERSION));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(compilationCacheExporter));
    // Building with cache enabled must have written files into the directory.
    ASSERT_EQ(false, fs::is_empty(cachePath));
    OH_NNCompilation_Destroy(&compilationCacheExporter);
    ASSERT_EQ(nullptr, compilationCacheExporter);

    // Second compilation: same cache dir and version, so it loads the cache.
    m_compilation = OH_NNCompilation_Construct(m_model);
    ASSERT_NE(nullptr, m_compilation);
    OH_NNModel_Destroy(&m_model);
    ASSERT_EQ(nullptr, m_model);

    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetCache(m_compilation, CACHE_DIR.c_str(), CACHE_VERSION));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));

    m_executor = OH_NNExecutor_Construct(m_compilation);
    ASSERT_NE(nullptr, m_executor);
    OH_NNCompilation_Destroy(&m_compilation);
    ASSERT_EQ(nullptr, m_compilation);

    // Set value of firstAddend
    std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
    ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));

    // Set value of secondAddend
    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
    ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2},  (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));

    // Set output buffer of output
    float outputBuffer[ELEMENT_COUNT];
    ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));

    // Run inference and assert output value
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
    ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer));

    OH_NNExecutor_Destroy(&m_executor);
    ASSERT_EQ(nullptr, m_executor);

    // If the cache directory and cache files were created, remove_all() should
    // delete more than one entry. The exact count depends on the NNRt service.
    ASSERT_GT(fs::remove_all(cachePath), (std::uintmax_t)1);
}
430 
431 /*
432  * @tc.name: end_to_end_test_006
433  * @tc.desc: Test End-to-End execution mixing SetInput and SetInputFromMemory functions.
434  * @tc.type: FUNC
435  */
HWTEST_F(End2EndTest, end_to_end_test_006, testing::ext::TestSize.Level1)
{
    // Prepare tensors
    int8_t activationValue{0};
    CppQuantParam quantParam{{}, {}, {}};
    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};

    ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));

    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
    size_t targetDevice = m_devices[0]; // Use the first device in system test.

    // Build the compilation directly; no cache is involved in this test.
    m_compilation = OH_NNCompilation_Construct(m_model);
    ASSERT_NE(nullptr, m_compilation);
    OH_NNModel_Destroy(&m_model);
    ASSERT_EQ(nullptr, m_model);

    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));

    m_executor = OH_NNExecutor_Construct(m_compilation);
    ASSERT_NE(nullptr, m_executor);
    OH_NNCompilation_Destroy(&m_compilation);
    ASSERT_EQ(nullptr, m_compilation);

    // Set value of firstAddend through a caller-owned raw buffer.
    std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
    ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));

    // Set value of secondAddend through executor-owned shared memory, to
    // exercise mixing SetInput with SetInputFromMemory.
    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
    OH_NN_Memory* secondAddendMemory;
    ASSERT_EQ(OH_NN_SUCCESS,
        SetInputFromMemory(1, {3, 2, 2}, (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH, &secondAddendMemory));

    // Set output buffer of output
    OH_NN_Memory* outputMemory;
    ASSERT_EQ(OH_NN_SUCCESS, SetOutputFromMemory(0, ADDEND_DATA_LENGTH, &outputMemory));

    // Run inference and assert output value
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
    ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputMemory));

    // Memory handles must be destroyed before the executor.
    OH_NNExecutor_DestroyInputMemory(m_executor, 1, &secondAddendMemory);
    ASSERT_EQ(nullptr, secondAddendMemory);
    OH_NNExecutor_DestroyOutputMemory(m_executor, 0, &outputMemory);
    ASSERT_EQ(nullptr, outputMemory);

    OH_NNExecutor_Destroy(&m_executor);
    ASSERT_EQ(nullptr, m_executor);
}
494 
495 /*
496  * @tc.name: end_to_end_test_007
497  * @tc.desc: Test End-to-End operation of Neural Network Runtime with quantization.
498  * @tc.type: FUNC
499  */
500 HWTEST_F(End2EndTest, end_to_end_test_007, testing::ext::TestSize.Level1)
501 {
502     // Prepare tensors
503     int8_t activationValue{0};
504     CppQuantParam quantParam{{}, {}, {}};
505     CppQuantParam quantParam1{{8}, {0.2}, {0}};
506     CppQuantParam quantParam2{{8}, {0.4}, {0}};
507     CppTensor addend1{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR};
508     CppTensor addend2{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR};
509     CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
510     CppTensor immediateTensor{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam1, OH_NN_TENSOR};
511     CppTensor output{OH_NN_INT8, {3, 2, 2}, nullptr, 0, quantParam2, OH_NN_TENSOR};
512     std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};
513 
514     ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));
515 
516     m_compilation = OH_NNCompilation_Construct(m_model);
517     ASSERT_NE(nullptr, m_compilation);
518     OH_NNModel_Destroy(&m_model);
519     ASSERT_EQ(nullptr, m_model);
520 
521     ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
522     ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
523     size_t targetDevice = m_devices[0]; // Use the first device in system test.
524     ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
525     ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));
526 
527     m_executor = OH_NNExecutor_Construct(m_compilation);
528     ASSERT_NE(nullptr, m_executor);
529     OH_NNCompilation_Destroy(&m_compilation);
530     ASSERT_EQ(nullptr, m_compilation);
531 
532     // Set value of firstAddend
533     std::vector<int8_t> firstAddendValue(ELEMENT_COUNT, 4);
534     ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));
535 
536     // Set value of secondAddend
537     std::vector<int8_t> secondAddendValue(ELEMENT_COUNT, 8);
538     ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2},  (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));
539 
540     // Set output buffer of output
541     int8_t outputBuffer[ELEMENT_COUNT];
542     ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));
543 
544     // Run inference and assert output value
545     ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
546     for (int i = 0; i < ELEMENT_COUNT; i++) {
547         printf("Comparing output with expected value, output index: %d, output value: %d, expected value: %d.",
548              i, static_cast<int>(outputBuffer[i]), static_cast<int>(EXPECTED_QUANT_OUTPUT));
549         ASSERT_EQ(outputBuffer[i], EXPECTED_QUANT_OUTPUT);
550     }
551 
552     OH_NNExecutor_Destroy(&m_executor);
553     ASSERT_EQ(nullptr, m_executor);
554 }
555 
556 /*
557  * @tc.name: end_to_end_test_008
558  * @tc.desc: Test End-to-End operation of Neural Network Runtime by calling OH_NNExecutor_Run multiple times.
559  * @tc.type: FUNC
560  */
HWTEST_F(End2EndTest, end_to_end_test_008, testing::ext::TestSize.Level1)
{
    // Prepare tensors
    int8_t activationValue{0};
    CppQuantParam quantParam{{}, {}, {}};
    CppTensor addend1{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor addend2{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor activation{OH_NN_INT8, {}, (void*)(&activationValue), 1, quantParam, OH_NN_ADD_ACTIVATIONTYPE};
    CppTensor immediateTensor{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    CppTensor output{OH_NN_FLOAT32, {3, 2, 2}, nullptr, 0, quantParam, OH_NN_TENSOR};
    std::vector<CppTensor> tensors{addend1, addend2, activation, immediateTensor, output};

    ASSERT_EQ(OH_NN_SUCCESS, BuildModel(tensors));

    m_compilation = OH_NNCompilation_Construct(m_model);
    ASSERT_NE(nullptr, m_compilation);
    OH_NNModel_Destroy(&m_model);
    ASSERT_EQ(nullptr, m_model);

    ASSERT_EQ(OH_NN_SUCCESS, GetDevices());
    ASSERT_GT(m_devices.size(), NO_DEVICE_COUNT); // Expect available accelerator.
    size_t targetDevice = m_devices[0]; // Use the first device in system test.
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_SetDevice(m_compilation, targetDevice));
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNCompilation_Build(m_compilation));

    m_executor = OH_NNExecutor_Construct(m_compilation);
    ASSERT_NE(nullptr, m_executor);
    OH_NNCompilation_Destroy(&m_compilation);
    ASSERT_EQ(nullptr, m_compilation);

    // Input values and output buffer are reused across every iteration.
    std::vector<float> firstAddendValue(ELEMENT_COUNT, INPUT_ONE);
    std::vector<float> secondAddendValue(ELEMENT_COUNT, INPUT_TWO);
    float outputBuffer[ELEMENT_COUNT];

    // Test inference multiple times: rebinding inputs/output and running must
    // succeed with an identical result on every one of REPEAT_TIMES iterations.
    for (int i = 0; i < REPEAT_TIMES; i++) {

        // Set value of firstAddend
        ASSERT_EQ(OH_NN_SUCCESS, SetInput(0, {3, 2, 2}, (void*)firstAddendValue.data(), ADDEND_DATA_LENGTH));

        // Set value of secondAddend
        ASSERT_EQ(OH_NN_SUCCESS, SetInput(1, {3, 2, 2},  (void*)secondAddendValue.data(), ADDEND_DATA_LENGTH));

        // Set output buffer of output
        ASSERT_EQ(OH_NN_SUCCESS, SetOutput(0, (void*)outputBuffer, ADDEND_DATA_LENGTH));

        // Run inference and assert output value
        ASSERT_EQ(OH_NN_SUCCESS, OH_NNExecutor_Run(m_executor));
        ASSERT_EQ(OH_NN_SUCCESS, IsExpectedOutput(outputBuffer));
    }

    OH_NNExecutor_Destroy(&m_executor);
    ASSERT_EQ(nullptr, m_executor);
}
615 } // namespace SystemTest
616 } // NeuralNetworkRuntime
617 } // OHOS