1 /*
2  * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
 * @addtogroup NeuralNetworkRuntime
18  * @{
19  *
 * @brief Provides the APIs of Neural Network Runtime for accelerating model inference.
21  *
22  * @since 9
23  * @version 2.0
24  */
25 
26 /**
27  * @file neural_network_core.h
28  *
29  * @brief Defines the Neural Network Core APIs. The AI inference framework uses the Native APIs provided by
30  *        Neural Network Core to compile models and perform inference and computing on acceleration hardware.
31  *
 * Note: Currently, the APIs of Neural Network Core do not support multi-threaded calling. \n
33  *
34  * include "neural_network_runtime/neural_network_core.h"
35  * @library libneural_network_core.so
36  * @kit Neural Network Runtime Kit
37  * @Syscap SystemCapability.Ai.NeuralNetworkRuntime
38  * @since 11
39  * @version 1.0
40  */
41 
42 #ifndef NEURAL_NETWORK_CORE_H
43 #define NEURAL_NETWORK_CORE_H
44 
45 #include "neural_network_runtime_type.h"
46 
47 #ifdef __cplusplus
48 extern "C" {
49 #endif
50 
51 /**
52  * @brief Creates a compilation instance of the {@link OH_NNCompilation} type.
53  *
 * After the OH_NNModel module completes model construction, APIs provided by the OH_NNCompilation module pass the
 * model to the underlying device for compilation. This method creates an {@link OH_NNCompilation} instance
 * based on the passed {@link OH_NNModel} instance. Call {@link OH_NNCompilation_SetDevice}
 * to set the device to compile on, and then call {@link OH_NNCompilation_Build} to complete the compilation.\n
58  *
59  * In addition to computing device selection, the OH_NNCompilation module supports features such as model caching,
60  * performance preference, priority setting, and float16 computing, which can be implemented by the following methods:\n
61  * {@link OH_NNCompilation_SetCache}\n
62  * {@link OH_NNCompilation_SetPerformanceMode}\n
63  * {@link OH_NNCompilation_SetPriority}\n
64  * {@link OH_NNCompilation_EnableFloat16}\n
65  *
66  * After {@link OH_NNCompilation_Build} is called, the {@link OH_NNModel} instance can be released.\n
67  *
68  * @param model Pointer to the {@link OH_NNModel} instance.
69  * @return Pointer to a {@link OH_NNCompilation} instance, or NULL if it fails to create.
70  * @since 9
71  * @version 1.0
72  */
73 OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model);
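
/*
 * Example (non-normative): a minimal sketch of the construction flow described above, assuming
 * <model> is an {@link OH_NNModel} instance built elsewhere:
 *
 *     OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
 *     if (compilation != NULL) {
 *         // 0 selects the first device in the current device list by default
 *         OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, 0);
 *         if (ret == OH_NN_SUCCESS) {
 *             ret = OH_NNCompilation_Build(compilation);
 *         }
 *         // <model> can be released after the build; destroy the compilation when done
 *         OH_NNCompilation_Destroy(&compilation);
 *     }
 */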
74 
75 /**
76  * @brief Creates a compilation instance based on an offline model file.
77  *
 * This method conflicts with passing an online-built model or an offline model file buffer;
 * you can choose only one of the three construction methods. \n
 *
 * An offline model is a model compiled offline by the model converter provided by a device vendor,
 * so it can only be used on the specified device. However, the compilation time of an offline model
 * is usually much shorter than that of an {@link OH_NNModel}. \n
 *
 * You should perform the offline compilation during your development
 * and deploy the offline model in your app package. \n
87  *
88  * @param modelPath Offline model file path.
89  * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create.
90  * @since 11
91  * @version 1.0
92  */
93 OH_NNCompilation *OH_NNCompilation_ConstructWithOfflineModelFile(const char *modelPath);
94 
95 /**
96  * @brief Creates a compilation instance based on an offline model file buffer.
97  *
 * This method conflicts with passing an online-built model or an offline model file path;
 * you can choose only one of the three construction methods. \n
 *
 * Note that the returned {@link OH_NNCompilation} instance only saves the
 * <b>modelBuffer</b> pointer inside, instead of copying its data.
 * You should not release <b>modelBuffer</b> before the {@link OH_NNCompilation} instance is destroyed. \n
104  *
105  * @param modelBuffer Offline model file buffer.
 * @param modelSize Offline model buffer size.
107  * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create.
108  * @since 11
109  * @version 1.0
110  */
111 OH_NNCompilation *OH_NNCompilation_ConstructWithOfflineModelBuffer(const void *modelBuffer, size_t modelSize);
112 
113 /**
 * @brief Creates an empty compilation instance for later restoration from the model cache.
115  *
116  * See {@link OH_NNCompilation_SetCache} for the description of cache.\n
117  *
 * Restoring from the cache takes less time than compiling with {@link OH_NNModel}.\n
119  *
120  * You should call {@link OH_NNCompilation_SetCache} or {@link OH_NNCompilation_ImportCacheFromBuffer} first,
121  * and then call {@link OH_NNCompilation_Build} to complete the restoration.\n
122  *
123  * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create.
124  * @since 11
125  * @version 1.0
126  */
127 OH_NNCompilation *OH_NNCompilation_ConstructForCache();
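
/*
 * Example (non-normative): a minimal sketch of restoring a compilation from a cache directory,
 * assuming the cache was previously written by {@link OH_NNCompilation_SetCache} with the same
 * path and version (the path "/data/storage/nncache" is hypothetical):
 *
 *     OH_NNCompilation *compilation = OH_NNCompilation_ConstructForCache();
 *     if (compilation != NULL) {
 *         (void)OH_NNCompilation_SetDevice(compilation, 0);  // same device as the cached build
 *         if (OH_NNCompilation_SetCache(compilation, "/data/storage/nncache", 1) == OH_NN_SUCCESS) {
 *             (void)OH_NNCompilation_Build(compilation);     // completes the restoration
 *         }
 *     }
 */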
128 
129 /**
130  * @brief Exports the cache to a given buffer.
131  *
132  * See {@link OH_NNCompilation_SetCache} for the description of cache.\n
133  *
 * Note that the cache is the result of the compilation build {@link OH_NNCompilation_Build},
 * so this method must be called after {@link OH_NNCompilation_Build}.\n
136  *
137  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
138  * @param buffer Pointer to the given buffer.
139  * @param length Buffer length.
140  * @param modelSize Byte size of the model cache.
141  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
142  *         If the operation fails, an error code is returned.
143  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
144  * @since 11
145  * @version 1.0
146  */
147 OH_NN_ReturnCode OH_NNCompilation_ExportCacheToBuffer(OH_NNCompilation *compilation,
148                                                       const void *buffer,
149                                                       size_t length,
150                                                       size_t *modelSize);
151 
152 /**
153  * @brief Imports the cache from a given buffer.
154  *
155  * See {@link OH_NNCompilation_SetCache} for the description of cache.\n
156  *
157  * {@link OH_NNCompilation_Build} should be called to complete the restoration after
158  * {@link OH_NNCompilation_ImportCacheFromBuffer} is called.\n
159  *
 * Note that <b>compilation</b> only saves the <b>buffer</b> pointer inside, instead of copying its data.
 * You should not release <b>buffer</b> before <b>compilation</b> is destroyed.\n
162  *
163  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
164  * @param buffer Pointer to the given buffer.
165  * @param modelSize Byte size of the model cache.
166  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
167  *         If the operation fails, an error code is returned.
168  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
169  * @since 11
170  * @version 1.0
171  */
172 OH_NN_ReturnCode OH_NNCompilation_ImportCacheFromBuffer(OH_NNCompilation *compilation,
173                                                         const void *buffer,
174                                                         size_t modelSize);
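
/*
 * Example (non-normative): a sketch of a cache round trip, assuming <built> is a compilation on
 * which {@link OH_NNCompilation_Build} has already succeeded; the 4 MiB capacity is an arbitrary
 * assumption, and malloc()/free() come from <stdlib.h>:
 *
 *     size_t capacity = 4 * 1024 * 1024;
 *     size_t modelSize = 0;
 *     void *cacheBuffer = malloc(capacity);
 *     OH_NN_ReturnCode ret = OH_NNCompilation_ExportCacheToBuffer(built, cacheBuffer, capacity, &modelSize);
 *
 *     OH_NNCompilation *restored = OH_NNCompilation_ConstructForCache();
 *     if (ret == OH_NN_SUCCESS && restored != NULL) {
 *         ret = OH_NNCompilation_ImportCacheFromBuffer(restored, cacheBuffer, modelSize);
 *         if (ret == OH_NN_SUCCESS) {
 *             ret = OH_NNCompilation_Build(restored);  // completes the restoration
 *         }
 *     }
 *     // free(cacheBuffer) only after <restored> has been destroyed
 */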
175 
176 /**
177  * @brief Adds an extension config for a custom hardware attribute.
178  *
 * Some devices have specific attributes that have not been opened in NNRt. This method provides an additional
 * way for you to set these custom hardware attributes of the device.
 * You should query their names and values from the device vendor's documentation,
 * and add them to the compilation instance one by one. These attributes will be passed directly to the device
 * driver, and this method will return an error code if the driver cannot parse them. \n
184  *
185  * After {@link OH_NNCompilation_Build} is called, the <b>configName</b> and <b>configValue</b> can be released. \n
186  *
187  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
188  * @param configName Config name.
189  * @param configValue A byte buffer saving the config value.
190  * @param configValueSize Byte size of the config value.
191  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
192  *         If the operation fails, an error code is returned.
193  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
194  * @since 11
195  * @version 1.0
196  */
197 OH_NN_ReturnCode OH_NNCompilation_AddExtensionConfig(OH_NNCompilation *compilation,
198                                                      const char *configName,
199                                                      const void *configValue,
200                                                      const size_t configValueSize);
201 
202 /**
203  * @brief Specifies the device for model compilation and computing.
204  *
205  * In the compilation phase, you need to specify the device for model compilation and computing.
206  * Call {@link OH_NNDevice_GetAllDevicesID} to obtain available device IDs.
 * Call {@link OH_NNDevice_GetType} and {@link OH_NNDevice_GetName} to obtain device information,
 * and pass the target device ID to this method. \n
209  *
210  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
212  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
213  *         If the operation fails, an error code is returned.
214  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
215  * @since 9
216  * @version 1.0
217  */
218 OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID);
219 
220 /**
 * @brief Sets the cache directory and version of the compiled model.
222  *
223  * On the device that supports caching, a model can be saved as a cache file after being compiled on the device driver.
224  * The model can be directly read from the cache file in the next compilation, saving recompilation time.
225  * This method performs different operations based on the passed cache directory and version: \n
226  *
227  * - No file exists in the cache directory:
228  * Caches the compiled model to the directory and sets the cache version to <b>version</b>. \n
229  *
230  * - A complete cache file exists in the cache directory, and its version is <b>version</b>:
231  * Reads the cache file in the path and passes the data to the underlying
232  * device for conversion into executable model instances. \n
233  *
234  * - A complete cache file exists in the cache directory, and its version is earlier than <b>version</b>:
235  * When model compilation is complete on the underlying device,
236  * overwrites the cache file and changes the version number to <b>version</b>. \n
237  *
238  * - A complete cache file exists in the cache directory, and its version is later than <b>version</b>:
239  * Returns the {@link OH_NN_INVALID_PARAMETER} error code without reading the cache file. \n
240  *
 * - The cache file in the cache directory is incomplete or you do not have the permission to access the cache file:
 * Returns the {@link OH_NN_INVALID_FILE} error code. \n
 *
 * - The cache directory does not exist or you do not have the access permission:
 * Returns the {@link OH_NN_INVALID_PATH} error code. \n
246  *
247  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
248  * @param cachePath Directory for storing model cache files. This method creates directories for different devices in
249  *                  the <b>cachePath</b> directory. You are advised to use a separate cache directory for each model.
250  * @param version Cache version.
251  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
252  *         If the operation fails, an error code is returned.
253  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
254  * @since 9
255  * @version 1.0
256  */
257 OH_NN_ReturnCode OH_NNCompilation_SetCache(OH_NNCompilation *compilation, const char *cachePath, uint32_t version);
258 
259 /**
260  * @brief Sets the performance mode for model computing.
261  *
262  * Allows you to set the performance mode for model computing to meet the requirements of low power consumption
263  * and ultimate performance. If this method is not called to set the performance mode in the compilation phase,
 * the compilation instance assigns the {@link OH_NN_PERFORMANCE_NONE} mode to the model by default.
265  * In this case, the device performs computing in the default performance mode. \n
266  *
267  * If this method is called on the device that does not support the setting of the performance mode,
268  * the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
269  *
270  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
271  * @param performanceMode Performance mode. For details about the available performance modes,
272  *                        see {@link OH_NN_PerformanceMode}.
273  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
274  *         If the operation fails, an error code is returned.
275  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
276  * @since 9
277  * @version 1.0
278  */
279 OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilation *compilation,
280                                                      OH_NN_PerformanceMode performanceMode);
281 
282 /**
283  * @brief Sets the model computing priority.
284  *
 * Allows you to set computing priorities for models.
 * The priorities apply only to models created by processes with the same UID.
 * The settings will not affect models created by processes with different UIDs on different devices. \n
288  *
289  * If this method is called on the device that does not support the priority setting,
290  * the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
291  *
292  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
293  * @param priority Priority. For details about the optional priorities, see {@link OH_NN_Priority}.
294  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
295  *         If the operation fails, an error code is returned.
296  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
297  * @since 9
298  * @version 1.0
299  */
300 OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, OH_NN_Priority priority);
301 
302 /**
303  * @brief Enables float16 for computing.
304  *
 * Float32 is used by default for models of the float type. If this method is called on a device that supports
 * float16, float16 will be used for computing the float32 model to reduce memory usage and execution time. \n
 *
 * This option is useless for models of integer types, e.g. the int8 type. \n
309  *
310  * If this method is called on the device that does not support float16,
311  * the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
312  *
313  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
314  * @param enableFloat16 Indicates whether to enable float16. If this parameter is
315  *                      set to <b>true</b>, float16 inference is performed.
316  *                      If this parameter is set to <b>false</b>, float32 inference is performed.
317  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
318  *         If the operation fails, an error code is returned.
319  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
320  * @since 9
321  * @version 1.0
322  */
323 OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, bool enableFloat16);
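
/*
 * Example (non-normative): a sketch of configuring the optional compilation features on an
 * existing <compilation> before {@link OH_NNCompilation_Build}. OH_NN_PERFORMANCE_HIGH and
 * OH_NN_PRIORITY_MEDIUM are values of {@link OH_NN_PerformanceMode} and {@link OH_NN_Priority}
 * from neural_network_runtime_type.h; a device that does not support a feature returns
 * {@link OH_NN_UNAVALIDABLE_DEVICE} for the corresponding call:
 *
 *     (void)OH_NNCompilation_SetCache(compilation, "/data/storage/nncache", 1);  // hypothetical path
 *     (void)OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_HIGH);
 *     (void)OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_MEDIUM);
 *     (void)OH_NNCompilation_EnableFloat16(compilation, true);
 *     OH_NN_ReturnCode ret = OH_NNCompilation_Build(compilation);
 */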
324 
325 /**
326  * @brief Compiles a model.
327  *
 * After the compilation configuration is complete, call this method to perform the compilation.
 * The compilation instance pushes the model and compilation options to the device for compilation.
 * After this method is called, additional compilation operations cannot be performed. \n
 *
 * If {@link OH_NNCompilation_SetDevice}, {@link OH_NNCompilation_SetCache},
 * {@link OH_NNCompilation_SetPerformanceMode}, {@link OH_NNCompilation_SetPriority}, or
 * {@link OH_NNCompilation_EnableFloat16} is called after this method, {@link OH_NN_OPERATION_FORBIDDEN}
 * is returned. \n
335  *
336  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
337  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
338  *         If the operation fails, an error code is returned.
339  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
340  * @since 9
341  * @version 1.0
342  */
343 OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation);
344 
345 /**
346  * @brief Releases the <b>Compilation</b> object.
347  *
 * This method needs to be called to release the compilation instance created by {@link OH_NNCompilation_Construct},
 * {@link OH_NNCompilation_ConstructWithOfflineModelFile}, {@link OH_NNCompilation_ConstructWithOfflineModelBuffer} or
 * {@link OH_NNCompilation_ConstructForCache}. Otherwise, a memory leak will occur. \n
351  *
352  * If <b>compilation</b> or <b>*compilation</b> is a null pointer,
353  * this method only prints warning logs and does not execute the release. \n
354  *
355  * @param compilation Double pointer to the {@link OH_NNCompilation} instance.
356  *                    After a compilation instance is destroyed,
357  *                    this method sets <b>*compilation</b> to a null pointer.
358  * @since 9
359  * @version 1.0
360  */
361 void OH_NNCompilation_Destroy(OH_NNCompilation **compilation);
362 
363 
364 /**
365  * @brief Creates an {@link NN_TensorDesc} instance.
366  *
 * The {@link NN_TensorDesc} describes various tensor attributes, such as the name, data type, shape, and format.\n
368  *
369  * The following methods can be called to create a {@link NN_Tensor} instance based on the passed {@link NN_TensorDesc}
370  * instance:\n
371  * {@link OH_NNTensor_Create}\n
372  * {@link OH_NNTensor_CreateWithSize}\n
373  * {@link OH_NNTensor_CreateWithFd}\n
374  *
 * Note that these methods will copy the {@link NN_TensorDesc} instance into {@link NN_Tensor}. Therefore, you can
 * create multiple {@link NN_Tensor} instances from the same {@link NN_TensorDesc} instance, and you should destroy
 * the {@link NN_TensorDesc} instance by {@link OH_NNTensorDesc_Destroy} when it is no longer used.\n
378  *
379  * @return Pointer to a {@link NN_TensorDesc} instance, or NULL if it fails to create.
380  * @since 11
381  * @version 1.0
382  */
383 NN_TensorDesc *OH_NNTensorDesc_Create();
384 
385 /**
386  * @brief Releases an {@link NN_TensorDesc} instance.
387  *
 * When the {@link NN_TensorDesc} instance is no longer used, this method needs to be called to release it.
 * Otherwise, a memory leak will occur. \n
 *
 * If <b>tensorDesc</b> or <b>*tensorDesc</b> is a null pointer,
 * this method returns an error code and does not execute the release. \n
393  *
394  * @param tensorDesc Double pointer to the {@link NN_TensorDesc} instance.
395  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
396  *         If the operation fails, an error code is returned.
397  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
398  * @since 11
399  * @version 1.0
400  */
401 OH_NN_ReturnCode OH_NNTensorDesc_Destroy(NN_TensorDesc **tensorDesc);
402 
403 /**
404  * @brief Sets the name of a {@link NN_TensorDesc}.
405  *
406  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor name.
407  * The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>.\n
408  *
 * If <b>tensorDesc</b> or <b>name</b> is a null pointer, this method returns an error code.\n
410  *
411  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
412  * @param name The name of the tensor that needs to be set.
413  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
414  *         If the operation fails, an error code is returned.
415  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
416  * @since 11
417  * @version 1.0
418  */
419 OH_NN_ReturnCode OH_NNTensorDesc_SetName(NN_TensorDesc *tensorDesc, const char *name);
420 
421 /**
422  * @brief Gets the name of a {@link NN_TensorDesc}.
423  *
424  * Call this method to obtain the name of the specified {@link NN_TensorDesc} instance.
425  * The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>.\n
426  *
 * If <b>tensorDesc</b> or <b>name</b> is a null pointer, this method returns an error code.
 * As an output parameter, <b>*name</b> must be a null pointer, otherwise the method will return an error code.
 * For example, you should define char* tensorName = NULL, and pass &tensorName as the argument of <b>name</b>.\n
 *
 * You do not need to release the memory of <b>name</b>. It will be released when <b>tensorDesc</b> is destroyed.\n
432  *
433  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param name The returned name of the tensor.
435  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
436  *         If the operation fails, an error code is returned.
437  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
438  * @since 11
439  * @version 1.0
440  */
441 OH_NN_ReturnCode OH_NNTensorDesc_GetName(const NN_TensorDesc *tensorDesc, const char **name);
442 
443 /**
444  * @brief Sets the data type of a {@link NN_TensorDesc}.
445  *
446  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor data type. \n
447  *
 * If <b>tensorDesc</b> is a null pointer, this method returns an error code. \n
449  *
450  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
451  * @param dataType The data type of the tensor that needs to be set.
452  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
453  *         If the operation fails, an error code is returned.
454  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
455  * @since 11
456  * @version 1.0
457  */
458 OH_NN_ReturnCode OH_NNTensorDesc_SetDataType(NN_TensorDesc *tensorDesc, OH_NN_DataType dataType);
459 
460 /**
461  * @brief Gets the data type of a {@link NN_TensorDesc}.
462  *
463  * Call this method to obtain the data type of the specified {@link NN_TensorDesc} instance. \n
464  *
 * If <b>tensorDesc</b> or <b>dataType</b> is a null pointer, this method returns an error code. \n
466  *
467  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
468  * @param dataType The returned data type of the tensor.
469  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
470  *         If the operation fails, an error code is returned.
471  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
472  * @since 11
473  * @version 1.0
474  */
475 OH_NN_ReturnCode OH_NNTensorDesc_GetDataType(const NN_TensorDesc *tensorDesc, OH_NN_DataType *dataType);
476 
477 /**
478  * @brief Sets the shape of a {@link NN_TensorDesc}.
479  *
480  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor shape. \n
481  *
 * If <b>tensorDesc</b> or <b>shape</b> is a null pointer, or <b>shapeLength</b> is 0,
 * this method returns an error code. \n
484  *
485  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
486  * @param shape The shape list of the tensor that needs to be set.
487  * @param shapeLength The length of the shape list that needs to be set.
488  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
489  *         If the operation fails, an error code is returned.
490  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
491  * @since 11
492  * @version 1.0
493  */
494 OH_NN_ReturnCode OH_NNTensorDesc_SetShape(NN_TensorDesc *tensorDesc, const int32_t *shape, size_t shapeLength);
495 
496 /**
497  * @brief Gets the shape of a {@link NN_TensorDesc}.
498  *
499  * Call this method to obtain the shape of the specified {@link NN_TensorDesc} instance. \n
500  *
 * If <b>tensorDesc</b>, <b>shape</b> or <b>shapeLength</b> is a null pointer, this method returns an error code.
 * As an output parameter, <b>*shape</b> must be a null pointer, otherwise the method will return an error code.
 * For example, you should define int32_t* tensorShape = NULL, and pass &tensorShape as the argument of <b>shape</b>. \n
 *
 * You do not need to release the memory of <b>shape</b>. It will be released when <b>tensorDesc</b> is destroyed. \n
506  *
507  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param shape The returned shape list of the tensor.
509  * @param shapeLength The returned length of the shape list.
510  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
511  *         If the operation fails, an error code is returned.
512  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
513  * @since 11
514  * @version 1.0
515  */
516 OH_NN_ReturnCode OH_NNTensorDesc_GetShape(const NN_TensorDesc *tensorDesc, int32_t **shape, size_t *shapeLength);
517 
518 /**
519  * @brief Sets the format of a {@link NN_TensorDesc}.
520  *
521  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor format. \n
522  *
 * If <b>tensorDesc</b> is a null pointer, this method returns an error code. \n
524  *
525  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
526  * @param format The format of the tensor that needs to be set.
527  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
528  *         If the operation fails, an error code is returned.
529  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
530  * @since 11
531  * @version 1.0
532  */
533 OH_NN_ReturnCode OH_NNTensorDesc_SetFormat(NN_TensorDesc *tensorDesc, OH_NN_Format format);
534 
535 /**
536  * @brief Gets the format of a {@link NN_TensorDesc}.
537  *
538  * Call this method to obtain the format of the specified {@link NN_TensorDesc} instance. \n
539  *
 * If <b>tensorDesc</b> or <b>format</b> is a null pointer, this method returns an error code. \n
541  *
542  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
543  * @param format The returned format of the tensor.
544  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
545  *         If the operation fails, an error code is returned.
546  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
547  * @since 11
548  * @version 1.0
549  */
550 OH_NN_ReturnCode OH_NNTensorDesc_GetFormat(const NN_TensorDesc *tensorDesc, OH_NN_Format *format);
551 
552 /**
553  * @brief Gets the element count of a {@link NN_TensorDesc}.
554  *
555  * Call this method to obtain the element count of the specified {@link NN_TensorDesc} instance.
556  * If you need to obtain byte size of the tensor data, call {@link OH_NNTensorDesc_GetByteSize}. \n
557  *
 * If the tensor shape is dynamic, this method returns an error code, and <b>elementCount</b> will be 0. \n
 *
 * If <b>tensorDesc</b> or <b>elementCount</b> is a null pointer, this method returns an error code. \n
561  *
562  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
563  * @param elementCount The returned element count of the tensor.
564  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
565  *         If the operation fails, an error code is returned.
566  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
567  * @since 11
568  * @version 1.0
569  */
570 OH_NN_ReturnCode OH_NNTensorDesc_GetElementCount(const NN_TensorDesc *tensorDesc, size_t *elementCount);
571 
572 /**
573  * @brief Gets the byte size of a {@link NN_TensorDesc}.
574  *
575  * Call this method to obtain the byte size of the specified {@link NN_TensorDesc} instance. \n
576  *
 * If the tensor shape is dynamic, this method returns an error code, and <b>byteSize</b> will be 0. \n
 *
 * If you need to obtain the element count of the tensor data, call {@link OH_NNTensorDesc_GetElementCount}. \n
 *
 * If <b>tensorDesc</b> or <b>byteSize</b> is a null pointer, this method returns an error code. \n
582  *
583  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
584  * @param byteSize The returned byte size of the tensor.
585  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
586  *         If the operation fails, an error code is returned.
587  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
588  * @since 11
589  * @version 1.0
590  */
591 OH_NN_ReturnCode OH_NNTensorDesc_GetByteSize(const NN_TensorDesc *tensorDesc, size_t *byteSize);
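
/*
 * Example (non-normative): a sketch of describing a float32 NHWC tensor of shape [1, 224, 224, 3]
 * and querying its derived sizes. OH_NN_FLOAT32 and OH_NN_FORMAT_NHWC are values of
 * {@link OH_NN_DataType} and {@link OH_NN_Format} from neural_network_runtime_type.h, and the
 * tensor name "input0" is hypothetical:
 *
 *     NN_TensorDesc *desc = OH_NNTensorDesc_Create();
 *     int32_t shape[4] = {1, 224, 224, 3};
 *     (void)OH_NNTensorDesc_SetName(desc, "input0");
 *     (void)OH_NNTensorDesc_SetDataType(desc, OH_NN_FLOAT32);
 *     (void)OH_NNTensorDesc_SetShape(desc, shape, 4);
 *     (void)OH_NNTensorDesc_SetFormat(desc, OH_NN_FORMAT_NHWC);
 *
 *     size_t elementCount = 0;  // expected: 1 * 224 * 224 * 3 = 150528
 *     size_t byteSize = 0;      // expected: 150528 * 4 bytes = 602112
 *     (void)OH_NNTensorDesc_GetElementCount(desc, &elementCount);
 *     (void)OH_NNTensorDesc_GetByteSize(desc, &byteSize);
 *
 *     (void)OH_NNTensorDesc_Destroy(&desc);  // release the descriptor when no longer used
 */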
592 
593 /**
594  * @brief Creates a {@link NN_Tensor} instance from {@link NN_TensorDesc}.
595  *
 * This method uses {@link OH_NNTensorDesc_GetByteSize} to calculate the byte size of the tensor data and allocates
 * shared memory on the device for it. The device driver will get the tensor data directly in the "zero-copy" way.\n
598  *
599  * Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
600  * <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
601  *
 * If the tensor shape is dynamic, this method returns a null pointer.\n
 *
 * <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
 * by default.\n
 *
 * <b>tensorDesc</b> must be provided; if it is a null pointer, this method returns a null pointer.\n
608  *
609  * Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
610  *
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
612  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
613  * @return Pointer to a {@link NN_Tensor} instance, or NULL if it fails to create.
614  * @since 11
615  * @version 1.0
616  */
617 NN_Tensor *OH_NNTensor_Create(size_t deviceID, NN_TensorDesc *tensorDesc);
618 
619 /**
620  * @brief Creates a {@link NN_Tensor} instance with specified size and {@link NN_TensorDesc}.
621  *
 * This method uses <b>size</b> as the byte size of the tensor data and allocates shared memory on the device for it.
 * The device driver will get the tensor data directly in the "zero-copy" way.\n
624  *
625  * Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
626  * <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
627  *
628  * <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
629  * by default.\n
630  *
 * <b>tensorDesc</b> must be provided; if it is a null pointer, this method returns a null pointer.
 * <b>size</b> must be no less than the byte size of <b>tensorDesc</b>; otherwise, this method returns a null pointer.
 * If the tensor shape is dynamic, the <b>size</b> will not be checked.\n
634  *
635  * Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
636  *
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
638  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
639  * @param size Size of tensor data that need to be allocated.
640  * @return Pointer to a {@link NN_Tensor} instance, or NULL if it fails to create.
641  * @since 11
642  * @version 1.0
643  */
644 NN_Tensor *OH_NNTensor_CreateWithSize(size_t deviceID, NN_TensorDesc *tensorDesc, size_t size);
645 
646 /**
647  * @brief Creates a {@link NN_Tensor} instance with specified file descriptor and {@link NN_TensorDesc}.
648  *
 * This method reuses the shared memory corresponding to the passed file descriptor <b>fd</b>, which may come from
 * another {@link NN_Tensor} instance. When you call {@link OH_NNTensor_Destroy} to release the tensor created by
 * this method, the tensor data memory will not be released.\n
652  *
653  * Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
654  *  <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
655  *
656  * <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
657  * by default.\n
658  *
 * <b>tensorDesc</b> must be provided; if it is a null pointer, this method returns a null pointer.\n
660  *
661  * Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
662  *
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
664  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param fd File descriptor of the shared memory to be reused.
 * @param size Size of the shared memory to be reused.
 * @param offset Offset of the shared memory to be reused.
 * @return Pointer to a {@link NN_Tensor} instance, or NULL if it fails to create.
669  * @since 11
670  * @version 1.0
671  */
672 NN_Tensor *OH_NNTensor_CreateWithFd(size_t deviceID,
673                                     NN_TensorDesc *tensorDesc,
674                                     int fd,
675                                     size_t size,
676                                     size_t offset);
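
/*
 * Example (non-normative): a sketch of the three tensor creation paths, assuming <desc> is a
 * valid {@link NN_TensorDesc} with a static shape and device ID 0 selects the default device:
 *
 *     // 1. Let the runtime size the shared memory from the descriptor.
 *     NN_Tensor *a = OH_NNTensor_Create(0, desc);
 *
 *     // 2. Allocate an explicit size (no less than the descriptor's byte size).
 *     size_t byteSize = 0;
 *     (void)OH_NNTensorDesc_GetByteSize(desc, &byteSize);
 *     NN_Tensor *b = OH_NNTensor_CreateWithSize(0, desc, byteSize);
 *
 *     // 3. Reuse the shared memory of an existing tensor through its file descriptor.
 *     int fd = -1;
 *     size_t size = 0;
 *     size_t offset = 0;
 *     (void)OH_NNTensor_GetFd(a, &fd);
 *     (void)OH_NNTensor_GetSize(a, &size);
 *     (void)OH_NNTensor_GetOffset(a, &offset);
 *     NN_Tensor *c = OH_NNTensor_CreateWithFd(0, desc, fd, size, offset);
 *
 *     // Destroying <c> does not release the reused memory; <a> still owns it.
 *     (void)OH_NNTensor_Destroy(&c);
 *     (void)OH_NNTensor_Destroy(&b);
 *     (void)OH_NNTensor_Destroy(&a);
 */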
677 
678 /**
679  * @brief Releases a {@link NN_Tensor} instance.
680  *
 * When the {@link NN_Tensor} instance is no longer used, this method needs to be called to release the instance.
 * Otherwise, a memory leak will occur.\n
 *
 * If <b>tensor</b> or <b>*tensor</b> is a null pointer, this method returns an error code and does not execute the
 * release.\n
686  *
687  * @param tensor Double pointer to the {@link NN_Tensor} instance.
688  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
689  *         If the operation fails, an error code is returned.
690  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
691  * @since 11
692  * @version 1.0
693  */
694 OH_NN_ReturnCode OH_NNTensor_Destroy(NN_Tensor **tensor);
695 
696 /**
697  * @brief Gets the {@link NN_TensorDesc} instance of a {@link NN_Tensor}.
698  *
699  * Call this method to obtain the inner {@link NN_TensorDesc} instance pointer of the specified {@link NN_Tensor}
700  * instance. You can get various types of the tensor attributes such as name/format/data type/shape from the returned
701  * {@link NN_TensorDesc} instance.\n
702  *
 * You should not destroy the returned {@link NN_TensorDesc} instance because it points to the inner instance of
 * {@link NN_Tensor}. Otherwise, memory corruption from a double free will occur when {@link OH_NNTensor_Destroy}
 * is called.\n
 *
 * If <b>tensor</b> is a null pointer, this method returns a null pointer.\n
708  *
709  * @param tensor Pointer to the {@link NN_Tensor} instance.
710  * @return Pointer to the {@link NN_TensorDesc} instance, or NULL if it fails to create.
711  * @since 11
712  * @version 1.0
713  */
714 NN_TensorDesc *OH_NNTensor_GetTensorDesc(const NN_Tensor *tensor);
715 
716 /**
717  * @brief Gets the data buffer of a {@link NN_Tensor}.
718  *
 * You can read/write data from/to the tensor data buffer. The buffer is mapped from shared memory on the device,
 * so the device driver will get the tensor data directly in this "zero-copy" way.\n
 *
 * Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be
 * obtained by {@link OH_NNTensor_GetOffset} and the size can be obtained by {@link OH_NNTensor_GetSize}.\n
 *
 * If <b>tensor</b> is a null pointer, this method returns a null pointer.\n
726  *
727  * @param tensor Pointer to the {@link NN_Tensor} instance.
728  * @return Pointer to data buffer of the tensor, or NULL if it fails to create.
729  * @since 11
730  * @version 1.0
731  */
732 void *OH_NNTensor_GetDataBuffer(const NN_Tensor *tensor);
733 
734 /**
735  * @brief Gets the file descriptor of the shared memory of a {@link NN_Tensor}.
736  *
 * The file descriptor <b>fd</b> corresponds to the shared memory of the tensor data and can be reused
 * by another {@link NN_Tensor} through {@link OH_NNTensor_CreateWithFd}.\n
 *
 * If <b>tensor</b> or <b>fd</b> is a null pointer, this method returns an error code.\n
741  *
742  * @param tensor Pointer to the {@link NN_Tensor} instance.
743  * @param fd The returned file descriptor of the shared memory.
744  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
745  *         If the operation fails, an error code is returned.
746  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
747  * @since 11
748  * @version 1.0
749  */
750 OH_NN_ReturnCode OH_NNTensor_GetFd(const NN_Tensor *tensor, int *fd);
751 
752 /**
753  * @brief Gets the size of the shared memory of a {@link NN_Tensor}.
754  *
 * The <b>size</b> corresponds to the shared memory of the tensor data and can be reused by another {@link NN_Tensor}
 * through {@link OH_NNTensor_CreateWithFd}.\n
 *
 * The <b>size</b> is the same as the argument <b>size</b> of {@link OH_NNTensor_CreateWithSize} and
 * {@link OH_NNTensor_CreateWithFd}. But for a tensor created by {@link OH_NNTensor_Create},
 * it is equal to the tensor byte size.\n
 *
 * Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be
 * obtained by {@link OH_NNTensor_GetOffset} and the size can be obtained by {@link OH_NNTensor_GetSize}.\n
 *
 * If <b>tensor</b> or <b>size</b> is a null pointer, this method returns an error code.\n
766  *
767  * @param tensor Pointer to the {@link NN_Tensor} instance.
768  * @param size The returned size of tensor data.
769  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
770  *         If the operation fails, an error code is returned.
771  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
772  * @since 11
773  * @version 1.0
774  */
775 OH_NN_ReturnCode OH_NNTensor_GetSize(const NN_Tensor *tensor, size_t *size);
776 
777 /**
 * @brief Gets the data offset of a tensor.
779  *
 * The <b>offset</b> corresponds to the shared memory of the tensor data and can be reused by another
 * {@link NN_Tensor} through {@link OH_NNTensor_CreateWithFd}.\n
 *
 * Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be
 * obtained by {@link OH_NNTensor_GetOffset} and the size can be obtained by {@link OH_NNTensor_GetSize}.\n
 *
 * If <b>tensor</b> or <b>offset</b> is a null pointer, this method returns an error code.\n
787  *
788  * @param tensor Pointer to the {@link NN_Tensor} instance.
789  * @param offset The returned offset of tensor data.
790  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
791  *         If the operation fails, an error code is returned.
792  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
793  * @since 11
794  * @version 1.0
795  */
796 OH_NN_ReturnCode OH_NNTensor_GetOffset(const NN_Tensor *tensor, size_t *offset);
797 
798 /**
799  * @brief Creates an executor instance of the {@link OH_NNExecutor} type.
800  *
801  * This method constructs a model inference executor associated with the device based on the passed compilation. \n
802  *
803  * After the {@link OH_NNExecutor} instance is created, you can release the {@link OH_NNCompilation}
804  * instance if you do not need to create any other executors. \n
805  *
806  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
807  * @return Pointer to a {@link OH_NNExecutor} instance, or NULL if it fails to create.
808  * @since 9
809  * @version 1.0
810  */
811 OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation);
812 
813 /**
814  * @brief Obtains the dimension information about the output tensor.
815  *
816  * After {@link OH_NNExecutor_Run} is called to complete a single inference, call this method to obtain the specified
817  * output dimension information and number of dimensions. It is commonly used in dynamic shape input and output
818  * scenarios.\n
819  *
 * If the <b>outputIndex</b> is greater than or equal to the output tensor number, this method returns an error code.
 * The output tensor number can be obtained by {@link OH_NNExecutor_GetOutputCount}.\n
 *
 * As an output parameter, <b>*shape</b> must be a null pointer, otherwise the method will return an error code.
 * For example, you should define int32_t* tensorShape = NULL, and pass &tensorShape as the argument of <b>shape</b>.\n
 *
 * You do not need to release the memory of <b>shape</b>. It will be released when <b>executor</b> is destroyed.\n
827  *
828  * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param outputIndex Output index value, in the same sequence as the data output when
 *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                    Assume that <b>outputIndices</b> is <b>{4, 6, 8}</b> when
 *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                    When {@link OH_NNExecutor_GetOutputShape} is called to obtain dimension information about
 *                    the output tensor, the valid values of <b>outputIndex</b> are <b>{0, 1, 2}</b>.
835  * @param shape Pointer to the int32_t array. The value of each element in the array is the length of the output tensor
836  *              in each dimension.
837  * @param shapeLength Pointer to the uint32_t type. The number of output dimensions is returned.
838  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
839  *         If the operation fails, an error code is returned.
840  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
841  * @since 9
842  * @version 1.0
843  */
844 OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor,
845                                               uint32_t outputIndex,
846                                               int32_t **shape,
847                                               uint32_t *shapeLength);
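
/*
 * Example (non-normative): a sketch of reading the dimensions of output 0 after a successful
 * inference; <shape> is owned by the executor and must not be released by the caller:
 *
 *     int32_t *shape = NULL;   // must be passed in as a null pointer
 *     uint32_t shapeLength = 0;
 *     if (OH_NNExecutor_GetOutputShape(executor, 0, &shape, &shapeLength) == OH_NN_SUCCESS) {
 *         for (uint32_t i = 0; i < shapeLength; ++i) {
 *             // shape[i] is the length of the output tensor in dimension i
 *         }
 *     }
 */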
848 
849 /**
850  * @brief Destroys an executor instance to release the memory occupied by the executor.
851  *
 * This method needs to be called to release the executor instance created by calling
 * {@link OH_NNExecutor_Construct}. Otherwise, a memory leak will occur. \n
854  *
855  * If <b>executor</b> or <b>*executor</b> is a null pointer,
856  * this method only prints warning logs and does not execute the release. \n
857  *
858  * @param executor Double pointer to the {@link OH_NNExecutor} instance.
859  * @since 9
860  * @version 1.0
861  */
862 void OH_NNExecutor_Destroy(OH_NNExecutor **executor);
863 
864 /**
865  * @brief Gets the input tensor count.
866  *
867  * You can get the input tensor count from the executor, and then create an input tensor descriptor with its index by
868  * {@link OH_NNExecutor_CreateInputTensorDesc}. \n
869  *
870  * @param executor Pointer to the {@link OH_NNExecutor} instance.
871  * @param inputCount Input tensor count returned.
872  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
873  *         If the operation fails, an error code is returned.
874  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
875  * @since 11
876  * @version 1.0
877  */
878 OH_NN_ReturnCode OH_NNExecutor_GetInputCount(const OH_NNExecutor *executor, size_t *inputCount);
879 
880 /**
881  * @brief Gets the output tensor count.
882  *
883  * You can get the output tensor count from the executor, and then create an output tensor descriptor with its index by
884  * {@link OH_NNExecutor_CreateOutputTensorDesc}. \n
885  *
886  * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param outputCount Output tensor count returned.
888  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
889  *         If the operation fails, an error code is returned.
890  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
891  * @since 11
892  * @version 1.0
893  */
894 OH_NN_ReturnCode OH_NNExecutor_GetOutputCount(const OH_NNExecutor *executor, size_t *outputCount);
895 
896 /**
897  * @brief Creates an input tensor descriptor with its index.
898  *
899  * The input tensor descriptor contains all attributes of the input tensor.
 * If the <b>index</b> is greater than or equal to the input tensor number, this method returns a null pointer.
 * The input tensor number can be obtained by {@link OH_NNExecutor_GetInputCount}.\n
902  *
903  * @param executor Pointer to the {@link OH_NNExecutor} instance.
904  * @param index Input tensor index.
905  * @return Pointer to {@link NN_TensorDesc} instance, or NULL if it fails to create.
906  * @since 11
907  * @version 1.0
908  */
909 NN_TensorDesc *OH_NNExecutor_CreateInputTensorDesc(const OH_NNExecutor *executor, size_t index);
910 
911 /**
912  * @brief Creates an output tensor descriptor with its index.
913  *
914  * The output tensor descriptor contains all attributes of the output tensor.
 * If the <b>index</b> is greater than or equal to the output tensor number, this method returns a null pointer.
 * The output tensor number can be obtained by {@link OH_NNExecutor_GetOutputCount}.\n
917  *
918  * @param executor Pointer to the {@link OH_NNExecutor} instance.
919  * @param index Output tensor index.
920  * @return Pointer to {@link NN_TensorDesc} instance, or NULL if it fails to create.
921  * @since 11
922  * @version 1.0
923  */
924 NN_TensorDesc *OH_NNExecutor_CreateOutputTensorDesc(const OH_NNExecutor *executor, size_t index);
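
/*
 * Example (non-normative): a sketch of enumerating the executor's inputs and creating a
 * descriptor for each one, assuming the created descriptors are owned by the caller and released
 * with {@link OH_NNTensorDesc_Destroy}; the output side is symmetrical via
 * {@link OH_NNExecutor_GetOutputCount} and {@link OH_NNExecutor_CreateOutputTensorDesc}:
 *
 *     size_t inputCount = 0;
 *     if (OH_NNExecutor_GetInputCount(executor, &inputCount) == OH_NN_SUCCESS) {
 *         for (size_t i = 0; i < inputCount; ++i) {
 *             NN_TensorDesc *desc = OH_NNExecutor_CreateInputTensorDesc(executor, i);
 *             // ... use the descriptor, e.g. to create an NN_Tensor ...
 *             (void)OH_NNTensorDesc_Destroy(&desc);
 *         }
 *     }
 */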
925 
926 /**
927  * @brief Gets the dimension ranges of an input tensor.
928  *
929  * The supported dimension ranges of an input tensor with dynamic shape may be different among various devices.
930  * You can call this method to get the dimension ranges of the input tensor supported by the device.
 * <b>*minInputDims</b> contains the minimum dimensions of the input tensor, and <b>*maxInputDims</b> contains the
 * maximum, e.g. if an input tensor has dynamic shape [-1, -1, -1, 3], its <b>*minInputDims</b> may be [1, 10, 10, 3]
 * and <b>*maxInputDims</b> may be [100, 1024, 1024, 3] on the device.\n
 *
 * If the <b>index</b> is greater than or equal to the input tensor number, this method returns an error code.
 * The input tensor number can be obtained by {@link OH_NNExecutor_GetInputCount}.\n
 *
 * As an output parameter, <b>*minInputDims</b> or <b>*maxInputDims</b> must be a null pointer, otherwise the method
 * will return an error code. For example, you should define size_t* minInDims = NULL, and pass &minInDims as the
 * argument of <b>minInputDims</b>.\n
 *
 * You do not need to release the memory of <b>*minInputDims</b> or <b>*maxInputDims</b>.
 * It will be released when <b>executor</b> is destroyed.\n
944  *
945  * @param executor Pointer to the {@link OH_NNExecutor} instance.
946  * @param index Input tensor index.
 * @param minInputDims Returned pointer to an array containing the minimum dimensions of the input tensor.
 * @param maxInputDims Returned pointer to an array containing the maximum dimensions of the input tensor.
 * @param shapeLength Returned length of the shape of the input tensor.
950  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
951  *         If the operation fails, an error code is returned.
952  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
953  * @since 11
954  * @version 1.0
955  */
956 OH_NN_ReturnCode OH_NNExecutor_GetInputDimRange(const OH_NNExecutor *executor,
957                                                 size_t index,
958                                                 size_t **minInputDims,
959                                                 size_t **maxInputDims,
960                                                 size_t *shapeLength);
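
/*
 * Example (non-normative): a sketch of querying the supported dimension ranges of input 0; both
 * output pointers must be passed in as null pointers and the returned arrays are owned by the
 * executor:
 *
 *     size_t *minDims = NULL;
 *     size_t *maxDims = NULL;
 *     size_t shapeLength = 0;
 *     if (OH_NNExecutor_GetInputDimRange(executor, 0, &minDims, &maxDims, &shapeLength)
 *             == OH_NN_SUCCESS) {
 *         for (size_t i = 0; i < shapeLength; ++i) {
 *             // dimension i of input 0 must lie within [minDims[i], maxDims[i]]
 *         }
 *     }
 */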
961 
962 /**
 * @brief Sets the callback function handle for post-processing when the asynchronous execution is done.
 *
 * The definition of the callback function: {@link NN_OnRunDone}. \n
966  *
967  * @param executor Pointer to the {@link OH_NNExecutor} instance.
968  * @param onRunDone Callback function handle {@link NN_OnRunDone}.
969  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
970  *         If the operation fails, an error code is returned.
971  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
972  * @since 11
973  * @version 1.0
974  */
975 OH_NN_ReturnCode OH_NNExecutor_SetOnRunDone(OH_NNExecutor *executor, NN_OnRunDone onRunDone);
976 
977 /**
 * @brief Sets the callback function handle for post-processing when the
 * device driver service dies during asynchronous execution.
980  *
 * The definition of the callback function: {@link NN_OnServiceDied}. \n
982  *
983  * @param executor Pointer to the {@link OH_NNExecutor} instance.
984  * @param onServiceDied Callback function handle {@link NN_OnServiceDied}.
985  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
986  *         If the operation fails, an error code is returned.
987  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
988  * @since 11
989  * @version 1.0
990  */
991 OH_NN_ReturnCode OH_NNExecutor_SetOnServiceDied(OH_NNExecutor *executor, NN_OnServiceDied onServiceDied);
992 
993 /**
994  * @brief Synchronous execution of the model inference.
995  *
 * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize}
 * or {@link OH_NNTensor_CreateWithFd}. Then fill the input tensor data obtained by
 * {@link OH_NNTensor_GetDataBuffer}. The executor performs inference
 * and writes the results into the output tensor data for you to read. \n
1000  *
1001  * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape},
1002  * or you can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc},
1003  * and then read its real shape by {@link OH_NNTensorDesc_GetShape}. \n
1004  *
1005  * @param executor Pointer to the {@link OH_NNExecutor} instance.
1006  * @param inputTensor An array of input tensors {@link NN_Tensor}.
1007  * @param inputCount Number of input tensors.
1008  * @param outputTensor An array of output tensors {@link NN_Tensor}.
1009  * @param outputCount Number of output tensors.
1010  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
1011  *         If the operation fails, an error code is returned.
1012  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
1013  * @since 11
1014  * @version 1.0
1015  */
1016 OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor,
1017                                        NN_Tensor *inputTensor[],
1018                                        size_t inputCount,
1019                                        NN_Tensor *outputTensor[],
1020                                        size_t outputCount);
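
/*
 * Example (non-normative): a sketch of a synchronous run with one input and one output, assuming
 * <inDesc> and <outDesc> come from {@link OH_NNExecutor_CreateInputTensorDesc} and
 * {@link OH_NNExecutor_CreateOutputTensorDesc}, <srcData>/<srcSize> describe the caller's input
 * data, the output is float32, and memcpy() comes from <string.h>:
 *
 *     NN_Tensor *inputs[1] = {OH_NNTensor_Create(0, inDesc)};
 *     NN_Tensor *outputs[1] = {OH_NNTensor_Create(0, outDesc)};
 *
 *     void *inputBuffer = OH_NNTensor_GetDataBuffer(inputs[0]);
 *     memcpy(inputBuffer, srcData, srcSize);   // fill the input data
 *
 *     OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(executor, inputs, 1, outputs, 1);
 *     if (ret == OH_NN_SUCCESS) {
 *         const float *result = (const float *)OH_NNTensor_GetDataBuffer(outputs[0]);
 *         // ... read the inference results ...
 *     }
 *     (void)OH_NNTensor_Destroy(&inputs[0]);
 *     (void)OH_NNTensor_Destroy(&outputs[0]);
 */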
1021 
1022 /**
1023  * @brief Asynchronous execution of the model inference.
1024  *
 * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize}
 * or {@link OH_NNTensor_CreateWithFd}. Then fill the input tensor data obtained by
 * {@link OH_NNTensor_GetDataBuffer}. The executor performs inference
 * and writes the results into the output tensor data for you to read.\n
1029  *
1030  * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape},
1031  * or you can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc},
1032  * and then read its real shape by {@link OH_NNTensorDesc_GetShape}.\n
1033  *
 * This method is non-blocking and returns immediately.\n
1035  *
 * The callback function handles are set by {@link OH_NNExecutor_SetOnRunDone}
 * and {@link OH_NNExecutor_SetOnServiceDied}. The inference results and error code can be obtained through
 * {@link NN_OnRunDone}, and you can handle the abnormal termination of the device driver service during
 * asynchronous execution through {@link NN_OnServiceDied}.\n
1040  *
 * If the execution time reaches the <b>timeout</b>, the execution will be terminated
 * with no outputs, and the <b>errCode</b> returned in the callback function {@link NN_OnRunDone} will be
 * {@link OH_NN_TIMEOUT}.\n
1044  *
 * The <b>userData</b> is the identifier of the asynchronous execution and will be passed as the first parameter of
 * the callback function. You can input any value you want as long as it can identify different asynchronous
 * executions.\n
1047  *
1048  * @param executor Pointer to the {@link OH_NNExecutor} instance.
1049  * @param inputTensor An array of input tensors {@link NN_Tensor}.
1050  * @param inputCount Number of input tensors.
1051  * @param outputTensor An array of output tensors {@link NN_Tensor}.
1052  * @param outputCount Number of output tensors.
1053  * @param timeout Time limit (millisecond) of the asynchronous execution, e.g. 1000.
1054  * @param userData Asynchronous execution identifier.
1055  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
1056  *         If the operation fails, an error code is returned.
1057  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
1058  * @since 11
1059  * @version 1.0
1060  */
1061 OH_NN_ReturnCode OH_NNExecutor_RunAsync(OH_NNExecutor *executor,
1062                                         NN_Tensor *inputTensor[],
1063                                         size_t inputCount,
1064                                         NN_Tensor *outputTensor[],
1065                                         size_t outputCount,
1066                                         int32_t timeout,
1067                                         void *userData);
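
/*
 * Example (non-normative): a sketch of an asynchronous run with a 1000 ms timeout, assuming
 * <inputs>/<outputs> are prepared as in the synchronous example above and the callback signature
 * matches the {@link NN_OnRunDone} declaration in neural_network_runtime_type.h:
 *
 *     static void OnRunDone(void *userData, OH_NN_ReturnCode errCode,
 *                           void *outputTensor[], int32_t outputCount)
 *     {
 *         // errCode is OH_NN_TIMEOUT if the time limit was reached;
 *         // userData identifies which asynchronous execution has finished
 *     }
 *
 *     // in the setup code:
 *     static int32_t token = 42;   // arbitrary identifier (assumption)
 *     (void)OH_NNExecutor_SetOnRunDone(executor, OnRunDone);
 *     OH_NN_ReturnCode ret = OH_NNExecutor_RunAsync(executor, inputs, 1, outputs, 1,
 *                                                   1000, &token);
 *     // returns immediately; results are delivered through OnRunDone
 */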
1068 
1069 /**
1070  * @brief Obtains the IDs of all devices connected.
1071  *
1072  * Each device has an unique and fixed ID. This method returns device IDs on the current device through the uint32_t
1073  * array.\n
1074  *
1075  * Device IDs are returned through the size_t array. Each element of the array is the ID of a single device.\n
1076  *
1077  * The array memory is managed inside, so you do not need to care about it.
1078  * The data pointer is valid before this method is called next time.\n
1079  *
1080  * @param allDevicesID Pointer to the size_t array. The input <b>*allDevicesID</b> must be a null pointer.
1081  *                     Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
1082  * @param deviceCount Pointer of the uint32_t type, which is used to return the length of <b>*allDevicesID</b>.
1083  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
1084  *         If the operation fails, an error code is returned.
1085  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
1086  * @since 9
1087  * @version 1.0
1088  */
1089 OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount);
1090 
1091 /**
1092  * @brief Obtains the name of the specified device.
1093  *
1094  * <b>deviceID</b> specifies the device whose name will be obtained. The device ID needs to be obtained by calling
1095  * {@link OH_NNDevice_GetAllDevicesID}.
1096  * If it is 0, the first device in the current device list will be used by default.\n
1097  *
1098  * The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>. <b>*name</b> must be a null pointer.
1099  * Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
 * For example, you should define char* deviceName = NULL, and pass &deviceName as the argument of <b>name</b>.\n
1101  *
1102  * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
1103  * @param name The device name returned.
1104  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
1105  *         If the operation fails, an error code is returned.
1106  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
1107  * @since 9
1108  * @version 1.0
1109  */
1110 OH_NN_ReturnCode OH_NNDevice_GetName(size_t deviceID, const char **name);
1111 
1112 /**
1113  * @brief Obtains the type information of the specified device.
1114  *
1115  * <b>deviceID</b> specifies the device whose type will be obtained. If it is 0, the first device in the current device
1116  * list will be used. Currently the following device types are supported:
1117  * - <b>OH_NN_CPU</b>: CPU device.
1118  * - <b>OH_NN_GPU</b>: GPU device.
1119  * - <b>OH_NN_ACCELERATOR</b>: machine learning dedicated accelerator.
1120  * - <b>OH_NN_OTHERS</b>: other hardware types. \n
1121  *
1122  * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
1123  * @param deviceType The device type {@link OH_NN_DeviceType} returned.
1124  * @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
1125  *         If the operation fails, an error code is returned.
1126  *         For details about the error codes, see {@link OH_NN_ReturnCode}.
1127  * @since 9
1128  * @version 1.0
1129  */
1130 OH_NN_ReturnCode OH_NNDevice_GetType(size_t deviceID, OH_NN_DeviceType *deviceType);
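
/*
 * Example (non-normative): a sketch of enumerating all connected devices and querying their names
 * and types; the returned array and strings are managed by the runtime and must not be released:
 *
 *     const size_t *allDevicesID = NULL;   // must be passed in as a null pointer
 *     uint32_t deviceCount = 0;
 *     if (OH_NNDevice_GetAllDevicesID(&allDevicesID, &deviceCount) == OH_NN_SUCCESS) {
 *         for (uint32_t i = 0; i < deviceCount; ++i) {
 *             const char *name = NULL;
 *             OH_NN_DeviceType type = OH_NN_OTHERS;
 *             (void)OH_NNDevice_GetName(allDevicesID[i], &name);
 *             (void)OH_NNDevice_GetType(allDevicesID[i], &type);
 *             // e.g. pick the first OH_NN_ACCELERATOR device for OH_NNCompilation_SetDevice
 *         }
 *     }
 */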
1131 
1132 #ifdef __cplusplus
1133 }
1134 #endif // __cplusplus
1135 
1136 /** @} */
1137 #endif // NEURAL_NETWORK_CORE_H
1138