/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup NNRt
 * @{
 *
 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.\n
 * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment\n
 * oriented to the AI field.
 *
 * @since 3.2
 * @version 2.1
 */

/**
 * @file INnrtDevice.idl
 *
 * @brief Defines methods related to chip devices.
 *
 * You can use the methods to query chip device information and build AI models.
 *
 * @since 3.2
 * @version 2.1
 */

/**
 * @brief Defines the package path of the NNRt module.
 *
 * @since 3.2
 * @version 2.1
 */
package ohos.hdi.nnrt.v2_1;

import ohos.hdi.nnrt.v2_1.NnrtTypes;
import ohos.hdi.nnrt.v2_1.ModelTypes;
import ohos.hdi.nnrt.v2_1.IPreparedModel;

/**
 * @brief Provides methods for device management and model building.
 *
 * When multiple devices are registered, ensure that the combination of the device name\n
 * and vendor name is globally unique.
 *
 * @since 3.2
 * @version 2.1
 */
interface INnrtDevice {
    /**
     * @brief Obtains the device name.
     *
     * @param name Device name.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    GetDeviceName([out] String name);

    /**
     * @brief Obtains the device vendor name.
     *
     * @param name Device vendor name.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    GetVendorName([out] String name);

    /**
     * @brief Obtains the device type.
     *
     * @param deviceType Device type. For details, see {@link DeviceType}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    GetDeviceType([out] enum DeviceType deviceType);

    /**
     * @brief Obtains the device status.
     *
     * @param status Device status. For details, see {@link DeviceStatus}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    GetDeviceStatus([out] enum DeviceStatus status);
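
    /*
     * Usage sketch (non-normative): the device-information methods above are typically called through the
     * generated C++ client proxy. The header path, the INnrtDevice::Get() factory, and the namespace below
     * follow the usual output of the HDI code generator for this interface and should be treated as
     * assumptions, not guaranteed names.
     *
     *     #include <string>
     *     #include <v2_1/innrt_device.h>                // assumed generated client header
     *
     *     using namespace OHOS::HDI::Nnrt::V2_1;        // assumed generated namespace
     *
     *     void QueryDeviceInfo()
     *     {
     *         OHOS::sptr<INnrtDevice> device = INnrtDevice::Get();   // obtain the HDI proxy
     *         if (device == nullptr) {
     *             return;                               // driver service not available
     *         }
     *         std::string name;
     *         std::string vendor;
     *         DeviceType type{};
     *         DeviceStatus status{};
     *         // Each call returns 0 on success; non-0 values follow the HDF/NNRT_ReturnCode convention.
     *         if (device->GetDeviceName(name) == 0 && device->GetVendorName(vendor) == 0 &&
     *             device->GetDeviceType(type) == 0 && device->GetDeviceStatus(status) == 0) {
     *             // The name/vendor pair identifies the device; it must be globally unique.
     *         }
     *     }
     */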

    /**
     * @brief Obtains the device's support for the operators of the specified AI model.
     *
     * @param model AI model. For details, see {@link Model}.
     * @param ops Operators supported and not supported by the device. The operators are listed in the same\n
     * sequence as they are listed in the AI model.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    GetSupportedOperation([in] struct Model model, [out] boolean[] ops);
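
    /*
     * Usage sketch (non-normative): a client can use GetSupportedOperation() to decide whether the device
     * can execute every node of a model. How the Model is constructed is elided because it depends on the
     * framework that produces it; `device` is an INnrtDevice proxy obtained as in the sketch above, and the
     * generated C++ signature (std::vector<bool> for boolean[]) is an assumption.
     *
     *     #include <algorithm>
     *     #include <vector>
     *
     *     bool AllOpsSupported(const OHOS::sptr<INnrtDevice> &device, const Model &model)
     *     {
     *         std::vector<bool> ops;                    // one flag per operator, in model order
     *         if (device->GetSupportedOperation(model, ops) != 0) {
     *             return false;                         // query failed
     *         }
     *         // All operators must be supported for the device to run the whole model.
     *         return std::all_of(ops.begin(), ops.end(), [](bool supported) { return supported; });
     *     }
     */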

    /**
     * @brief Checks whether the device supports running Float32 models at Float16 precision.
     *
     * @param isSupported Whether the Float16 precision is supported.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    IsFloat16PrecisionSupported([out] boolean isSupported);

    /**
     * @brief Checks whether the device supports performance preference settings. For details about the performance\n
     * preference, see {@link PerformanceMode}.
     *
     * @param isSupported Whether performance preference settings are supported.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    IsPerformanceModeSupported([out] boolean isSupported);

    /**
     * @brief Checks whether the device supports task priority settings. For details about the priority,\n
     * see {@link Priority}.
     *
     * @param isSupported Whether task priority settings are supported.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    IsPrioritySupported([out] boolean isSupported);

    /**
     * @brief Checks whether the device supports dynamic input, which allows a model to accept inputs of\n
     * different shapes across inference operations.
     *
     * If dynamic input is supported, <b>-1</b> is used for the variable dimensions in the shape of the input tensor.
     *
     * @param isSupported Whether dynamic input is supported.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    IsDynamicInputSupported([out] boolean isSupported);
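
    /*
     * Usage sketch (non-normative): the IsXxxSupported() capability queries declared by this interface are
     * typically checked before a ModelConfig is filled in, so that unsupported options are never requested.
     * `device` is an INnrtDevice proxy as in the earlier sketches; the generated C++ signatures
     * (bool& out parameters) are an assumption.
     *
     *     struct DeviceCapabilities {
     *         bool fp16 = false;
     *         bool performanceMode = false;
     *         bool priority = false;
     *         bool dynamicInput = false;
     *         bool modelCache = false;
     *     };
     *
     *     DeviceCapabilities QueryCapabilities(const OHOS::sptr<INnrtDevice> &device)
     *     {
     *         DeviceCapabilities caps;
     *         // Each query returns 0 on success; on failure the corresponding flag keeps its default (false).
     *         device->IsFloat16PrecisionSupported(caps.fp16);
     *         device->IsPerformanceModeSupported(caps.performanceMode);
     *         device->IsPrioritySupported(caps.priority);
     *         device->IsDynamicInputSupported(caps.dynamicInput);
     *         device->IsModelCacheSupported(caps.modelCache);
     *         return caps;
     *     }
     */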

    /**
     * @brief Builds a model.
     *
     * If the AI model supports dynamic input, at least one dimension of the input tensor is set to <b>-1</b>.
     *
     * @param model Model to build. For details, see {@link Model}.
     * @param config Model configuration. For details, see {@link ModelConfig}.
     * @param preparedModel Model object built. For details, see {@link IPreparedModel}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    PrepareModel([in] struct Model model, [in] struct ModelConfig config, [out] IPreparedModel preparedModel);
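
    /*
     * Usage sketch (non-normative): building a model combines the capability checks with a ModelConfig.
     * The ModelConfig field names (enableFloat16, mode, priority) and the enumerator names below follow the
     * companion ModelTypes/NnrtTypes definitions but should be treated as assumptions here.
     *
     *     OHOS::sptr<IPreparedModel> BuildModel(const OHOS::sptr<INnrtDevice> &device, const Model &model)
     *     {
     *         ModelConfig config;
     *         bool fp16 = false;
     *         device->IsFloat16PrecisionSupported(fp16);
     *         config.enableFloat16 = fp16;              // only request Float16 when the device supports it
     *         config.mode = PERFORMANCE_HIGH;           // assumed PerformanceMode enumerator
     *         config.priority = PRIORITY_MEDIUM;        // assumed Priority enumerator
     *
     *         OHOS::sptr<IPreparedModel> preparedModel;
     *         if (device->PrepareModel(model, config, preparedModel) != 0) {
     *             return nullptr;                       // build failed; see NNRT_ReturnCode for the reason
     *         }
     *         return preparedModel;
     *     }
     */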

    /**
     * @brief Checks whether the device supports caching of the AI models built.
     *
     * If yes, <b>PrepareModelFromModelCache()</b> and <b>ExportModelCache()</b> need to be implemented.
     *
     * @param isSupported Whether caching of the AI models built is supported.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    IsModelCacheSupported([out] boolean isSupported);

    /**
     * @brief Loads an AI model from the cache. The AI model is exported by using <b>ExportModelCache()</b>.
     *
     * @param modelCache Array of the model files, which are in the same sequence as they were exported. For details,\n
     * see {@link SharedBuffer}.
     * @param config Configuration for loading the model. For details, see {@link ModelConfig}.
     * @param preparedModel Model object. For details, see {@link IPreparedModel}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    PrepareModelFromModelCache([in] struct SharedBuffer[] modelCache, [in] struct ModelConfig config,
                               [out] IPreparedModel preparedModel);
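
    /*
     * Usage sketch (non-normative): when IsModelCacheSupported() reports true, a prepared model can be
     * exported once with IPreparedModel::ExportModelCache() and later reloaded without rebuilding. The
     * ExportModelCache() C++ signature (std::vector<SharedBuffer>&) is assumed from the companion
     * IPreparedModel.idl; the caller is expected to persist and restore the buffers in the same order.
     *
     *     OHOS::sptr<IPreparedModel> ReloadFromCache(const OHOS::sptr<INnrtDevice> &device,
     *                                                const OHOS::sptr<IPreparedModel> &built,
     *                                                const ModelConfig &config)
     *     {
     *         std::vector<SharedBuffer> cache;
     *         if (built->ExportModelCache(cache) != 0) {
     *             return nullptr;                       // export failed
     *         }
     *         // ... persist `cache` to storage and read it back in the same order ...
     *         OHOS::sptr<IPreparedModel> reloaded;
     *         if (device->PrepareModelFromModelCache(cache, config, reloaded) != 0) {
     *             return nullptr;                       // reload failed
     *         }
     *         return reloaded;
     *     }
     */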

    /**
     * @brief Loads offline model files from the cache. The offline model is obtained after the offline model\n
     * files are passed by the inference framework to the NNRt module and parsed by NNRt.
     *
     * @param offlineModels Array of offline model files. The sequence of array elements depends on the format of\n
     * the input offline model. For details about the element type, see {@link SharedBuffer}.
     * @param config Configuration for loading the offline model files. For details, see {@link ModelConfig}.
     * @param preparedModel Model object obtained. For details, see {@link IPreparedModel}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    PrepareOfflineModel([in] struct SharedBuffer[] offlineModels, [in] struct ModelConfig config,
                        [out] IPreparedModel preparedModel);
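
    /*
     * Usage sketch (non-normative): loading a vendor offline model follows the same pattern as loading
     * from the model cache, except that the SharedBuffer array comes from the offline model files parsed
     * by NNRt rather than from ExportModelCache(). How the buffers are filled is format-specific and
     * elided here; the C++ signature below is assumed from the IDL declaration.
     *
     *     OHOS::sptr<IPreparedModel> LoadOffline(const OHOS::sptr<INnrtDevice> &device,
     *                                            const std::vector<SharedBuffer> &offlineModels,
     *                                            const ModelConfig &config)
     *     {
     *         OHOS::sptr<IPreparedModel> preparedModel;
     *         if (device->PrepareOfflineModel(offlineModels, config, preparedModel) != 0) {
     *             return nullptr;                       // unsupported format or parsing/build failure
     *         }
     *         return preparedModel;
     *     }
     */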

    /**
     * @brief Allocates the shared memory for the device. The shared memory allows quick access to the input\n
     * and output data for AI inference.
     *
     * @param length Length of the shared memory to allocate, in bytes.
     * @param buffer Information about the shared memory allocated, including the file descriptor and size.\n
     * For details, see {@link SharedBuffer}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    AllocateBuffer([in] unsigned int length, [out] struct SharedBuffer buffer);

    /**
     * @brief Releases the shared memory.
     *
     * @param buffer Information about the shared memory allocated, including the file descriptor and size.\n
     * For details, see {@link SharedBuffer}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a non-0 value if the operation fails. A negative value is an HDF standard error code,\n
     * and a positive value is a dedicated error code defined by NNRt. For details, see {@link NNRT_ReturnCode}.
     */
    ReleaseBuffer([in] struct SharedBuffer buffer);
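
    /*
     * Usage sketch (non-normative): input and output tensors are typically exchanged through the shared
     * memory managed by AllocateBuffer()/ReleaseBuffer(). The SharedBuffer field name mentioned in the
     * comments below (fd) follows the companion NnrtTypes definition and is an assumption here.
     *
     *     bool RoundTripBuffer(const OHOS::sptr<INnrtDevice> &device, unsigned int length)
     *     {
     *         SharedBuffer buffer{};
     *         if (device->AllocateBuffer(length, buffer) != 0) {
     *             return false;                         // allocation failed
     *         }
     *         // The returned descriptor (e.g. buffer.fd) can be mapped by the caller to fill input data
     *         // and read inference results.
     *         // ...
     *         // Always release the memory once the inference input/output is no longer needed.
     *         return device->ReleaseBuffer(buffer) == 0;
     *     }
     */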
}

/** @} */