/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdi_device_v1_0.h"

#include "hdf_base.h"
#include "mindir.h"

#include "hdi_prepared_model_v1_0.h"
#include "lite_graph_to_hdi_model_v1_0.h"
#include "memory_manager.h"
#include "transform.h"
#include "common/log.h"
#include "common/utils.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace {
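// Helpers translating between the NNRt framework enums and their HDI V1_0 counterparts.
// Unrecognized values fall back to the corresponding OTHERS/UNKNOWN/NONE entry.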
OH_NN_DeviceType TransHDIDeviceV1_0Type(const V1_0::DeviceType& iDeviceType)
{
    switch (iDeviceType) {
        case V1_0::DeviceType::CPU:
            return OH_NN_CPU;
        case V1_0::DeviceType::GPU:
            return OH_NN_GPU;
        case V1_0::DeviceType::ACCELERATOR:
            return OH_NN_ACCELERATOR;
        default:
            return OH_NN_OTHERS;
    }
}

DeviceStatus TransHDIDeviceV1_0Status(const V1_0::DeviceStatus& iDeviceStatus)
{
    switch (iDeviceStatus) {
        case V1_0::DeviceStatus::AVAILABLE:
            return DeviceStatus::AVAILABLE;
        case V1_0::DeviceStatus::BUSY:
            return DeviceStatus::BUSY;
        case V1_0::DeviceStatus::OFFLINE:
            return DeviceStatus::OFFLINE;
        default:
            return DeviceStatus::UNKNOWN;
    }
}

V1_0::PerformanceMode TransPerformanceMode(const OH_NN_PerformanceMode& mode)
{
    switch (mode) {
        case OH_NN_PERFORMANCE_LOW:
            return V1_0::PerformanceMode::PERFORMANCE_LOW;
        case OH_NN_PERFORMANCE_MEDIUM:
            return V1_0::PerformanceMode::PERFORMANCE_MEDIUM;
        case OH_NN_PERFORMANCE_HIGH:
            return V1_0::PerformanceMode::PERFORMANCE_HIGH;
        case OH_NN_PERFORMANCE_EXTREME:
            return V1_0::PerformanceMode::PERFORMANCE_EXTREME;
        default:
            return V1_0::PerformanceMode::PERFORMANCE_NONE;
    }
}

V1_0::Priority TransPriority(const OH_NN_Priority& priority)
{
    switch (priority) {
        case OH_NN_PRIORITY_LOW:
            return V1_0::Priority::PRIORITY_LOW;
        case OH_NN_PRIORITY_MEDIUM:
            return V1_0::Priority::PRIORITY_MEDIUM;
        case OH_NN_PRIORITY_HIGH:
            return V1_0::Priority::PRIORITY_HIGH;
        default:
            return V1_0::Priority::PRIORITY_NONE;
    }
}
} // anonymous namespace

HDIDeviceV1_0::HDIDeviceV1_0(OHOS::sptr<V1_0::INnrtDevice> device) : m_iDevice(device)
{}

OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceName(std::string& name)
{
    auto ret = m_iDevice->GetDeviceName(name);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device name failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetVendorName(std::string& name)
{
    auto ret = m_iDevice->GetVendorName(name);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device vendor name failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetVersion(std::string& version)
{
    auto ret = m_iDevice->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI version failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    version = 'v' + std::to_string(m_hdiVersion.first) + '_' + std::to_string(m_hdiVersion.second);
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceType(OH_NN_DeviceType& deviceType)
{
    V1_0::DeviceType iDeviceType;
    auto ret = m_iDevice->GetDeviceType(iDeviceType);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device type failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }

    deviceType = TransHDIDeviceV1_0Type(iDeviceType);
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::GetDeviceStatus(DeviceStatus& status)
{
    V1_0::DeviceStatus iDeviceStatus;
    auto ret = m_iDevice->GetDeviceStatus(iDeviceStatus);
    if (ret != HDF_SUCCESS) {
        LOGE("Get HDI device status failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    status = TransHDIDeviceV1_0Status(iDeviceStatus);
    return OH_NN_SUCCESS;
}

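// Queries which nodes of the LiteGraph the device can execute. Constant tensor data
// is staged in a device shared buffer so the HDI model conversion can reference it;
// the temporary HDI model and the buffer are released before returning.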
OH_NN_ReturnCode HDIDeviceV1_0::GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
    std::vector<bool>& ops)
{
    if (model == nullptr) {
        LOGE("Model is nullptr, cannot query supported operation.");
        return OH_NN_NULL_PTR;
    }

    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0};
    size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());
    int32_t hdiRet {0};
    if (tensorSize > 0) {
        hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);
        if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) {
            LOGE("Allocate tensor buffer error when get supported operation. ErrorCode: %d", hdiRet);
            return OH_NN_FAILED;
        }
    }

    auto iModel = V1::LiteGraph_To_HDIModel(model.get(), tensorBuffer);
    if (iModel == nullptr) {
        LOGE("Parse litegraph to hdi model failed.");
        ReleaseSharedBuffer(tensorBuffer);
        return OH_NN_FAILED;
    }

    hdiRet = m_iDevice->GetSupportedOperation(*iModel, ops);

    V1::HDIModel_Destroy(&iModel);
    auto ret = ReleaseSharedBuffer(tensorBuffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Release tensorBuffer failed.");
        return OH_NN_FAILED;
    }
    if (hdiRet != HDF_SUCCESS) {
        LOGE("Get supported operation failed. ErrorCode=%d", hdiRet);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

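// Capability queries. Each forwards to the HDI service and maps a transport-level
// failure to OH_NN_UNAVAILABLE_DEVICE.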
OH_NN_ReturnCode HDIDeviceV1_0::IsFloat16PrecisionSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsFloat16PrecisionSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query fp16 precision supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsPerformanceModeSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsPerformanceModeSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query performance mode supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsPrioritySupported(bool& isSupported)
{
    auto ret = m_iDevice->IsPrioritySupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query priority supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsDynamicInputSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsDynamicInputSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query dynamic input supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode HDIDeviceV1_0::IsModelCacheSupported(bool& isSupported)
{
    auto ret = m_iDevice->IsModelCacheSupported(isSupported);
    if (ret != HDF_SUCCESS) {
        LOGE("Query cache model supported failed. ErrorCode=%d", ret);
        return OH_NN_UNAVAILABLE_DEVICE;
    }
    return OH_NN_SUCCESS;
}

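// Compiles a LiteGraph into a device prepared model. Constant tensors are staged in
// a shared buffer, the graph is converted to the HDI model format, and both the
// temporary HDI model and the buffer are released once compilation completes.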
OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
    const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel)
{
    if (model == nullptr) {
        LOGE("Model is nullptr, cannot prepare model.");
        return OH_NN_INVALID_PARAMETER;
    }

    OHOS::HDI::Nnrt::V1_0::SharedBuffer tensorBuffer {INVALID_FD, 0, 0, 0};
    size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());
    int32_t hdiRet {0};
    if (tensorSize > 0) {
        hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);
        if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == INVALID_FD) {
            LOGE("Allocate tensor buffer error when prepare model. ErrorCode: %d", hdiRet);
            return OH_NN_FAILED;
        }
    }

    V1_0::Model* iModel = V1::LiteGraph_To_HDIModel(model.get(), tensorBuffer);
    if (iModel == nullptr) {
        LOGE("Parse litegraph to hdi model failed.");
        ReleaseSharedBuffer(tensorBuffer);
        return OH_NN_FAILED;
    }

    V1_0::ModelConfig iModelConfig;
    iModelConfig.enableFloat16 = config.enableFloat16;
    iModelConfig.mode = TransPerformanceMode(config.mode);
    iModelConfig.priority = TransPriority(config.priority);
    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;

    auto preparedRet = m_iDevice->PrepareModel(*iModel, iModelConfig, iPreparedModel);

    V1::HDIModel_Destroy(&iModel);
    auto ret = ReleaseSharedBuffer(tensorBuffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Release tensorBuffer failed.");
        return OH_NN_FAILED;
    }
    if (preparedRet != HDF_SUCCESS || iPreparedModel == nullptr) {
        LOGE("Prepare model failed. ErrorCode=%d", preparedRet);
        return OH_NN_FAILED;
    }

    preparedModel = CreateSharedPtr<HDIPreparedModelV1_0>(iPreparedModel);
    if (preparedModel == nullptr) {
        LOGE("Prepare model failed, because creating the preparedModel instance failed.");
        return OH_NN_MEMORY_ERROR;
    }

    return OH_NN_SUCCESS;
}

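// Preparing from a raw meta graph is not supported by the V1_0 HDI interface.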
OH_NN_ReturnCode HDIDeviceV1_0::PrepareModel(const void* metaGraph,
    const ModelConfig& config,
    std::shared_ptr<PreparedModel>& preparedModel)
{
    return OH_NN_OPERATION_FORBIDDEN;
}

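// Rebuilds a prepared model from model-cache buffers. Each cache entry must be
// NNRt shared memory known to the MemoryManager.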
OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector<Buffer>& modelCache,
    const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel, bool& isUpdatable)
{
    std::vector<V1_0::SharedBuffer> iBuffers;
    auto memManager = MemoryManager::GetInstance();
    Memory memory;
    OH_NN_ReturnCode ret;
    size_t modelCacheSize = modelCache.size();
    for (size_t i = 0; i < modelCacheSize; i++) {
        ret = memManager->GetMemory(modelCache[i].data, memory);
        if (ret != OH_NN_SUCCESS) {
            LOGE("The %zu-th model cache is invalid. Please provide a valid model cache.", i + 1);
            return ret;
        }
        iBuffers.emplace_back(V1_0::SharedBuffer {memory.fd, memory.length, 0, memory.length});
    }

    V1_0::ModelConfig iModelConfig;
    iModelConfig.enableFloat16 = config.enableFloat16;
    iModelConfig.mode = TransPerformanceMode(config.mode);
    iModelConfig.priority = TransPriority(config.priority);

    OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
    auto hdiRet = m_iDevice->PrepareModelFromModelCache(iBuffers, iModelConfig, iPreparedModel);
    if (hdiRet != HDF_SUCCESS) {
        LOGE("Prepare model from cache failed. ErrorCode=%d", hdiRet);
        return OH_NN_UNAVAILABLE_DEVICE;
    }

    preparedModel = CreateSharedPtr<HDIPreparedModelV1_0>(iPreparedModel);
    if (preparedModel == nullptr) {
        LOGE("Prepare model from model cache failed, because creating the preparedModel instance failed.");
        return OH_NN_MEMORY_ERROR;
    }
    return OH_NN_SUCCESS;
}

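// Allocates device shared memory of the given length and maps it into the caller's
// address space. Returns nullptr on failure; the buffer is released if mapping fails.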
void* HDIDeviceV1_0::AllocateBuffer(size_t length)
{
    if (length == 0) {
        LOGE("The length param is invalid, length=0");
        return nullptr;
    }

    V1_0::SharedBuffer buffer;
    auto ret = m_iDevice->AllocateBuffer(length, buffer);
    if (ret != HDF_SUCCESS) {
        LOGE("Allocate buffer error. ErrorCode: %d", ret);
        return nullptr;
    }

    auto memManager = MemoryManager::GetInstance();
    auto addr = memManager->MapMemory(buffer.fd, length);
    if (addr == nullptr) {
        LOGE("Map fd to address failed.");
        m_iDevice->ReleaseBuffer(buffer);
    }
    return addr;
}

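// Overload that hands back the raw shared-memory fd without mapping it locally.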
OH_NN_ReturnCode HDIDeviceV1_0::AllocateBuffer(size_t length, int& fd)
{
    if (length == 0) {
        LOGE("The length param is invalid, length=0");
        return OH_NN_INVALID_PARAMETER;
    }

    V1_0::SharedBuffer buffer;
    auto ret = m_iDevice->AllocateBuffer(length, buffer);
    if (ret != HDF_SUCCESS) {
        LOGE("Allocate buffer error. ErrorCode: %d", ret);
        return OH_NN_MEMORY_ERROR;
    }

    fd = buffer.fd;
    return OH_NN_SUCCESS;
}

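// Releases a device buffer identified by its raw fd and length.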
OH_NN_ReturnCode HDIDeviceV1_0::ReleaseBuffer(int fd, size_t length)
{
    V1_0::SharedBuffer hdiBuffer {fd, length, 0, length};
    auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer);
    if (deviceResult != HDF_SUCCESS) {
        LOGE("Device release buffer error. ErrorCode: %d", deviceResult);
        return OH_NN_MEMORY_ERROR;
    }
    return OH_NN_SUCCESS;
}

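// The tensor description is unused by the V1_0 device; both overloads simply
// delegate to AllocateBuffer(length).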
void* HDIDeviceV1_0::AllocateTensorBuffer(size_t length, std::shared_ptr<TensorDesc> tensor)
{
    return AllocateBuffer(length);
}

void* HDIDeviceV1_0::AllocateTensorBuffer(size_t length, std::shared_ptr<NNTensor> tensor)
{
    return AllocateBuffer(length);
}

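// Releases a buffer obtained from AllocateBuffer: looks up the backing shared
// memory, releases it on the device, then unmaps it locally.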
OH_NN_ReturnCode HDIDeviceV1_0::ReleaseBuffer(const void* buffer)
{
    if (buffer == nullptr) {
        LOGE("Buffer is nullptr, no need to release.");
        return OH_NN_INVALID_PARAMETER;
    }

    auto memManager = MemoryManager::GetInstance();
    Memory memory;
    auto ret = memManager->GetMemory(buffer, memory);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Invalid buffer: it is not an NNRt buffer.");
        return ret;
    }

    V1_0::SharedBuffer hdiBuffer {memory.fd, memory.length, 0, memory.length};
    auto deviceResult = m_iDevice->ReleaseBuffer(hdiBuffer);
    if (deviceResult != HDF_SUCCESS) {
        LOGE("Device release buffer error. ErrorCode: %d", deviceResult);
        return OH_NN_FAILED;
    }

    ret = memManager->UnMapMemory(buffer);
    if (ret != OH_NN_SUCCESS) {
        LOGE("Unmap memory failed.");
        return ret;
    }

    return OH_NN_SUCCESS;
}

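// Releases a device shared buffer; a buffer holding INVALID_FD was never
// allocated and is treated as success.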
OH_NN_ReturnCode HDIDeviceV1_0::ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer)
{
    if (buffer.fd == INVALID_FD) {
        LOGI("No need to release. fd=%d", INVALID_FD);
        return OH_NN_SUCCESS;
    }

    auto ret = m_iDevice->ReleaseBuffer(buffer);
    if (ret != HDF_SUCCESS) {
        LOGE("Device release buffer error. ErrorCode=%d", ret);
        return OH_NN_FAILED;
    }
    return OH_NN_SUCCESS;
}

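// Offline (pre-compiled) models are not supported by the V1_0 interface.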
OH_NN_ReturnCode HDIDeviceV1_0::PrepareOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
    const ModelConfig& config,
    std::shared_ptr<PreparedModel>& preparedModel)
{
    LOGE("HDIDeviceV1.0 does not support PrepareOfflineModel.");
    return OH_NN_OPERATION_FORBIDDEN;
}
} // namespace NeuralNetworkRuntime
} // namespace OHOS