1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "hdi_prepared_model_v1_0.h"
17
18 #include "common/log.h"
19 #include "memory_manager.h"
20 #include "nntensor.h"
21
22 namespace OHOS {
23 namespace NeuralNetworkRuntime {
24 namespace {
TransDataType(const OH_NN_DataType & dataType)25 V1_0::DataType TransDataType(const OH_NN_DataType& dataType)
26 {
27 switch (dataType) {
28 case OH_NN_BOOL:
29 return V1_0::DataType::DATA_TYPE_BOOL;
30 case OH_NN_INT8:
31 return V1_0::DataType::DATA_TYPE_INT8;
32 case OH_NN_INT16:
33 return V1_0::DataType::DATA_TYPE_INT16;
34 case OH_NN_INT32:
35 return V1_0::DataType::DATA_TYPE_INT32;
36 case OH_NN_INT64:
37 return V1_0::DataType::DATA_TYPE_INT64;
38 case OH_NN_UINT8:
39 return V1_0::DataType::DATA_TYPE_UINT8;
40 case OH_NN_UINT16:
41 return V1_0::DataType::DATA_TYPE_UINT16;
42 case OH_NN_UINT32:
43 return V1_0::DataType::DATA_TYPE_UINT32;
44 case OH_NN_UINT64:
45 return V1_0::DataType::DATA_TYPE_UINT64;
46 case OH_NN_FLOAT16:
47 return V1_0::DataType::DATA_TYPE_FLOAT16;
48 case OH_NN_FLOAT32:
49 return V1_0::DataType::DATA_TYPE_FLOAT32;
50 case OH_NN_FLOAT64:
51 return V1_0::DataType::DATA_TYPE_FLOAT64;
52 default:
53 return V1_0::DataType::DATA_TYPE_UNKNOWN;
54 }
55 }
56
TransFormat(const OH_NN_Format & format)57 V1_0::Format TransFormat(const OH_NN_Format& format)
58 {
59 switch (format) {
60 case OH_NN_FORMAT_NCHW:
61 return V1_0::Format::FORMAT_NCHW;
62 case OH_NN_FORMAT_NHWC:
63 return V1_0::Format::FORMAT_NHWC;
64 default:
65 return V1_0::Format::FORMAT_NONE;
66 }
67 }
68
TransIOTensor(const IOTensor & tensor)69 V1_0::IOTensor TransIOTensor(const IOTensor& tensor)
70 {
71 V1_0::IOTensor iTensor;
72 iTensor.name = tensor.name;
73 iTensor.dataType = TransDataType(tensor.dataType);
74 iTensor.dimensions = tensor.dimensions;
75 iTensor.format = TransFormat(tensor.format);
76
77 V1_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0};
78 if (tensor.data != nullptr) {
79 auto memManager = MemoryManager::GetInstance();
80 Memory memory;
81 auto ret = memManager->GetMemory(tensor.data, memory);
82 if (ret != OH_NN_SUCCESS) {
83 LOGE("Invalid Tensor buffer, cannot transform to fd.");
84 } else {
85 iBuffer.fd = memory.fd;
86 iBuffer.bufferSize = memory.length;
87 iBuffer.offset = 0;
88 iBuffer.dataSize = memory.length;
89 }
90 }
91 iTensor.data = iBuffer;
92
93 return iTensor;
94 }
95
TransIOTensor(const NN_Tensor * tensor,V1_0::IOTensor & ioTensor)96 OH_NN_ReturnCode TransIOTensor(const NN_Tensor* tensor, V1_0::IOTensor& ioTensor)
97 {
98 if (tensor == nullptr) {
99 LOGE("TransIOTensor failed, failed to transform to v1_0 IOTensor.");
100 return OH_NN_NULL_PTR;
101 }
102
103 const NNTensor2_0* nnTensor = reinterpret_cast<const NNTensor2_0*>(tensor);
104 TensorDesc* nnTensorDesc = nnTensor->GetTensorDesc();
105 if (nnTensorDesc == nullptr) {
106 LOGE("TransIOTensor failed, failed to get desc from tensor.");
107 return OH_NN_NULL_PTR;
108 }
109
110 // convert name
111 const char* tensorName = nullptr;
112 OH_NN_ReturnCode ret = nnTensorDesc->GetName(&tensorName);
113 if (ret != OH_NN_SUCCESS) {
114 LOGE("TransIOTensor failed, failed to get name from desc.");
115 return ret;
116 }
117 ioTensor.name = tensorName;
118
119 // convert data type
120 OH_NN_DataType dataType;
121 ret = nnTensorDesc->GetDataType(&dataType);
122 if (ret != OH_NN_SUCCESS) {
123 LOGE("TransIOTensor failed, failed to get data type from desc.");
124 return ret;
125 }
126 ioTensor.dataType = TransDataType(dataType);
127
128 // convert format
129 OH_NN_Format format;
130 ret = nnTensorDesc->GetFormat(&format);
131 if (ret != OH_NN_SUCCESS) {
132 LOGE("TransIOTensor failed, failed to get format from desc.");
133 return ret;
134 }
135 ioTensor.format = TransFormat(format);
136
137 // convert shape
138 int32_t* shape = nullptr;
139 size_t shapeNum = 0;
140 ret = nnTensorDesc->GetShape(&shape, &shapeNum);
141 if (ret != OH_NN_SUCCESS) {
142 LOGE("TransIOTensor failed, failed to get shape from desc.");
143 return ret;
144 }
145 ioTensor.dimensions.clear();
146 for (size_t i = 0; i < shapeNum; ++i) {
147 ioTensor.dimensions.emplace_back(shape[i]);
148 }
149
150 // convert data
151 if (!nnTensor->CheckTensorData()) {
152 LOGE("TransIOTensor failed, failed to check tensor data.");
153 return OH_NN_INVALID_PARAMETER;
154 }
155 V1_0::SharedBuffer iBuffer {nnTensor->GetFd(), nnTensor->GetSize(), nnTensor->GetOffset(), nnTensor->GetSize()};
156 ioTensor.data = iBuffer;
157
158 return OH_NN_SUCCESS;
159 }
} // unnamed namespace
161
HDIPreparedModelV1_0(OHOS::sptr<V1_0::IPreparedModel> hdiPreparedModel)162 HDIPreparedModelV1_0::HDIPreparedModelV1_0(OHOS::sptr<V1_0::IPreparedModel> hdiPreparedModel)
163 : m_hdiPreparedModel(hdiPreparedModel)
164 {
165 hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
166 }
167
~HDIPreparedModelV1_0()168 HDIPreparedModelV1_0::~HDIPreparedModelV1_0()
169 {
170 for (auto addr : m_addrs) {
171 auto memManager = MemoryManager::GetInstance();
172 OH_NN_ReturnCode ret = memManager->UnMapMemory(addr);
173 if (ret != OH_NN_SUCCESS) {
174 LOGE("~HDIPreparedModelV1_0 UnMapMemory failed.");
175 }
176 }
177 }
178
ExportModelCache(std::vector<Buffer> & modelCache)179 OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector<Buffer>& modelCache)
180 {
181 if (!modelCache.empty()) {
182 LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size());
183 return OH_NN_INVALID_PARAMETER;
184 }
185
186 std::vector<V1_0::SharedBuffer> iBuffers;
187 auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers);
188 if (ret != HDF_SUCCESS) {
189 LOGE("Export model cache failed. ErrorCode=%d", ret);
190 return OH_NN_SAVE_CACHE_EXCEPTION;
191 }
192
193 auto memManager = MemoryManager::GetInstance();
194 size_t iBuffersSize = iBuffers.size();
195 for (size_t i = 0; i < iBuffersSize; i++) {
196 auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize);
197 if (addr == nullptr) {
198 LOGE("Export the %zuth model cache failed, cannot not map fd to address.", i + 1);
199 return OH_NN_MEMORY_ERROR;
200 }
201 m_addrs.emplace_back(addr);
202 Buffer modelbuffer {addr, iBuffers[i].bufferSize};
203 modelCache.emplace_back(modelbuffer);
204 }
205
206 return OH_NN_SUCCESS;
207 }
208
Run(const std::vector<IOTensor> & inputs,const std::vector<IOTensor> & outputs,std::vector<std::vector<int32_t>> & outputsDims,std::vector<bool> & isOutputBufferEnough)209 OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
210 std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
211 {
212 V1_0::IOTensor iTensor;
213 std::vector<V1_0::IOTensor> iInputTensors;
214 for (const auto& input: inputs) {
215 iTensor = TransIOTensor(input);
216 if (iTensor.data.fd == INVALID_FD) {
217 LOGE("Transform inputs tensor failed, cannot find data file descriptor.");
218 return OH_NN_INVALID_PARAMETER;
219 }
220 iInputTensors.emplace_back(iTensor);
221 }
222
223 std::vector<V1_0::IOTensor> iOutputTensors;
224 for (const auto& output: outputs) {
225 iTensor = TransIOTensor(output);
226 if (iTensor.data.fd == INVALID_FD) {
227 LOGE("Transform outputs tensor failed, cannot find data file descriptor.");
228 return OH_NN_INVALID_PARAMETER;
229 }
230 iOutputTensors.emplace_back(iTensor);
231 }
232
233 auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough);
234 if (ret != HDF_SUCCESS || outputsDims.empty()) {
235 LOGE("Run model failed. ErrorCode=%d", ret);
236 return OH_NN_UNAVAILABLE_DEVICE;
237 }
238
239 return OH_NN_SUCCESS;
240 }
241
Run(const std::vector<NN_Tensor * > & inputs,const std::vector<NN_Tensor * > & outputs,std::vector<std::vector<int32_t>> & outputsDims,std::vector<bool> & isOutputBufferEnough)242 OH_NN_ReturnCode HDIPreparedModelV1_0::Run(const std::vector<NN_Tensor*>& inputs,
243 const std::vector<NN_Tensor*>& outputs, std::vector<std::vector<int32_t>>& outputsDims,
244 std::vector<bool>& isOutputBufferEnough)
245 {
246 V1_0::IOTensor iTensor;
247 std::vector<V1_0::IOTensor> iInputTensors;
248 for (const auto& input: inputs) {
249 auto returnCode = TransIOTensor(input, iTensor);
250 if (returnCode != OH_NN_SUCCESS) {
251 LOGE("Run failed, failed to transform to ioTensor.");
252 return OH_NN_FAILED;
253 }
254 if (iTensor.data.fd == INVALID_FD) {
255 LOGE("Transform inputs tensor failed, cannot find data file descriptor.");
256 return OH_NN_INVALID_PARAMETER;
257 }
258 iInputTensors.emplace_back(iTensor);
259 }
260
261 std::vector<V1_0::IOTensor> iOutputTensors;
262 for (const auto& output: outputs) {
263 auto returnCode = TransIOTensor(output, iTensor);
264 if (returnCode != OH_NN_SUCCESS) {
265 LOGE("Run failed, failed to transform to ioTensor.");
266 return OH_NN_FAILED;
267 }
268 if (iTensor.data.fd == INVALID_FD) {
269 LOGE("Transform outputs tensor failed, cannot find data file descriptor.");
270 return OH_NN_INVALID_PARAMETER;
271 }
272 iOutputTensors.emplace_back(iTensor);
273 }
274
275 auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims, isOutputBufferEnough);
276 if (ret != HDF_SUCCESS || outputsDims.empty()) {
277 LOGE("Run model failed. ErrorCode=%d", ret);
278 return OH_NN_UNAVAILABLE_DEVICE;
279 }
280
281 return OH_NN_SUCCESS;
282 }
283
GetModelID(uint32_t & modelId) const284 OH_NN_ReturnCode HDIPreparedModelV1_0::GetModelID(uint32_t& modelId) const
285 {
286 LOGE("hdi preparedModel V1_0 not support get model ID.");
287 return OH_NN_SUCCESS;
288 }
289 } // namespace NeuralNetworkRuntime
290 } // OHOS