1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "hdi_prepared_model_v2_0.h"
17
18 #include "common/log.h"
19 #include "hdi_returncode_utils.h"
20 #include "memory_manager.h"
21 #include "nntensor.h"
22
23 namespace OHOS {
24 namespace NeuralNetworkRuntime {
25 namespace {
TransDataType(const OH_NN_DataType & dataType)26 V2_0::DataType TransDataType(const OH_NN_DataType& dataType)
27 {
28 switch (dataType) {
29 case OH_NN_BOOL:
30 return V2_0::DataType::DATA_TYPE_BOOL;
31 case OH_NN_INT8:
32 return V2_0::DataType::DATA_TYPE_INT8;
33 case OH_NN_INT16:
34 return V2_0::DataType::DATA_TYPE_INT16;
35 case OH_NN_INT32:
36 return V2_0::DataType::DATA_TYPE_INT32;
37 case OH_NN_INT64:
38 return V2_0::DataType::DATA_TYPE_INT64;
39 case OH_NN_UINT8:
40 return V2_0::DataType::DATA_TYPE_UINT8;
41 case OH_NN_UINT16:
42 return V2_0::DataType::DATA_TYPE_UINT16;
43 case OH_NN_UINT32:
44 return V2_0::DataType::DATA_TYPE_UINT32;
45 case OH_NN_UINT64:
46 return V2_0::DataType::DATA_TYPE_UINT64;
47 case OH_NN_FLOAT16:
48 return V2_0::DataType::DATA_TYPE_FLOAT16;
49 case OH_NN_FLOAT32:
50 return V2_0::DataType::DATA_TYPE_FLOAT32;
51 case OH_NN_FLOAT64:
52 return V2_0::DataType::DATA_TYPE_FLOAT64;
53 default:
54 return V2_0::DataType::DATA_TYPE_UNKNOWN;
55 }
56 }
57
TransFormat(const OH_NN_Format & format)58 V2_0::Format TransFormat(const OH_NN_Format& format)
59 {
60 switch (format) {
61 case OH_NN_FORMAT_NCHW:
62 return V2_0::Format::FORMAT_NCHW;
63 case OH_NN_FORMAT_NHWC:
64 return V2_0::Format::FORMAT_NHWC;
65 default:
66 return V2_0::Format::FORMAT_NONE;
67 }
68 }
69
TransIOTensor(const IOTensor & tensor)70 V2_0::IOTensor TransIOTensor(const IOTensor& tensor)
71 {
72 V2_0::IOTensor iTensor;
73 iTensor.name = tensor.name;
74 iTensor.dataType = TransDataType(tensor.dataType);
75 iTensor.dimensions = tensor.dimensions;
76 iTensor.format = TransFormat(tensor.format);
77
78 V2_0::SharedBuffer iBuffer {INVALID_FD, 0, 0, 0};
79 if (tensor.data != nullptr) {
80 auto memManager = MemoryManager::GetInstance();
81 Memory memory;
82 auto ret = memManager->GetMemory(tensor.data, memory);
83 if (ret != OH_NN_SUCCESS) {
84 LOGE("Invalid Tensor buffer, cannot transform to fd.");
85 } else {
86 iBuffer.fd = memory.fd;
87 iBuffer.bufferSize = memory.length;
88 iBuffer.offset = 0;
89 iBuffer.dataSize = memory.length;
90 }
91 }
92 iTensor.data = iBuffer;
93
94 return iTensor;
95 }
96
TransIOTensor(const NN_Tensor * tensor,V2_0::IOTensor & ioTensor)97 OH_NN_ReturnCode TransIOTensor(const NN_Tensor* tensor, V2_0::IOTensor& ioTensor)
98 {
99 if (tensor == nullptr) {
100 LOGE("TransIOTensor failed, failed to transform to V2_0 IOTensor.");
101 return OH_NN_NULL_PTR;
102 }
103
104 const NNTensor2_0* nnTensor = reinterpret_cast<const NNTensor2_0*>(tensor);
105 TensorDesc* nnTensorDesc = nnTensor->GetTensorDesc();
106 if (nnTensorDesc == nullptr) {
107 LOGE("TransIOTensor failed, failed to get desc from tensor.");
108 return OH_NN_NULL_PTR;
109 }
110
111 // convert name
112 const char* tensorName = nullptr;
113 OH_NN_ReturnCode ret = nnTensorDesc->GetName(&tensorName);
114 if (ret != OH_NN_SUCCESS) {
115 LOGE("TransIOTensor failed, failed to get name from desc.");
116 return ret;
117 }
118 ioTensor.name = tensorName;
119
120 // convert data type
121 OH_NN_DataType dataType;
122 ret = nnTensorDesc->GetDataType(&dataType);
123 if (ret != OH_NN_SUCCESS) {
124 LOGE("TransIOTensor failed, failed to get data type from desc.");
125 return ret;
126 }
127 ioTensor.dataType = TransDataType(dataType);
128
129 // convert format
130 OH_NN_Format format;
131 ret = nnTensorDesc->GetFormat(&format);
132 if (ret != OH_NN_SUCCESS) {
133 LOGE("TransIOTensor failed, failed to get format from desc.");
134 return ret;
135 }
136 ioTensor.format = TransFormat(format);
137
138 // convert shape
139 int32_t* shape = nullptr;
140 size_t shapeNum = 0;
141 ret = nnTensorDesc->GetShape(&shape, &shapeNum);
142 if (ret != OH_NN_SUCCESS) {
143 LOGE("TransIOTensor failed, failed to get shape from desc.");
144 return ret;
145 }
146 ioTensor.dimensions.clear();
147 for (size_t i = 0; i < shapeNum; ++i) {
148 ioTensor.dimensions.emplace_back(shape[i]);
149 }
150
151 // convert data
152 if (!nnTensor->CheckTensorData()) {
153 LOGE("TransIOTensor failed, failed to check tensor data.");
154 return OH_NN_INVALID_PARAMETER;
155 }
156 V2_0::SharedBuffer iBuffer {nnTensor->GetFd(), nnTensor->GetSize(), nnTensor->GetOffset(), nnTensor->GetSize()};
157 ioTensor.data = iBuffer;
158
159 return OH_NN_SUCCESS;
160 }
} // unnamed namespace
162
HDIPreparedModelV2_0(OHOS::sptr<V2_0::IPreparedModel> hdiPreparedModel)163 HDIPreparedModelV2_0::HDIPreparedModelV2_0(OHOS::sptr<V2_0::IPreparedModel> hdiPreparedModel)
164 : m_hdiPreparedModel(hdiPreparedModel)
165 {
166 hdiPreparedModel->GetVersion(m_hdiVersion.first, m_hdiVersion.second);
167 }
168
~HDIPreparedModelV2_0()169 HDIPreparedModelV2_0::~HDIPreparedModelV2_0()
170 {
171 for (auto addr : m_addrs) {
172 auto memManager = MemoryManager::GetInstance();
173 OH_NN_ReturnCode ret = memManager->UnMapMemory(addr);
174 if (ret != OH_NN_SUCCESS) {
175 LOGE("~HDIPreparedModelV2_0 UnMapMemory failed.");
176 }
177 }
178 }
179
ExportModelCache(std::vector<Buffer> & modelCache)180 OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector<Buffer>& modelCache)
181 {
182 if (!modelCache.empty()) {
183 LOGE("The vector of modelCache should be empty. size=%{public}zu", modelCache.size());
184 return OH_NN_INVALID_PARAMETER;
185 }
186
187 std::vector<V2_0::SharedBuffer> iBuffers;
188 auto ret = m_hdiPreparedModel->ExportModelCache(iBuffers);
189 if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
190 return CheckReturnCode(ret, OH_NN_SAVE_CACHE_EXCEPTION, "Export model cache failed");
191 }
192
193 auto memManager = MemoryManager::GetInstance();
194 size_t iBuffersSize = iBuffers.size();
195 for (size_t i = 0; i < iBuffersSize; i++) {
196 auto addr = memManager->MapMemory(iBuffers[i].fd, iBuffers[i].bufferSize);
197 if (addr == nullptr) {
198 LOGE("Export the %{public}zuth model cache failed, cannot not map fd to address.", i + 1);
199 return OH_NN_MEMORY_ERROR;
200 }
201 m_addrs.emplace_back(addr);
202 Buffer modelbuffer {addr, iBuffers[i].bufferSize};
203 modelCache.emplace_back(modelbuffer);
204 }
205
206 return OH_NN_SUCCESS;
207 }
208
Run(const std::vector<IOTensor> & inputs,const std::vector<IOTensor> & outputs,std::vector<std::vector<int32_t>> & outputsDims,std::vector<bool> & isOutputBufferEnough)209 OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
210 std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
211 {
212 V2_0::IOTensor iTensor;
213 std::vector<V2_0::IOTensor> iInputTensors;
214 for (const auto& input: inputs) {
215 iTensor = TransIOTensor(input);
216 if (iTensor.data.fd == INVALID_FD) {
217 LOGE("Transform inputs tensor failed, cannot find data file descriptor.");
218 return OH_NN_INVALID_PARAMETER;
219 }
220 iInputTensors.emplace_back(iTensor);
221 }
222
223 std::vector<V2_0::IOTensor> iOutputTensors;
224 for (const auto& output: outputs) {
225 iTensor = TransIOTensor(output);
226 if (iTensor.data.fd == INVALID_FD) {
227 LOGE("Transform outputs tensor failed, cannot find data file descriptor.");
228 return OH_NN_INVALID_PARAMETER;
229 }
230 iOutputTensors.emplace_back(iTensor);
231 }
232
233 auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims);
234 if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
235 return CheckReturnCode(ret, OH_NN_UNAVAILABLE_DEVICE, "Run model failed");
236 }
237 if (outputsDims.empty()) {
238 LOGE("Run failed, outputsDims is empty.");
239 return OH_NN_UNAVAILABLE_DEVICE;
240 }
241
242 return OH_NN_SUCCESS;
243 }
244
Run(const std::vector<NN_Tensor * > & inputs,const std::vector<NN_Tensor * > & outputs,std::vector<std::vector<int32_t>> & outputsDims,std::vector<bool> & isOutputBufferEnough)245 OH_NN_ReturnCode HDIPreparedModelV2_0::Run(const std::vector<NN_Tensor*>& inputs,
246 const std::vector<NN_Tensor*>& outputs, std::vector<std::vector<int32_t>>& outputsDims,
247 std::vector<bool>& isOutputBufferEnough)
248 {
249 V2_0::IOTensor iTensor;
250 std::vector<V2_0::IOTensor> iInputTensors;
251 for (const auto& input: inputs) {
252 auto returnCode = TransIOTensor(input, iTensor);
253 if (returnCode != OH_NN_SUCCESS) {
254 LOGE("Run failed, failed to transform to ioTensor.");
255 return OH_NN_FAILED;
256 }
257 if (iTensor.data.fd == INVALID_FD) {
258 LOGE("Transform inputs tensor failed, cannot find data file descriptor.");
259 return OH_NN_INVALID_PARAMETER;
260 }
261 iInputTensors.emplace_back(iTensor);
262 }
263
264 std::vector<V2_0::IOTensor> iOutputTensors;
265 for (const auto& output: outputs) {
266 auto returnCode = TransIOTensor(output, iTensor);
267 if (returnCode != OH_NN_SUCCESS) {
268 LOGE("Run failed, failed to transform to ioTensor.");
269 return OH_NN_FAILED;
270 }
271 if (iTensor.data.fd == INVALID_FD) {
272 LOGE("Transform outputs tensor failed, cannot find data file descriptor.");
273 return OH_NN_INVALID_PARAMETER;
274 }
275 iOutputTensors.emplace_back(iTensor);
276 }
277
278 auto ret = m_hdiPreparedModel->Run(iInputTensors, iOutputTensors, outputsDims);
279 if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
280 return CheckReturnCode(ret, OH_NN_UNAVAILABLE_DEVICE, "Run model failed");
281 }
282 if (outputsDims.empty()) {
283 LOGE("Run failed, outputsDims is empty.");
284 return OH_NN_UNAVAILABLE_DEVICE;
285 }
286
287 return OH_NN_SUCCESS;
288 }
289
GetInputDimRanges(std::vector<std::vector<uint32_t>> & minInputDims,std::vector<std::vector<uint32_t>> & maxInputDims)290 OH_NN_ReturnCode HDIPreparedModelV2_0::GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
291 std::vector<std::vector<uint32_t>>& maxInputDims)
292 {
293 auto ret = m_hdiPreparedModel->GetInputDimRanges(minInputDims, maxInputDims);
294 if (ret != V2_0::NNRT_ReturnCode::NNRT_SUCCESS) {
295 return CheckReturnCode(ret, OH_NN_UNAVAILABLE_DEVICE, "Get input dim ranges failed");
296 }
297
298 return OH_NN_SUCCESS;
299 }
300
// The V2_0 HDI interface does not provide a model ID; modelId is left
// unmodified.
// NOTE(review): this logs an error yet returns OH_NN_SUCCESS — presumably so
// that callers treat the missing ID as non-fatal. Confirm that no caller
// relies on modelId being written here.
OH_NN_ReturnCode HDIPreparedModelV2_0::GetModelID(uint32_t& modelId) const
{
    LOGE("hdi preparedModel V2_0 not support get model ID.");
    return OH_NN_SUCCESS;
}
306 } // namespace NeuralNetworkRuntime
} // namespace OHOS
308