1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "inner_model.h"
17 
18 #include <new>
19 #include <unordered_map>
20 #include <vector>
21 
22 #include "securec.h"
23 
24 #include "common/utils.h"
25 #include "common/scoped_trace.h"
26 #include "backend_manager.h"
27 #include "validation.h"
28 #include "ops_builder.h"
29 #include "ops_registry.h"
30 #include "transform.h"
31 #include "nnbackend.h"
32 
33 namespace MSLITE = mindspore::lite;
34 
35 namespace OHOS {
36 namespace NeuralNetworkRuntime {
37 const std::string NNR_MODEL = "NNR_Model";
38 const std::string LOADED_NNR_MODEL = "Loaded_NNR_Model";
39 
40 namespace {
/* Custom deleter for shared_ptr<MSLITE::LiteGraph>: the graph must be released
 * through the MindIR C API (MindIR_LiteGraph_Destroy), not operator delete. */
class LiteGraphDeleter {
public:
    void operator()(MSLITE::LiteGraph* liteGraph) const
    {
        MindIR_LiteGraph_Destroy(&liteGraph);
    }
};
48 
ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor)49 std::shared_ptr<NNTensor> ConstructNNTensorFromLiteGraphTensor(const MSLITE::TensorPtr msTensor)
50 {
51     MSLITE::DataType msDataType = MSLITE::MindIR_Tensor_GetDataType(msTensor);
52     OH_NN_DataType dataType = MSToNN::TransformDataType(msDataType);
53     std::vector<int32_t> msDims = MSLITE::MindIR_Tensor_GetDims(msTensor);
54     std::vector<MSLITE::QuantParam> msQuantParams = MSLITE::MindIR_Tensor_GetQuantParams(msTensor);
55     std::vector<QuantParam> nnQuantParams = MSToNN::TransformQuantParams(msQuantParams);
56     OH_NN_Format nnFormat = MSToNN::TransformFormat(MSLITE::MindIR_Tensor_GetFormat(msTensor));
57 
58     std::shared_ptr<NNTensor> nnTensor = CreateSharedPtr<NNTensor>();
59     if (nnTensor == nullptr) {
60         LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when creating NNTensor.");
61         return nullptr;
62     }
63 
64     OH_NN_ReturnCode ret = nnTensor->Build(dataType, msDims, nnQuantParams, OH_NN_TENSOR);
65     if (ret != OH_NN_SUCCESS) {
66         LOGE("ConstructNNTensorFromLiteGraphTensor failed, error happened when building NNTensor with attributes.");
67         return nullptr;
68     }
69 
70     nnTensor->SetFormat(nnFormat);
71 
72     return nnTensor;
73 }
74 
ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph * liteGraph,const std::vector<uint32_t> & indices,std::vector<std::shared_ptr<NNTensor>> & nnTensors)75 OH_NN_ReturnCode ConstructNNTensorsFromLiteGraph(const MSLITE::LiteGraph* liteGraph,
76                                                  const std::vector<uint32_t>& indices,
77                                                  std::vector<std::shared_ptr<NNTensor>>& nnTensors)
78 {
79     if (indices.empty()) {
80         LOGE("ConstructNNTensorsFromLiteGraph failed, passed empty indices list.");
81         return OH_NN_INVALID_PARAMETER;
82     }
83 
84     uint32_t maximumIndex = *(std::max_element(indices.begin(), indices.end()));
85     if (maximumIndex >= liteGraph->all_tensors_.size()) {
86         LOGE("ConstructNNTensorsFromLiteGraph failed, index exceed size of all_tensors inside liteGraph.");
87         return OH_NN_INVALID_PARAMETER;
88     }
89 
90     std::shared_ptr<NNTensor> nnTensor;
91     for (uint32_t i : indices) {
92         nnTensor = ConstructNNTensorFromLiteGraphTensor(liteGraph->all_tensors_[i]);
93         if (nnTensor == nullptr) {
94             LOGE("ConstructNNTensorsFromLiteGraph failed, failed to construct NNTensor from LiteGraphTensor.");
95             return OH_NN_NULL_PTR;
96         }
97 
98         nnTensors.emplace_back(nnTensor);
99     }
100 
101     return OH_NN_SUCCESS;
102 }
103 } // anonymous namespace
104 
InnerModel()105 InnerModel::InnerModel() {}
106 
IsBuild() const107 bool InnerModel::IsBuild() const
108 {
109     return ((m_liteGraph != nullptr) || (m_metaGraph != nullptr));
110 }
111 
/**
 * Adopts a pre-built LiteGraph as this model.
 *
 * On success the model takes ownership of liteGraph (it is later released
 * through MindIR_LiteGraph_Destroy via LiteGraphDeleter) and renames it to
 * LOADED_NNR_MODEL. Forbidden when the model was already built/loaded or when
 * tensors/operations were added manually beforehand.
 *
 * @param liteGraph Graph to adopt; must not be nullptr.
 * @param extensionConfig Extra configuration stored for later use.
 * @return OH_NN_SUCCESS, or OH_NN_INVALID_PARAMETER / OH_NN_OPERATION_FORBIDDEN
 *         / an error propagated from tensor construction.
 */
OH_NN_ReturnCode InnerModel::BuildFromLiteGraph(const MSLITE::LiteGraph* liteGraph,
    const ExtensionConfig& extensionConfig)
{
    NNRT_TRACE_NAME("Build model from lite graph");
    if (liteGraph == nullptr) {
        LOGE("BuildFromLiteGraph failed, passed empty liteGraph.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (IsBuild()) {
        LOGE("BuildFromLiteGraph failed, inner model has been built or loaded before.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (!m_allTensors.empty() || !m_ops.empty()) {
        LOGE("BuildFromLiteGraph failed, please LoadLiteGraph without adding tensor and operations.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    // Mirror the graph's declared inputs/outputs as NNTensors.
    m_inputTensors.clear();
    OH_NN_ReturnCode ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->input_indices_, m_inputTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("BuildFromLiteGraph failed, error happened when constructing input NNTensors from liteGraph.");
        return ret;
    }

    m_outputTensors.clear();
    ret = ConstructNNTensorsFromLiteGraph(liteGraph, liteGraph->output_indices_, m_outputTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("BuildFromLiteGraph failed, error happened when constructing output NNTensors from liteGraph.");
        return ret;
    }

    // Ownership of liteGraph transfers here; the caller must not destroy it.
    m_liteGraph.reset(const_cast<MSLITE::LiteGraph*>(liteGraph), LiteGraphDeleter());
    m_liteGraph->name_ = LOADED_NNR_MODEL;

    m_extensionConfig = extensionConfig;

    return OH_NN_SUCCESS;
}
152 
BuildFromMetaGraph(const void * metaGraph,const ExtensionConfig & extensionConfig)153 OH_NN_ReturnCode InnerModel::BuildFromMetaGraph(const void* metaGraph, const ExtensionConfig& extensionConfig)
154 {
155     NNRT_TRACE_NAME("Build model from meta graph");
156     if (metaGraph == nullptr) {
157         LOGE("BuildFromMetaGraph failed, passed empty metaGraph.");
158         return OH_NN_INVALID_PARAMETER;
159     }
160 
161     if (IsBuild()) {
162         LOGE("BuildFromMetaGraph failed, inner model has been built or loaded before.");
163         return OH_NN_OPERATION_FORBIDDEN;
164     }
165 
166     if (m_allTensors.empty()) {
167         LOGE("BuildFromMetaGraph failed, SetInputsAndOutputsInfo should be called before building metaGraph.");
168         return OH_NN_OPERATION_FORBIDDEN;
169     }
170 
171     m_metaGraph = const_cast<void*>(metaGraph);
172     m_extensionConfig = extensionConfig;
173     return OH_NN_SUCCESS;
174 }
175 
AddTensor(const OH_NN_Tensor & nnTensor)176 OH_NN_ReturnCode InnerModel::AddTensor(const OH_NN_Tensor& nnTensor)
177 {
178     if (IsBuild()) {
179         LOGE("AddTensor failed, AddTensor is forbidden after model has been built.");
180         return OH_NN_OPERATION_FORBIDDEN;
181     }
182 
183     std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
184     if (tensor == nullptr) {
185         LOGE("AddTensor failed, error happened when creating NNTensor.");
186         return OH_NN_MEMORY_ERROR;
187     }
188 
189     OH_NN_ReturnCode ret = tensor->BuildFromOHNNTensor(nnTensor);
190     if (ret != OH_NN_SUCCESS) {
191         LOGE("AddTensor failed, error happened when build NNTensor from OH_NN_Tensor.");
192         return ret;
193     }
194 
195     // The NNTensor is named as "Tensor: <tensor index>"".
196     tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
197     m_allTensors.emplace_back(tensor);
198 
199     return OH_NN_SUCCESS;
200 }
201 
AddTensorDesc(const NN_TensorDesc * nnTensorDesc)202 OH_NN_ReturnCode InnerModel::AddTensorDesc(const NN_TensorDesc* nnTensorDesc)
203 {
204     if (nnTensorDesc == nullptr) {
205         LOGE("AddTensorDesc failed, passed nullptr to nnTensorDesc.");
206         return OH_NN_INVALID_PARAMETER;
207     }
208 
209     std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
210     if (tensor == nullptr) {
211         LOGE("AddTensorDesc failed, error happened when creating NNTensor.");
212         return OH_NN_MEMORY_ERROR;
213     }
214 
215     OH_NN_ReturnCode returnCode = tensor->BuildFromTensorDesc(nnTensorDesc);
216     if (returnCode != OH_NN_SUCCESS) {
217         LOGE("AddTensorDesc failed, error happened when build NNTensor from OH_NNCore_TensorDesc.");
218         return returnCode;
219     }
220 
221     // The NNTensor is named as "Tensor: <tensor index>"".
222     tensor->SetName("Tensor: " + std::to_string(m_allTensors.size()));
223     m_allTensors.emplace_back(tensor);
224 
225     return OH_NN_SUCCESS;
226 }
227 
SetTensorType(uint32_t index,OH_NN_TensorType tensorType)228 OH_NN_ReturnCode InnerModel::SetTensorType(uint32_t index, OH_NN_TensorType tensorType)
229 {
230     if (IsBuild()) {
231         LOGE("SetTensorType failed, SetTensorType is forbidden after model has been built.");
232         return OH_NN_OPERATION_FORBIDDEN;
233     }
234 
235     if (index >= m_allTensors.size()) {
236         LOGE("SetTensorType failed, passed index %u out of the number of added tensors.", index);
237         return OH_NN_INVALID_PARAMETER;
238     }
239 
240     std::shared_ptr<NNTensor> tensor = m_allTensors[index];
241     OH_NN_ReturnCode returnCode = tensor->SetTensorType(tensorType);
242     if (returnCode != OH_NN_SUCCESS) {
243         LOGE("SetTensorType failed, error happened when setting tensor type.");
244     }
245 
246     return returnCode;
247 }
248 
SetTensorQuantParam(uint32_t index,const NN_QuantParam * quantParam)249 OH_NN_ReturnCode InnerModel::SetTensorQuantParam(uint32_t index, const NN_QuantParam* quantParam)
250 {
251     if (IsBuild()) {
252         LOGE("SetTensorQuantParam failed, SetTensorValue is forbidden after model has been built.");
253         return OH_NN_OPERATION_FORBIDDEN;
254     }
255 
256     if (index >= m_allTensors.size()) {
257         LOGE("SetTensorQuantParam failed, passed index %u out of the number of added tensors.", index);
258         return OH_NN_INVALID_PARAMETER;
259     }
260 
261     std::shared_ptr<NNTensor> tensor = m_allTensors[index];
262     // quantParam is validated in outer function, no need to check it here.
263     OH_NN_ReturnCode returnCode = tensor->SetQuantParam(quantParam);
264     if (returnCode != OH_NN_SUCCESS) {
265         LOGE("SetTensorQuantParam failed, error happened when set quant param.");
266     }
267 
268     return returnCode;
269 }
270 
// TODO: reduce the cyclomatic complexity of this function.
SetTensorValue(uint32_t index,const void * buffer,size_t length)272 OH_NN_ReturnCode InnerModel::SetTensorValue(uint32_t index, const void* buffer, size_t length)
273 {
274     if (IsBuild()) {
275         LOGE("SetTensorValue failed, SetTensorValue is forbidden after model has been built.");
276         return OH_NN_OPERATION_FORBIDDEN;
277     }
278 
279     if (index >= m_allTensors.size()) {
280         LOGE("SetTensorValue failed, passed index %u out of the number of added tensors.", index);
281         return OH_NN_INVALID_PARAMETER;
282     }
283 
284     const std::shared_ptr<NNTensor> tensor = m_allTensors[index];
285     if (tensor->GetBuffer() != nullptr) {
286         LOGE("SetTensorValue failed, tensor has been set value twice. Tensor index: %u.", index);
287         return OH_NN_INVALID_PARAMETER;
288     }
289 
290     if (buffer == nullptr) {
291         LOGW("SetTensorValue passed empty buffer, which makes no effect.");
292         return OH_NN_SUCCESS;
293     }
294 
295     if (tensor->IsDynamicShape()) {
296         LOGE("SetTensorValue failed, cannot set value to tensor with dynamic shape.");
297         return OH_NN_OPERATION_FORBIDDEN;
298     }
299 
300     if (length != tensor->GetDataLength()) {
301         LOGE("SetTensorValue failed, get buffer length %zu different from the byte size of tensor %zu.",
302              length, tensor->GetDataLength());
303         return OH_NN_INVALID_PARAMETER;
304     }
305 
306     // Data will be released inside NNTensor if it is set inside NNTensor using SetBuffer().
307     void* data = new (std::nothrow) char[length];
308     if (data == nullptr) {
309         LOGE("SetTensorValue failed, please check whether it runs out of memory.");
310         return OH_NN_MEMORY_ERROR;
311     }
312 
313     errno_t ret = memcpy_s(data, length, buffer, length);
314     if (ret != EOK) {
315         LOGE("SetTensorValue failed, please the information of error number %d from memcpy_s.", ret);
316         delete [] reinterpret_cast<char*>(data);
317         return OH_NN_FAILED;
318     }
319 
320     tensor->SetBuffer(data, length);
321     return OH_NN_SUCCESS;
322 }
323 
ValidateInputAndOutput(const OH_NN_UInt32Array & inputIndices,const OH_NN_UInt32Array & outputIndices) const324 OH_NN_ReturnCode InnerModel::ValidateInputAndOutput(
325     const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const
326 {
327     OH_NN_ReturnCode ret = ValidateTensorArray(inputIndices);
328     if (ret != OH_NN_SUCCESS) {
329         LOGE("ValidateInputAndOutput failed, please check input indices.");
330         return ret;
331     }
332 
333     ret = ValidateTensorArray(outputIndices);
334     if (ret != OH_NN_SUCCESS) {
335         LOGE("ValidateInputAndOutput failed, please check output indices.");
336         return ret;
337     }
338 
339     if (inputIndices.size == 0) {
340         LOGE("ValidateInputAndOutput failed, passed empty input indices.");
341         return OH_NN_INVALID_PARAMETER;
342     }
343 
344     if (outputIndices.size == 0) {
345         LOGE("ValidateInputAndOutput failed, passed empty output indices.");
346         return OH_NN_INVALID_PARAMETER;
347     }
348 
349     std::shared_ptr<NNTensor> tensor{nullptr};
350     for (uint32_t i = 0; i < inputIndices.size; i++) {
351         tensor = m_allTensors[inputIndices.data[i]];
352         if (tensor->GetType() != OH_NN_TENSOR) {
353             LOGE("ValidateInputAndOutput failed, tensor set as input should has type of OH_NN_TENSOR, but receive %d."
354                  "Tensor index: %u.", tensor->GetType(), i);
355             return OH_NN_INVALID_PARAMETER;
356         }
357     }
358 
359     for (uint32_t i = 0; i < outputIndices.size; i++) {
360         tensor = m_allTensors[outputIndices.data[i]];
361         if (tensor->GetType() != OH_NN_TENSOR) {
362             LOGE("ValidateInputAndOutput failed, tensor set as output should has type of OH_NN_TENSOR, but receive %d."
363                  "Tensor index: %u.", tensor->GetType(), i);
364             return OH_NN_INVALID_PARAMETER;
365         }
366     }
367 
368     // The number of inputIndices and outputIndices are usually small, so O(n**2) iteration is fine.
369     for (uint32_t i = 0; i < inputIndices.size; i++) {
370         for (uint32_t j = 0; j < outputIndices.size; j++) {
371             if (inputIndices.data[i] == outputIndices.data[j]) {
372                 LOGE("ValidateInputAndOutput failed, should not set an tensor as input and output at the same time, "
373                      "input index %u, output index %u", inputIndices.data[i], outputIndices.data[j]);
374                 return OH_NN_INVALID_PARAMETER;
375             }
376         }
377     }
378     return OH_NN_SUCCESS;
379 }
380 
381 /* Check whether the indices exceed the number of added tensors. */
ValidateTensorArray(const OH_NN_UInt32Array & indices) const382 OH_NN_ReturnCode InnerModel::ValidateTensorArray(const OH_NN_UInt32Array& indices) const
383 {
384     OH_NN_ReturnCode ret = Validation::ValidateArray(indices.data, indices.size);
385     if (ret != OH_NN_SUCCESS) {
386         LOGE("ValidateTensorArray failed, please check the validity of indices.");
387         return ret;
388     }
389 
390     size_t allTensorsSize = m_allTensors.size();
391     for (uint32_t i = 0; i < indices.size; i++) {
392         if (indices.data[i] >= allTensorsSize) {
393             LOGE("ValidateTensors failed, index %{public}u is out of the number of added tensors.", indices.data[i]);
394             return OH_NN_INVALID_PARAMETER;
395         }
396     }
397 
398     return OH_NN_SUCCESS;
399 }
400 
AddOperation(OH_NN_OperationType opType,const OH_NN_UInt32Array & paramIndices,const OH_NN_UInt32Array & inputIndices,const OH_NN_UInt32Array & outputIndices)401 OH_NN_ReturnCode InnerModel::AddOperation(OH_NN_OperationType opType, const OH_NN_UInt32Array& paramIndices,
402                                           const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
403 {
404     if (IsBuild()) {
405         LOGE("AddOperation failed, AddOperation is forbidden after model has been built.");
406         return OH_NN_OPERATION_FORBIDDEN;
407     }
408 
409     OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
410     if (ret != OH_NN_SUCCESS) {
411         LOGE("AddOperation failed, please check inputIndices and outputIndices.");
412         return ret;
413     }
414     std::vector<uint32_t> inputs = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
415     std::vector<uint32_t> outputs = ConstructVectorFromArray(outputIndices.data, outputIndices.size);
416 
417     ret = ValidateTensorArray(paramIndices);
418     if (ret != OH_NN_SUCCESS) {
419         LOGE("AddOperation failed, please check paramIndices.");
420         return ret;
421     }
422     std::vector<uint32_t> parameters = ConstructVectorFromArray(paramIndices.data, paramIndices.size);
423 
424     const Ops::OpsRegistry& opsRegistry = Ops::OpsRegistry::GetSingleton();
425     std::unique_ptr<Ops::OpsBuilder> opsBuilder = opsRegistry.GetOpsBuilder(opType);
426     if (opsBuilder == nullptr) {
427         LOGE("AddOperation failed, cannot add operation of type: %d.", opType);
428         return OH_NN_INVALID_PARAMETER;
429     }
430 
431     ret = opsBuilder->Build(parameters, inputs, outputs, m_allTensors);
432     if (ret != OH_NN_SUCCESS) {
433         LOGE("AddOperation failed, error happens when build operations.");
434         return ret;
435     }
436 
437     m_ops.emplace_back(std::move(opsBuilder));
438     return OH_NN_SUCCESS;
439 }
440 
SpecifyInputsAndOutputs(const OH_NN_UInt32Array & inputIndices,const OH_NN_UInt32Array & outputIndices)441 OH_NN_ReturnCode InnerModel::SpecifyInputsAndOutputs(
442     const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices)
443 {
444     if (IsBuild()) {
445         LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs is forbidden after model has been built.");
446         return OH_NN_OPERATION_FORBIDDEN;
447     }
448 
449     if (!m_inputTensors.empty()) {
450         LOGE("SpecifyInputsAndOutputs failed, SpecifyInputsAndOutputs should not be called twice.");
451         return OH_NN_OPERATION_FORBIDDEN;
452     }
453 
454     OH_NN_ReturnCode ret = ValidateInputAndOutput(inputIndices, outputIndices);
455     if (ret != OH_NN_SUCCESS) {
456         LOGE("SpecifyInputsAndOutputs failed, please check inputIndices and outputIndices.");
457         return ret;
458     }
459 
460     m_inputIndices = ConstructVectorFromArray(inputIndices.data, inputIndices.size);
461     m_outputIndices = ConstructVectorFromArray(outputIndices.data, outputIndices.size);
462 
463     std::transform(m_inputIndices.begin(), m_inputIndices.end(), std::back_inserter(m_inputTensors),
464         [this](uint32_t i) {
465             return m_allTensors[i];
466         });
467 
468     std::transform(m_outputIndices.begin(), m_outputIndices.end(), std::back_inserter(m_outputTensors),
469         [this](uint32_t i) {
470             return m_allTensors[i];
471         });
472 
473     return OH_NN_SUCCESS;
474 }
475 
CheckParameters() const476 OH_NN_ReturnCode InnerModel::CheckParameters() const
477 {
478     if (m_liteGraph != nullptr) {
479         LOGE("CheckParameters failed, liteGraph is not nullptr.");
480         return OH_NN_OPERATION_FORBIDDEN;
481     }
482 
483     if (m_metaGraph != nullptr) {
484         LOGE("CheckParameters failed, metaGraph is not nullptr.");
485         return OH_NN_OPERATION_FORBIDDEN;
486     }
487 
488     if (!m_allTensors.empty()) {
489         LOGE("CheckParameters failed, m_allTensors is not empty.");
490         return OH_NN_OPERATION_FORBIDDEN;
491     }
492 
493     if (!(m_inputTensors.empty() && (m_inputIndices.empty()))) {
494         LOGE("CheckParameters failed, m_inputTensors is not empty.");
495         return OH_NN_OPERATION_FORBIDDEN;
496     }
497 
498     if (!(m_outputTensors.empty() && (m_outputIndices.empty()))) {
499         LOGE("CheckParameters failed, m_outputTensors is not empty.");
500         return OH_NN_OPERATION_FORBIDDEN;
501     }
502 
503     return OH_NN_SUCCESS;
504 }
505 
SetInputsAndOutputsInfo(const OH_NN_TensorInfo * inputsInfo,size_t inputSize,const OH_NN_TensorInfo * outputsInfo,size_t outputSize)506 OH_NN_ReturnCode InnerModel::SetInputsAndOutputsInfo(const OH_NN_TensorInfo* inputsInfo, size_t inputSize,
507     const OH_NN_TensorInfo* outputsInfo, size_t outputSize)
508 {
509     OH_NN_ReturnCode ret = CheckParameters();
510     if (ret != OH_NN_SUCCESS) {
511         LOGE("SetInputsAndOutputsInfo failed, error happened when checking parameters.");
512         return ret;
513     }
514 
515     // 根据inputsInfo设置输入NNTensor
516     for (size_t i = 0; i < inputSize; ++i) {
517         std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
518         if (tensor == nullptr) {
519             LOGE("SetInputsAndOutputsInfo failed, error happened when creating input NNTensor.");
520             return OH_NN_MEMORY_ERROR;
521         }
522 
523         ret = tensor->BuildFromOHNNTensorInfo(inputsInfo[i]);
524         if (ret != OH_NN_SUCCESS) {
525             LOGE("SetInputsAndOutputsInfo failed, error happened when building input NNTensor from info.");
526             return ret;
527         }
528         m_inputIndices.emplace_back(i);
529         m_allTensors.emplace_back(tensor);
530         m_inputTensors.emplace_back(tensor);
531     }
532 
533     // 根据outputsInfo设置输入NNTensor
534     for (size_t i = 0; i < outputSize; ++i) {
535         std::shared_ptr<NNTensor> tensor = CreateSharedPtr<NNTensor>();
536         if (tensor == nullptr) {
537             LOGE("SetInputsAndOutputsInfo failed, error happened when creating output NNTensor.");
538             return OH_NN_MEMORY_ERROR;
539         }
540 
541         ret = tensor->BuildFromOHNNTensorInfo(outputsInfo[i]);
542         if (ret != OH_NN_SUCCESS) {
543             LOGE("SetInputsAndOutputsInfo failed, error happened when building output NNTensor from info.");
544             return ret;
545         }
546         m_outputIndices.emplace_back(i + inputSize);
547         m_allTensors.emplace_back(tensor);
548         m_outputTensors.emplace_back(tensor);
549     }
550 
551     return OH_NN_SUCCESS;
552 }
553 
Build()554 OH_NN_ReturnCode InnerModel::Build()
555 {
556     NNRT_TRACE_NAME("Build model");
557     if (IsBuild()) {
558         LOGE("Build failed, OH_NNModel_Finish() shouldn't be called after OH_NNModel_Finish() or "
559              "OH_NNModel_BuildFromMetaGraph() or OH_NNModel_BuildFromLiteGraph().");
560         return OH_NN_OPERATION_FORBIDDEN;
561     }
562 
563     if (m_allTensors.empty()) {
564         LOGE("Build failed, no OH_NN_Tensor has been added. Must call AddTensor before Build().");
565         return OH_NN_OPERATION_FORBIDDEN;
566     }
567 
568     if (m_ops.empty()) {
569         LOGE("Build failed, no operation has beed added. Must call AddOperation before Build().");
570         return OH_NN_OPERATION_FORBIDDEN;
571     }
572 
573     if ((m_inputIndices.empty()) || (m_outputIndices.empty())) {
574         LOGE("Build failed, inputs and outputs are unspecified. Must call SpecifyInputsAndOutputs before Build().");
575         return OH_NN_OPERATION_FORBIDDEN;
576     }
577 
578     MSLITE::LiteGraph* pLiteGraph = new (std::nothrow) MSLITE::LiteGraph();
579     if (pLiteGraph == nullptr) {
580         LOGE("Build failed, error happend when creating LiteGraph.");
581         return OH_NN_MEMORY_ERROR;
582     }
583     m_liteGraph.reset(pLiteGraph, LiteGraphDeleter());
584 
585     m_liteGraph->name_ = NNR_MODEL;
586 
587     std::unordered_map<uint32_t, uint32_t> modelIDToGraphID;
588     AddTensorsToLiteGraph(modelIDToGraphID);
589 
590     OH_NN_ReturnCode ret = AddNodesToLiteGraph(modelIDToGraphID);
591     if (ret != OH_NN_SUCCESS) {
592         return ret;
593     }
594 
595     // subGraph will be released by LiteGraph if it is added into instance of LiteGraph.
596     MSLITE::LiteGraph::SubGraph* subGraph = new (std::nothrow) MSLITE::LiteGraph::SubGraph();
597     if (subGraph == nullptr) {
598         LOGE("AddNodesToLiteGraph failed, error happened when creating subgraph.");
599         return OH_NN_NULL_PTR;
600     }
601 
602     subGraph->name_ = "NNRt_SubGraph"; // Name of subGraph
603     subGraph->input_indices_ = m_liteGraph->input_indices_;
604     subGraph->output_indices_ = m_liteGraph->output_indices_;
605     uint32_t nodeCount = static_cast<uint32_t>(m_ops.size()); // m_ops.size() smaller than UINT32_MAX
606     for (uint32_t i = 0; i < nodeCount; i++) {
607         subGraph->node_indices_.emplace_back(i);
608     }
609     m_liteGraph->sub_graphs_.emplace_back(subGraph);
610 
611     return OH_NN_SUCCESS;
612 }
613 
AddTensorsToLiteGraph(std::unordered_map<uint32_t,uint32_t> & modelIDToGraphID)614 void InnerModel::AddTensorsToLiteGraph(std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
615 {
616     uint32_t graphID = 0;
617     LiteGraphTensorPtr tensor(nullptr, DestroyLiteGraphTensor);
618     size_t tensorCount = m_allTensors.size();
619     for (size_t i = 0; i < tensorCount; i++) {
620         const std::shared_ptr<NNTensor>& nnTensor = m_allTensors[i];
621         // If the tensor is used as operation parameter, it will not convert to the tensor of LiteGraph.
622         if (nnTensor->IsOpParameter()) {
623             continue;
624         }
625 
626         tensor = nnTensor->ConvertToLiteGraphTensor();
627         m_liteGraph->all_tensors_.emplace_back(tensor.release());
628         modelIDToGraphID[i] = graphID++;
629     }
630 
631     // Note: Indices in m_inputIndices and m_outputIndices have been checked in SpecifyInputAndOutput(), there is no
632     // need to check twice.
633     std::vector<uint32_t>& inputIndices = m_liteGraph->input_indices_;
634     std::transform(m_inputIndices.begin(), m_inputIndices.end(), std::back_inserter(inputIndices),
635         [modelIDToGraphID](uint32_t index) {return modelIDToGraphID.at(index);});
636 
637     std::vector<uint32_t>& outputIndices = m_liteGraph->output_indices_;
638     std::transform(m_outputIndices.begin(), m_outputIndices.end(), std::back_inserter(outputIndices),
639         [modelIDToGraphID](uint32_t index) {return modelIDToGraphID.at(index);});
640 }
641 
/**
 * Converts each OpsBuilder in m_ops into a LiteGraph::Node appended to
 * m_liteGraph->all_nodes_, remapping tensor indices through modelIDToGraphID.
 *
 * Ownership: nodes and released primitives are owned by the LiteGraph once
 * appended; a node that fails before being appended is deleted here.
 */
OH_NN_ReturnCode InnerModel::AddNodesToLiteGraph(const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID)
{
    MSLITE::LiteGraph::Node* node{nullptr};
    size_t opCount = m_ops.size();
    // NOTE(review): the deleter here is DestroyLiteGraphTensor — presumably
    // this matches LiteGraphPrimitvePtr's declared deleter type; confirm.
    Ops::LiteGraphPrimitvePtr primitive = {nullptr, DestroyLiteGraphTensor};
    for (size_t i = 0; i < opCount; i++) {
        std::unique_ptr<Ops::OpsBuilder>& op = m_ops[i];
        // node will be released by LiteGraph if it is added into instance of LiteGraph.
        node = new(std::nothrow) MSLITE::LiteGraph::Node();
        if (node == nullptr) {
            LOGE("AddNodesToLiteGraph failed, error happened when creating LiteGraph tensor.");
            return OH_NN_NULL_PTR;
        }

        // Node name is "<op name>:<node position>" for debugging.
        node->name_ = op->GetName() + ":" + std::to_string(i);
        node->quant_type_ = NNToMS::TransformQuantType(op->GetQuantType());

        // Remap the op's model tensor indices to LiteGraph tensor indices.
        op->GetInputIndex(node->input_indices_, modelIDToGraphID);
        op->GetOutputIndex(node->output_indices_, modelIDToGraphID);

        primitive = op->GetPrimitive();
        if (primitive == nullptr) {
            LOGE("Build %s primitive failed.", op->GetName().c_str());
            delete node;
            return OH_NN_FAILED;
        }

        // The primitive's ownership moves into the node (and thus the graph).
        node->primitive_ = primitive.release();
        m_liteGraph->all_nodes_.emplace_back(node);
    }

    return OH_NN_SUCCESS;
}
675 
GetSupportedOperations(size_t deviceID,const bool ** isSupported,uint32_t & opCount)676 OH_NN_ReturnCode InnerModel::GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount)
677 {
678     if (m_liteGraph == nullptr) {
679         LOGE("GetSupportedOperations failed. GetSupportedOperations() must be called after Finish().");
680         return OH_NN_OPERATION_FORBIDDEN;
681     }
682 
683     BackendManager& backendManager = BackendManager::GetInstance();
684 
685     std::shared_ptr<Backend> backend = backendManager.GetBackend(deviceID);
686     if (backend == nullptr) {
687         LOGE("GetSupportedOperations failed, retrieve backend failed.");
688         return OH_NN_FAILED;
689     }
690 
691     std::vector<bool> supportedOperations;
692     std::shared_ptr<NNBackend> nnBackend = std::reinterpret_pointer_cast<NNBackend>(backend);
693     OH_NN_ReturnCode ret = nnBackend->GetSupportedOperation(m_liteGraph, supportedOperations);
694     if (ret != OH_NN_SUCCESS) {
695         LOGE("GetSupportedOperations failed, error happened when get supported operations from backends.");
696         return ret;
697     }
698 
699     m_supportedOperations.clear();
700     std::copy(supportedOperations.begin(), supportedOperations.end(), std::back_inserter(m_supportedOperations));
701 
702     *isSupported = reinterpret_cast<bool*>(m_supportedOperations.data());
703     opCount = m_supportedOperations.size();
704 
705     return OH_NN_SUCCESS;
706 }
707 
// Returns a copy of the model's input tensor handles (shared ownership).
std::vector<std::shared_ptr<NNTensor>> InnerModel::GetInputTensors() const
{
    return m_inputTensors;
}
712 
// Returns a copy of the model's output tensor handles (shared ownership).
std::vector<std::shared_ptr<NNTensor>> InnerModel::GetOutputTensors() const
{
    return m_outputTensors;
}
717 
GetInputTensorDescs() const718 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> InnerModel::GetInputTensorDescs() const
719 {
720     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> inputTensorDescs;
721     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> tensorDescPair;
722     for (auto inputTensor : m_inputTensors) {
723         tensorDescPair.first = OHOS::NeuralNetworkRuntime::CreateSharedPtr<TensorDesc>();
724         inputTensor->ConvertToTensorDesc(*(tensorDescPair.first.get()));
725         tensorDescPair.second = inputTensor->GetType();
726         inputTensorDescs.emplace_back(tensorDescPair);
727     }
728 
729     return inputTensorDescs;
730 }
731 
GetOutputTensorDescs() const732 std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> InnerModel::GetOutputTensorDescs() const
733 {
734     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> outputTensorDescs;
735     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> tensorDescPair;
736     for (auto outputTensor : m_outputTensors) {
737         tensorDescPair.first = OHOS::NeuralNetworkRuntime::CreateSharedPtr<TensorDesc>();
738         outputTensor->ConvertToTensorDesc(*(tensorDescPair.first.get()));
739         tensorDescPair.second = outputTensor->GetType();
740         outputTensorDescs.emplace_back(tensorDescPair);
741     }
742 
743     return outputTensorDescs;
744 }
745 
// Returns the raw metaGraph pointer stored by BuildFromMetaGraph, or nullptr
// if the model was not built from a metaGraph. No delete of this pointer is
// visible in this file — presumably ownership stays with the caller of
// BuildFromMetaGraph; confirm.
void* InnerModel::GetMetaGraph() const
{
    return m_metaGraph;
}
750 
// Returns a copy of the extension configuration recorded at build/load time.
ExtensionConfig InnerModel::GetExtensionConfig() const
{
    return m_extensionConfig;
}
755 }  // namespace NeuralNetworkRuntime
756 }  // namespace OHOS
757