/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef NEURAL_NETWORK_RUNTIME_INNER_MODEL_H
#define NEURAL_NETWORK_RUNTIME_INNER_MODEL_H

#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>

#include "mindir.h"
#include "ops_builder.h"
#include "tensor_desc.h"
#include "interfaces/innerkits/c/neural_network_runtime_inner.h"
#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime.h"

28 namespace OHOS {
29 namespace NeuralNetworkRuntime {
30 class InnerModel {
31 public:
32     InnerModel();
33 
34     bool IsBuild() const;
35     OH_NN_ReturnCode BuildFromLiteGraph(const mindspore::lite::LiteGraph* liteGraph,
36                                         const ExtensionConfig& extensionConfig);
37     OH_NN_ReturnCode BuildFromMetaGraph(const void* metaGraph, const ExtensionConfig& extensionConfig);
38     OH_NN_ReturnCode AddTensor(const OH_NN_Tensor& nnTensor);
39     OH_NN_ReturnCode AddTensorDesc(const NN_TensorDesc* nnTensorDesc);
40     OH_NN_ReturnCode SetTensorQuantParam(uint32_t index, const NN_QuantParam* quantParam);
41     OH_NN_ReturnCode SetTensorType(uint32_t index, OH_NN_TensorType tensorType);
42     OH_NN_ReturnCode SetTensorValue(uint32_t index, const void* buffer, size_t length);
43     OH_NN_ReturnCode AddOperation(OH_NN_OperationType opType,
44                                   const OH_NN_UInt32Array& paramIndices,
45                                   const OH_NN_UInt32Array& inputIndices,
46                                   const OH_NN_UInt32Array& outputIndices);
47     OH_NN_ReturnCode GetSupportedOperations(size_t deviceID, const bool** isSupported, uint32_t& opCount);
48     OH_NN_ReturnCode SpecifyInputsAndOutputs(
49         const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices);
50     OH_NN_ReturnCode SetInputsAndOutputsInfo(const OH_NN_TensorInfo* inputsInfo, size_t inputSize,
51         const OH_NN_TensorInfo* outputsInfo, size_t outputSize);
52     OH_NN_ReturnCode Build();
53     std::vector<std::shared_ptr<NNTensor>> GetInputTensors() const;
54     std::vector<std::shared_ptr<NNTensor>> GetOutputTensors() const;
55     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> GetInputTensorDescs() const;
56     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> GetOutputTensorDescs() const;
GetLiteGraphs()57     std::shared_ptr<mindspore::lite::LiteGraph> GetLiteGraphs() const
58     {
59         return m_liteGraph;
60     }
61     void* GetMetaGraph() const;
62     ExtensionConfig GetExtensionConfig() const;
63 
64 private:
65     void AddTensorsToLiteGraph(std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID);
66     OH_NN_ReturnCode AddNodesToLiteGraph(const std::unordered_map<uint32_t, uint32_t>& modelIDToGraphID);
67     OH_NN_ReturnCode ValidateInputAndOutput(
68         const OH_NN_UInt32Array& inputIndices, const OH_NN_UInt32Array& outputIndices) const;
69     OH_NN_ReturnCode ValidateTensorArray(const OH_NN_UInt32Array& indices) const;
70     OH_NN_ReturnCode CheckParameters() const;
71 
72 private:
73     std::vector<char> m_supportedOperations; // std::vector<bool> not support data(), use std::vector<char> instead.
74     std::vector<uint32_t> m_inputIndices;
75     std::vector<uint32_t> m_outputIndices;
76     std::vector<std::unique_ptr<Ops::OpsBuilder>> m_ops;
77     std::vector<std::shared_ptr<NNTensor>> m_allTensors;
78     std::vector<std::shared_ptr<NNTensor>> m_inputTensors; // Used to pass input tensors to compilation.
79     std::vector<std::shared_ptr<NNTensor>> m_outputTensors; // Used to pass output tensors to compilation.
80     std::shared_ptr<mindspore::lite::LiteGraph> m_liteGraph {nullptr};
81     void* m_metaGraph {nullptr};
82     ExtensionConfig m_extensionConfig;
83 };
84 }  // namespace NeuralNetworkRuntime
85 }  // namespace OHOS
#endif // NEURAL_NETWORK_RUNTIME_INNER_MODEL_H