1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "depthwise_conv2d_native_builder.h"
17
18 #include "transform.h"
19 #include "validation.h"
20 #include "ops_validation.h"
21
22 namespace OHOS {
23 namespace NeuralNetworkRuntime {
24 namespace Ops {
// Expected tensor counts for this operator: 3 inputs (x, weight, bias), 1 output.
static const int INPUT_NUM = 3;
static const int OUTPUT_NUM = 1;
// At most 5 attribute tensors (strides, dilation, pad, multiplier, activation).
static const int PARAM_MAX_NUM = 5;
// A pad tensor with 1 element is a pad mode; with 4 elements it is an explicit pad list.
static const int PAD_MODE_SIZE = 1;
static const int PAD_LIST_SIZE = 4;
// NHWC input layout: channel count sits at dimension index 3 of the input shape.
static const int IN_CHANNEL_IN_INPUT = 3;
// Weight layout indices: [outChannel, height, width, inChannel].
static const int OUT_CHANNEL_IN_WEIGHT = 0;
static const int HEIGHT_IN_WEIGHT = 1;
static const int WIDTH_IN_WEIGHT = 2;
// Both input and weight tensors must be 4-D.
static const int INPUT_RANK = 4;
// Positions of the data and weight tensors within inputsIndex.
static const int INPUT_X = 0;
static const int INPUT_WEIGHT = 1;
// Scalar parameters (e.g. activation) must contain exactly one element.
static const int SCALE_LENGTH = 1;
static const std::string OP_NAME = "DepthwiseConv2DNative";
39
DepthwiseConv2DNativeBuilder()40 DepthwiseConv2DNativeBuilder::DepthwiseConv2DNativeBuilder() {}
41
~DepthwiseConv2DNativeBuilder()42 DepthwiseConv2DNativeBuilder::~DepthwiseConv2DNativeBuilder() {}
43
SetIsPadMode(const std::shared_ptr<NNTensor> & tensor,bool & isPadMode)44 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(const std::shared_ptr<NNTensor>& tensor,
45 bool &isPadMode)
46 {
47 if (tensor->GetElementCount() == PAD_MODE_SIZE) {
48 isPadMode = true;
49 } else if (tensor->GetElementCount() != PAD_LIST_SIZE) {
50 LOGE("[DepthwiseConv2DNative] The element size of padMode should be 1 or "
51 "the element size of padList should be 4.");
52 return OH_NN_INVALID_PARAMETER;
53 }
54
55 return OH_NN_SUCCESS;
56 }
57
SetActivation(const std::shared_ptr<NNTensor> & tensor)58 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetActivation(const std::shared_ptr<NNTensor>& tensor)
59 {
60 tensor->IdentifyOpParameter();
61 // Set ActivationType
62 if (tensor->GetElementCount() != SCALE_LENGTH) {
63 LOGE("[DepthwiseConv2DNative] SetActivation failed, the Activation should be scaler.");
64 return OH_NN_INVALID_PARAMETER;
65 }
66
67 if (tensor->GetDataType() != OH_NN_INT8) {
68 LOGE("[DepthwiseConv2DNative] SetActivation failed, the activationType should have type OH_NN_INT8.");
69 return OH_NN_INVALID_PARAMETER;
70 }
71
72 void* buffer = tensor->GetBuffer();
73 if (buffer == nullptr) {
74 LOGE("[DepthwiseConv2DNative] SetActivation GetBuffer return nullptr");
75 return OH_NN_INVALID_PARAMETER;
76 }
77 int8_t* pFuseData = static_cast<int8_t*>(buffer);
78 if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) {
79 LOGE("[DepthwiseConv2DNative] SetActivation failed, activation input is invalid.");
80 return OH_NN_INVALID_PARAMETER;
81 }
82 m_activationType = NNToMS::TransfromFusionType((OH_NN_FuseType)(*pFuseData));
83
84 return OH_NN_SUCCESS;
85 }
86
SetKernelSize(const std::vector<uint32_t> & inputsIndex,const std::vector<std::shared_ptr<NNTensor>> & allTensors)87 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetKernelSize(const std::vector<uint32_t>& inputsIndex,
88 const std::vector<std::shared_ptr<NNTensor>>& allTensors)
89 {
90 // Set kernleSize and outChannel
91 auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions();
92 if (weightShape.size() != INPUT_RANK) {
93 LOGE("[DepthwiseConv2DNative] SetKernelSize failed, invalid rank of shape of weight, should be 4 dimensions.");
94 return OH_NN_INVALID_PARAMETER;
95 }
96
97 m_outChannel = weightShape[OUT_CHANNEL_IN_WEIGHT];
98 m_kernelSize.clear();
99 m_kernelSize.emplace_back(weightShape[HEIGHT_IN_WEIGHT]);
100 m_kernelSize.emplace_back(weightShape[WIDTH_IN_WEIGHT]);
101 return OH_NN_SUCCESS;
102 }
103
SetStrides(const std::shared_ptr<NNTensor> & tensor)104 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(const std::shared_ptr<NNTensor>& tensor)
105 {
106 tensor->IdentifyOpParameter();
107 if (tensor->GetDataType() != OH_NN_INT64) {
108 LOGE("[DepthwiseConv2DNative] SetStrides failed, the stride should have type OH_NN_INT64.");
109 return OH_NN_INVALID_PARAMETER;
110 }
111
112 void* buffer = tensor->GetBuffer();
113 if (buffer == nullptr) {
114 LOGE("[DepthwiseConv2DNative] SetStrides GetBuffer return nullptr");
115 return OH_NN_INVALID_PARAMETER;
116 }
117 const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
118 uint32_t stridesSize = tensor->GetElementCount();
119 m_strides.assign(pStrides, pStrides + stridesSize);
120
121 return OH_NN_SUCCESS;
122 }
SetDilation(const std::shared_ptr<NNTensor> & tensor)123 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(const std::shared_ptr<NNTensor>& tensor)
124 {
125 tensor->IdentifyOpParameter();
126 if (tensor->GetDataType() != OH_NN_INT64) {
127 LOGE("[DepthwiseConv2DNative] SetDilation failed, the dilation should have type OH_NN_INT64");
128 return OH_NN_INVALID_PARAMETER;
129 }
130
131 void* buffer = tensor->GetBuffer();
132 if (buffer == nullptr) {
133 LOGE("[DepthwiseConv2DNative] SetDilation GetBuffer return nullptr");
134 return OH_NN_INVALID_PARAMETER;
135 }
136 const int64_t* pDilation = reinterpret_cast<const int64_t*>(buffer);
137 uint32_t dilationSize = tensor->GetElementCount();
138 m_dilation.assign(pDilation, pDilation + dilationSize);
139
140 return OH_NN_SUCCESS;
141 }
142
SetPadModeOrPaddings(const std::shared_ptr<NNTensor> & tensor)143 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings(const std::shared_ptr<NNTensor>& tensor)
144 {
145 tensor->IdentifyOpParameter();
146
147 bool isPadMode = false;
148 OH_NN_ReturnCode ret = SetIsPadMode(tensor, isPadMode);
149 if (ret != OH_NN_SUCCESS) {
150 return ret;
151 }
152
153 void* buffer = tensor->GetBuffer();
154 if (buffer == nullptr) {
155 LOGE("[DepthwiseConv2DNative] SetPad GetBuffer return nullptr");
156 return OH_NN_INVALID_PARAMETER;
157 }
158
159 if (isPadMode) {
160 if (tensor->GetDataType() != OH_NN_INT8) {
161 LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padMode should have type OH_NN_INT8.");
162 return OH_NN_INVALID_PARAMETER;
163 }
164
165 int8_t* pPad = static_cast<int8_t*>(buffer);
166 if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) {
167 LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, invalid pad mode.");
168 return OH_NN_INVALID_PARAMETER;
169 }
170 m_padMode = NNToMS::TransformPadModeValue(*pPad);
171 } else {
172 if (tensor->GetDataType() != OH_NN_INT64) {
173 LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padList should have type OH_NN_INT64.");
174 return OH_NN_INVALID_PARAMETER;
175 }
176
177 const int64_t* pPadList = reinterpret_cast<const int64_t*>(buffer);
178 uint32_t padListSize = tensor->GetElementCount();
179 m_pad.assign(pPadList, pPadList + padListSize);
180 }
181 return OH_NN_SUCCESS;
182 }
183
SetInputAndOutput(const std::vector<uint32_t> & inputsIndex,const std::vector<uint32_t> & outputsIndex,const std::vector<std::shared_ptr<NNTensor>> & allTensors)184 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetInputAndOutput(
185 const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
186 const std::vector<std::shared_ptr<NNTensor>>& allTensors)
187 {
188 OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
189 if (returnCode != OH_NN_SUCCESS) {
190 LOGE("[DepthwiseConv2DNative] SetInputAndOutput failed, passed invalid input or output index.");
191 return returnCode;
192 }
193
194 m_inputsIndex = inputsIndex;
195 m_outputsIndex = outputsIndex;
196
197 return OH_NN_SUCCESS;
198 }
199
Build(const std::vector<uint32_t> & paramsIndex,const std::vector<uint32_t> & inputsIndex,const std::vector<uint32_t> & outputsIndex,const std::vector<std::shared_ptr<NNTensor>> & allTensors)200 OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::Build(const std::vector<uint32_t>& paramsIndex,
201 const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
202 const std::vector<std::shared_ptr<NNTensor>>& allTensors)
203 {
204 if (m_isBuild) {
205 LOGE("[DepthwiseConv2DNative] Build failed, operation has been build, cannot build again.");
206 return OH_NN_OPERATION_FORBIDDEN;
207 }
208
209 OH_NN_ReturnCode ret = SetInputAndOutput(inputsIndex, outputsIndex, allTensors);
210 if (ret != OH_NN_SUCCESS) {
211 return ret;
212 }
213
214 ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM);
215 if (ret != OH_NN_SUCCESS) {
216 LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param index.");
217 return ret;
218 }
219
220 auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions();
221 if (inputShape.size() != INPUT_RANK) {
222 LOGE("[DepthwiseConv2DNative] Build failed, invalid rank of shape of input, should be 4 dimensions.");
223 return OH_NN_INVALID_PARAMETER;
224 }
225 m_inChannel = inputShape[IN_CHANNEL_IN_INPUT];
226 // Set Kernel Size
227 ret = SetKernelSize(inputsIndex, allTensors);
228 if (ret != OH_NN_SUCCESS) {
229 LOGE("[DepthwiseConv2DNative] Build failed, SetKernelSize failed.");
230 return ret;
231 }
232
233 for (int i : paramsIndex) {
234 std::shared_ptr<NNTensor> tensor = allTensors[i]; // 参数 tensor
235 if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) {
236 ret = (this->*(m_paramMap[tensor->GetType()]))(tensor);
237 } else {
238 LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type=%d", tensor->GetType());
239 return OH_NN_INVALID_PARAMETER;
240 }
241
242 if (ret != OH_NN_SUCCESS) {
243 LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param.");
244 return ret;
245 }
246 }
247
248 SetQuantType(outputsIndex, allTensors);
249
250 m_isBuild = true;
251 m_name = OP_NAME;
252 return OH_NN_SUCCESS;
253 }
254
GetPrimitive()255 LiteGraphPrimitvePtr DepthwiseConv2DNativeBuilder::GetPrimitive()
256 {
257 if (!m_isBuild) {
258 LOGE("[DepthwiseConv2DNative] GetPrimitive failed, cannot get primitive before call build.");
259 return {nullptr, DestroyLiteGraphPrimitive};
260 }
261
262 auto primitive = MindIR_Conv2DFusion_CreatePrimitive(m_kernelSize, m_strides,
263 m_dilation, m_padMode, m_pad, m_inChannel, m_inChannel, m_outChannel, m_activationType);
264 LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ;
265 return graphPrimitivePtr;
266 }
267
268 REGISTER_OPS(DepthwiseConv2DNativeBuilder, OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE);
269 } // namespace Ops
270 } // namespace NeuralNetworkRuntime
271 } // namespace OHOS
272