/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "conv2d_transpose_builder.h"

#include "transform.h"
#include "validation.h"
#include "ops_validation.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Ops {
static constexpr int INPUT_NUM = 3;
static constexpr int OUTPUT_NUM = 1;
static constexpr int PARAM_MAX_NUM = 7;
static constexpr int INPUT_WEIGHT = 1;
static constexpr int WEIGHT_SIZE = 4;
static constexpr int OUT_CHANNEL_INDEX = 0;
static constexpr int IN_CHANNEL_INDEX = 3;
static constexpr int KERNEL_HEIGHT_INDEX = 1;
static constexpr int KERNEL_WEIGHT_INDEX = 2;
static constexpr int PAD_MODE_PARAM_NUM = 1;
static constexpr int PAD_LIST_PARAM_NUM = 4;
static constexpr int SCALAR_LENGTH = 1;
static const std::string OP_NAME = "Conv2DTranspose";

Conv2DTransposeBuilder::Conv2DTransposeBuilder() {}

Conv2DTransposeBuilder::~Conv2DTransposeBuilder() {}

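// Validates the input and output indices, then caches inChannel and outChannel from the weight
// tensor, whose dimensions are interpreted as [outChannel, kernelHeight, kernelWidth, inChannel]
// per the index constants above.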
OH_NN_ReturnCode Conv2DTransposeBuilder::SetInput(const std::vector<uint32_t>& inputsIndex,
                                                  const std::vector<uint32_t>& outputsIndex,
                                                  const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[Conv2DTranspose] SetInput failed, passed invalid input or output index.");
        return returnCode;
    }

    m_inputsIndex = inputsIndex;
    m_outputsIndex = outputsIndex;

    // Set inChannel, outChannel and kernelSize from the weight tensor.
    auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions();
    if (weightShape.size() != WEIGHT_SIZE) {
        LOGE("[Conv2DTranspose] SetInput failed, the dimension of weight should be %d.", WEIGHT_SIZE);
        return OH_NN_INVALID_PARAMETER;
    }

    m_inChannel = weightShape[IN_CHANNEL_INDEX];
    m_outChannel = weightShape[OUT_CHANNEL_INDEX];

    return OH_NN_SUCCESS;
}

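// Derives the kernel height and width from the weight tensor dimensions; Build only calls this
// after SetInput has verified that the weight tensor is four-dimensional.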
void Conv2DTransposeBuilder::SetKernelSize(const std::vector<uint32_t>& inputsIndex,
                                           const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions();

    m_kernelSize.clear();
    m_kernelSize.emplace_back(weightShape[KERNEL_HEIGHT_INDEX]);
    m_kernelSize.emplace_back(weightShape[KERNEL_WEIGHT_INDEX]);
}

OH_NN_ReturnCode Conv2DTransposeBuilder::SetStrides(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();
    // Set Strides
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[Conv2DTranspose] SetStrides failed, the Strides should be of type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Conv2DTranspose] SetStrides GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
    uint32_t elementSize = tensor->GetElementCount();
    m_strides.assign(pStrides, pStrides + elementSize);

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode Conv2DTransposeBuilder::SetDilation(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();
    // Set Dilation
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[Conv2DTranspose] SetDilation failed, the Dilation should be of type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Conv2DTranspose] SetDilation GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    const int64_t* pDilation = reinterpret_cast<const int64_t*>(buffer);
    uint32_t dilationSize = tensor->GetElementCount();
    m_dilation.assign(pDilation, pDilation + dilationSize);

    return OH_NN_SUCCESS;
}

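// The pad parameter carries either a single pad-mode value (one OH_NN_INT8 element) or an
// explicit pad list (four OH_NN_INT64 elements); the element count selects the branch below.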
OH_NN_ReturnCode Conv2DTransposeBuilder::SetPad(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();

    bool isPadMode = false;
    if (tensor->GetElementCount() == PAD_MODE_PARAM_NUM) {
        isPadMode = true;
    } else if (tensor->GetElementCount() != PAD_LIST_PARAM_NUM) {
        LOGE("[Conv2DTranspose] SetPad failed, the element count should be 1 if using padMode or 4 if using padList.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Conv2DTranspose] SetPad GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    // Set PadMode or PadList
    if (isPadMode) {
        if (tensor->GetDataType() != OH_NN_INT8) {
            LOGE("[Conv2DTranspose] SetPad failed, the PadMode should have type OH_NN_INT8.");
            return OH_NN_INVALID_PARAMETER;
        }

        int8_t* pPad = static_cast<int8_t*>(buffer);
        if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) {
            LOGE("[Conv2DTranspose] SetPad failed, invalid pad mode.");
            return OH_NN_INVALID_PARAMETER;
        }
        m_padMode = NNToMS::TransformPadModeValue(*pPad);
    } else {
        if (tensor->GetDataType() != OH_NN_INT64) {
            LOGE("[Conv2DTranspose] SetPad failed, the PadList should have type OH_NN_INT64.");
            return OH_NN_INVALID_PARAMETER;
        }

        const int64_t* pPadList = reinterpret_cast<const int64_t*>(buffer);
        uint32_t padListSize = tensor->GetElementCount();
        m_padList.assign(pPadList, pPadList + padListSize);
    }

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode Conv2DTransposeBuilder::SetGroup(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();
    // Set Group
    if (tensor->GetElementCount() != SCALAR_LENGTH) {
        LOGE("[Conv2DTranspose] SetGroup failed, the Group should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[Conv2DTranspose] SetGroup failed, the Group should have type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Conv2DTranspose] SetGroup GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_group = *reinterpret_cast<const int64_t*>(buffer);

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode Conv2DTransposeBuilder::SetOutPadding(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();
    // Set outputPadding
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[Conv2DTranspose] SetOutPadding failed, the outputPadding should be of type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Conv2DTranspose] SetOutPadding GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    const int64_t* pOutputPadding = reinterpret_cast<const int64_t*>(buffer);
    uint32_t outputPadSize = tensor->GetElementCount();
    m_outputPaddings.assign(pOutputPadding, pOutputPadding + outputPadSize);

    return OH_NN_SUCCESS;
}

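// The activation parameter is a scalar OH_NN_INT8 fuse type; it is validated and then converted
// through NNToMS::TransfromFusionType before being stored.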
OH_NN_ReturnCode Conv2DTransposeBuilder::SetActivation(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();

    if (tensor->GetElementCount() != SCALAR_LENGTH) {
        LOGE("[Conv2DTranspose] SetActivation failed, the ActivationType should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetDataType() != OH_NN_INT8) {
        LOGE("[Conv2DTranspose] SetActivation failed, the ActivationType should have type OH_NN_INT8.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Conv2DTranspose] SetActivation GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    int8_t* pFuseData = static_cast<int8_t*>(buffer);
    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) {
        LOGE("[Conv2DTranspose] SetActivation failed, activation input is invalid.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_activationType = NNToMS::TransfromFusionType(static_cast<OH_NN_FuseType>(*pFuseData));

    return OH_NN_SUCCESS;
}

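// Build validates the input/output and parameter indices, derives the kernel size from the weight
// tensor, and dispatches each parameter tensor to its setter via m_paramMap, keyed by tensor type.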
OH_NN_ReturnCode Conv2DTransposeBuilder::Build(const std::vector<uint32_t>& paramsIndex,
                                               const std::vector<uint32_t>& inputsIndex,
                                               const std::vector<uint32_t>& outputsIndex,
                                               const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    if (m_isBuild) {
        LOGE("[Conv2DTranspose] Build failed, the conv2DTranspose operation has already been built; cannot build again.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode returnCode = SetInput(inputsIndex, outputsIndex, allTensors);
    if (returnCode != OH_NN_SUCCESS) {
        return returnCode;
    }

    returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[Conv2DTranspose] Build failed, passed invalid param index.");
        return returnCode;
    }

    SetKernelSize(inputsIndex, allTensors);

    for (uint32_t i : paramsIndex) {
        std::shared_ptr<NNTensor> tensor = allTensors[i]; // parameter tensor
        if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) {
            returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor);
        } else {
            LOGE("[Conv2DTranspose] Build failed, param invalid, type=%d", tensor->GetType());
            return OH_NN_INVALID_PARAMETER;
        }

        if (returnCode != OH_NN_SUCCESS) {
            LOGE("[Conv2DTranspose] Build failed, passed invalid param.");
            return returnCode;
        }
    }

    // The quantization type of the first output determines that of the operator.
    SetQuantType(outputsIndex, allTensors);

    m_isBuild = true;
    m_name = OP_NAME;
    return OH_NN_SUCCESS;
}

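// Packs the collected attributes into a Conv2dTransposeFusion primitive; valid only after Build
// has succeeded.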
LiteGraphPrimitvePtr Conv2DTransposeBuilder::GetPrimitive()
{
    if (!m_isBuild) {
        LOGE("[Conv2DTranspose] GetPrimitive failed, cannot get primitive before calling Build.");
        return {nullptr, DestroyLiteGraphPrimitive};
    }

    void* primitive = MindIR_Conv2dTransposeFusion_CreatePrimitive(m_kernelSize,
        m_strides, m_dilation, m_padMode, m_padList, m_group, m_inChannel, m_outChannel,
        m_activationType, m_outputPaddings);
    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
    return graphPrimitivePtr;
}

REGISTER_OPS(Conv2DTransposeBuilder, OH_NN_OPS_CONV2D_TRANSPOSE);
} // namespace Ops
} // namespace NeuralNetworkRuntime
} // namespace OHOS