/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "l2_normalize_builder.h"

#include "transform.h"
#include "validation.h"
#include "ops_registry.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Ops {
static const int INPUT_NUM = 1;
static const int OUTPUT_NUM = 1;
static const int PARAM_MAX_NUM = 3;
static const int SCALAR_LENGTH = 1;
static const std::string OP_NAME = "L2Normalize";

L2NormalizeBuilder::L2NormalizeBuilder() {}

L2NormalizeBuilder::~L2NormalizeBuilder() {}

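// Parses the axis parameter tensor: checks that it is OH_NN_INT64 with a valid buffer,
// then copies all of its elements into m_axis.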
OH_NN_ReturnCode L2NormalizeBuilder::SetAxis(const std::shared_ptr<NNTensor>& tensor)
{
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[L2Normalize] The axis should be type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    m_axis.clear();

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[L2Normalize] Tensor buffer is nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    int64_t* pAxis = static_cast<int64_t*>(buffer);

    uint32_t elementCount = tensor->GetElementCount();
    for (uint32_t i = 0; i < elementCount; ++i) {
        m_axis.emplace_back(*pAxis);
        ++pAxis;
    }

    return OH_NN_SUCCESS;
}

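// Parses the epsilon parameter tensor: checks that it is an OH_NN_FLOAT32 scalar with a valid buffer,
// then stores its value in m_epsilon.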
OH_NN_ReturnCode L2NormalizeBuilder::SetEpsilon(const std::shared_ptr<NNTensor>& tensor)
{
    if (tensor->GetDataType() != OH_NN_FLOAT32) {
        LOGE("[L2Normalize] The epsilon should be type OH_NN_FLOAT32.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetElementCount() != SCALAR_LENGTH) {
        LOGE("[L2Normalize] The epsilon should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[L2Normalize] Tensor buffer is nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_epsilon = *(static_cast<const float*>(buffer));

    return OH_NN_SUCCESS;
}

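// Parses the activation-type parameter tensor: checks that it is an OH_NN_INT8 scalar, validates the
// value as an OH_NN_FuseType, and stores the converted MindSpore activation type in m_activationType.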
OH_NN_ReturnCode L2NormalizeBuilder::SetActivationType(const std::shared_ptr<NNTensor>& tensor)
{
    if (tensor->GetDataType() != OH_NN_INT8) {
        LOGE("[L2Normalize] SetActivationType failed, the activationType should have type OH_NN_INT8.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetElementCount() != SCALAR_LENGTH) {
        LOGE("[L2Normalize] SetActivationType failed, the activationType should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[L2Normalize] SetActivationType failed, GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    int8_t* pActivationType = static_cast<int8_t*>(buffer);
    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pActivationType))) {
        LOGE("[L2Normalize] SetActivationType failed, activationType input is invalid.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_activationType = NNToMS::TransfromFusionType(static_cast<OH_NN_FuseType>(*pActivationType));

    return OH_NN_SUCCESS;
}

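// Validates the input, output, and parameter indices, dispatches each parameter tensor to its
// setter through m_paramMap, and marks the builder as built. Build can only succeed once per builder.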
OH_NN_ReturnCode L2NormalizeBuilder::Build(const std::vector<uint32_t>& paramsIndex,
                                           const std::vector<uint32_t>& inputsIndex,
                                           const std::vector<uint32_t>& outputsIndex,
                                           const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    if (m_isBuild) {
        LOGE("[L2Normalize] Build failed, the L2Normalize operation has already been built. Cannot build again.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[L2Normalize] Build failed, passed invalid input or output index.");
        return ret;
    }

    m_inputsIndex = inputsIndex;
    m_outputsIndex = outputsIndex;

    ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[L2Normalize] Build failed, passed invalid param index.");
        return ret;
    }

    for (uint32_t i : paramsIndex) {
        std::shared_ptr<NNTensor> tensor = allTensors[i];
        tensor->IdentifyOpParameter();
        if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) {
            ret = (this->*(m_paramMap[tensor->GetType()]))(tensor);
        } else {
            LOGE("[L2Normalize] Build failed, param invalid, type=%d", tensor->GetType());
            return OH_NN_INVALID_PARAMETER;
        }

        if (ret != OH_NN_SUCCESS) {
            LOGE("[L2Normalize] Build failed, passed invalid param.");
            return ret;
        }
    }

    m_name = OP_NAME;
    m_isBuild = true;
    return OH_NN_SUCCESS;
}

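// Creates the MindSpore Lite L2NormalizeFusion primitive from the parsed axis, epsilon, and
// activation type. Returns a null primitive if Build has not been called successfully.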
LiteGraphPrimitvePtr L2NormalizeBuilder::GetPrimitive()
{
    if (!m_isBuild) {
        LOGE("[L2Normalize] GetPrimitive failed, cannot get primitive before calling Build.");
        return {nullptr, DestroyLiteGraphPrimitive};
    }

    void* primitive = mindspore::lite::MindIR_L2NormalizeFusion_CreatePrimitive(m_axis, m_epsilon, m_activationType);
    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
    return graphPrimitivePtr;
}

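// A minimal usage sketch (illustrative only; the index vectors and tensor list below are assumed to
// have been prepared by the caller and are not defined in this file):
//
//     L2NormalizeBuilder builder;
//     OH_NN_ReturnCode ret = builder.Build(paramsIndex, inputsIndex, outputsIndex, allTensors);
//     if (ret == OH_NN_SUCCESS) {
//         LiteGraphPrimitvePtr primitive = builder.GetPrimitive();
//     }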
REGISTER_OPS(L2NormalizeBuilder, OH_NN_OPS_L2_NORMALIZE);
} // namespace Ops
} // namespace NeuralNetworkRuntime
} // namespace OHOS