/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops_test.h"

using namespace OHOS::NeuralNetworkRuntime::Ops;
using namespace std;
namespace OHOS {
namespace NeuralNetworkRuntime {
namespace UnitTest {
SaveInputTensor(const std::vector<uint32_t> & inputsIndex,OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam)23 void OpsTest::SaveInputTensor(const std::vector<uint32_t>& inputsIndex, OH_NN_DataType dataType,
24     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam)
25 {
26     m_inputsIndex = inputsIndex;
27     for (size_t i = 0; i < inputsIndex.size(); ++i) {
28         std::shared_ptr<NNTensor> inputTensor;
29         inputTensor = TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR);
30         m_allTensors.emplace_back(inputTensor);
31     }
32 }
33 
SaveOutputTensor(const std::vector<uint32_t> & outputsIndex,OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam)34 void OpsTest::SaveOutputTensor(const std::vector<uint32_t>& outputsIndex, OH_NN_DataType dataType,
35     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam)
36 {
37     m_outputsIndex = outputsIndex;
38     for (size_t i = 0; i < outputsIndex.size(); ++i) {
39         std::shared_ptr<NNTensor> outputTensor;
40         outputTensor = TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR);
41         m_allTensors.emplace_back(outputTensor);
42     }
43 }
44 
SetKernelSize(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)45 void OpsTest::SetKernelSize(OH_NN_DataType dataType,
46     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
47 {
48     int32_t kernelsNum{2};
49     std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
50     int64_t* kernelSizeValue = new (std::nothrow) int64_t[kernelsNum]{1, 1};
51     EXPECT_NE(nullptr, kernelSizeValue);
52     tensor->SetBuffer(kernelSizeValue, sizeof(int64_t) * kernelsNum);
53     m_allTensors.emplace_back(tensor);
54 }
55 
SetStride(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)56 void OpsTest::SetStride(OH_NN_DataType dataType,
57     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
58 {
59     int32_t strideNum{2};
60     std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
61     int64_t* strideValue = new (std::nothrow) int64_t[strideNum]{1, 1};
62     EXPECT_NE(nullptr, strideValue);
63     tensor->SetBuffer(strideValue, sizeof(int64_t) * strideNum);
64     m_allTensors.emplace_back(tensor);
65 }
66 
SetActivation(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)67 void OpsTest::SetActivation(OH_NN_DataType dataType,
68     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
69 {
70     std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
71     int8_t* activationValue = new (std::nothrow) int8_t(0);
72     EXPECT_NE(nullptr, activationValue);
73     tensor->SetBuffer(activationValue, sizeof(int8_t));
74     m_allTensors.emplace_back(tensor);
75 }
76 
SetDilation(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)77 void OpsTest::SetDilation(OH_NN_DataType dataType,
78     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
79 {
80     int32_t dilationNum = 2;
81     std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
82     int64_t* dilationValue = new (std::nothrow) int64_t[2]{1, 1};
83     EXPECT_NE(nullptr, dilationValue);
84     tensor->SetBuffer(dilationValue, dilationNum * sizeof(int64_t));
85     m_allTensors.emplace_back(tensor);
86 }
87 
SetGroup(OH_NN_DataType dataType,const std::vector<int32_t> & dim,const OH_NN_QuantParam * quantParam,OH_NN_TensorType type)88 void OpsTest::SetGroup(OH_NN_DataType dataType,
89     const std::vector<int32_t> &dim,  const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
90 {
91     std::shared_ptr<NNTensor> tensor = TransToNNTensor(dataType, dim, quantParam, type);
92     int64_t* groupValue = new (std::nothrow) int64_t(0);
93     EXPECT_NE(nullptr, groupValue);
94     tensor->SetBuffer(groupValue, sizeof(int64_t));
95     m_allTensors.emplace_back(tensor);
96 }

} // namespace UnitTest
} // namespace NeuralNetworkRuntime
} // namespace OHOS