/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15
16 #include <unistd.h>
17
18 #include <hdf_base.h>
19 #include <refbase.h>
20 #include <gtest/gtest.h>
21
22 #include "common/log.h"
23 #include "hdi_device_v2_0.h"
24 #include "test/unittest/common/v2_0/mock_idevice.h"
25
26 using namespace testing;
27 using namespace testing::ext;
28 using namespace OHOS::NeuralNetworkRuntime;
29 namespace OHOS {
30 namespace NeuralNetworkRuntime {
31 namespace UnitTest {
32 constexpr uint32_t INNRT_DEVICE_MAJOR_VERSION = 2;
33 constexpr uint32_t INNRT_DEVICE_MINOR_VERSION = 0;
34
35 class IRegisterDevice : public HDI::HdiBase {
36 public:
37 DECLARE_HDI_DESCRIPTOR(u"ohos.hdi.nnrt.v2_0.IRegisterDevice");
38
39 virtual ~IRegisterDevice() = default;
40
41 static sptr<IRegisterDevice> Get(bool isStub = false);
42 static sptr<IRegisterDevice> Get(const std::string& serviceName, bool isStub = false);
43
44 virtual int32_t GetDeviceName(std::string& name) = 0;
45
46 virtual int32_t GetVendorName(std::string& name) = 0;
47
48 virtual int32_t GetDeviceType(V2_0::DeviceType& deviceType) = 0;
49
50 virtual int32_t GetDeviceStatus(V2_0::DeviceStatus& status) = 0;
51
52 virtual int32_t GetSupportedOperation(const V2_0::Model& model, std::vector<bool>& ops) = 0;
53
54 virtual int32_t IsFloat16PrecisionSupported(bool& isSupported) = 0;
55
56 virtual int32_t IsPerformanceModeSupported(bool& isSupported) = 0;
57
58 virtual int32_t IsPrioritySupported(bool& isSupported) = 0;
59
60 virtual int32_t IsDynamicInputSupported(bool& isSupported) = 0;
61
62 virtual int32_t PrepareModel(const V2_0::Model& model, const V2_0::ModelConfig& config,
63 sptr<V2_0::IPreparedModel>& preparedModel) = 0;
64
65 virtual int32_t IsModelCacheSupported(bool& isSupported) = 0;
66
67 virtual int32_t PrepareModelFromModelCache(const std::vector<V2_0::SharedBuffer>& modelCache,
68 const V2_0::ModelConfig& config, sptr<V2_0::IPreparedModel>& preparedModel) = 0;
69
70 virtual int32_t AllocateBuffer(uint32_t length, V2_0::SharedBuffer& buffer) = 0;
71
72 virtual int32_t ReleaseBuffer(const V2_0::SharedBuffer& buffer) = 0;
73
GetVersion(uint32_t & majorVer,uint32_t & minorVer)74 virtual int32_t GetVersion(uint32_t& majorVer, uint32_t& minorVer)
75 {
76 majorVer = INNRT_DEVICE_MAJOR_VERSION;
77 minorVer = INNRT_DEVICE_MINOR_VERSION;
78 return HDF_SUCCESS;
79 }
80 };
81
82 class SimulationDevice : public Device {
83 public:
SimulationDevice(OHOS::sptr<IRegisterDevice> device)84 explicit SimulationDevice(OHOS::sptr<IRegisterDevice> device) {};
85
GetDeviceName(std::string & name)86 OH_NN_ReturnCode GetDeviceName(std::string& name) override
87 {
88 name = "MockIDeviceA";
89 return OH_NN_SUCCESS;
90 };
GetVendorName(std::string & name)91 OH_NN_ReturnCode GetVendorName(std::string& name) override
92 {
93 name = "MockVendorA";
94 return OH_NN_SUCCESS;
95 };
GetVersion(std::string & version)96 OH_NN_ReturnCode GetVersion(std::string& version) override
97 {
98 version = "MockVersionA";
99 return OH_NN_SUCCESS;
100 };
GetDeviceType(OH_NN_DeviceType & deviceType)101 OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override
102 {
103 return OH_NN_SUCCESS;
104 };
GetDeviceStatus(DeviceStatus & status)105 OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override
106 {
107 status = DeviceStatus::AVAILABLE;
108 return OH_NN_SUCCESS;
109 };
GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,std::vector<bool> & ops)110 OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
111 std::vector<bool>& ops) override
112 {
113 return OH_NN_SUCCESS;
114 };
115
IsFloat16PrecisionSupported(bool & isSupported)116 OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override
117 {
118 return OH_NN_SUCCESS;
119 };
IsPerformanceModeSupported(bool & isSupported)120 OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override
121 {
122 return OH_NN_SUCCESS;
123 };
IsPrioritySupported(bool & isSupported)124 OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override
125 {
126 return OH_NN_SUCCESS;
127 };
IsDynamicInputSupported(bool & isSupported)128 OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override
129 {
130 return OH_NN_SUCCESS;
131 };
IsModelCacheSupported(bool & isSupported)132 OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override
133 {
134 return OH_NN_SUCCESS;
135 };
136
PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,const Buffer & quantBuffer,const ModelConfig & config,std::shared_ptr<PreparedModel> & preparedModel)137 OH_NN_ReturnCode PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
138 const Buffer& quantBuffer, const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) override
139 {
140 return OH_NN_SUCCESS;
141 };
PrepareModelFromModelCache(const std::vector<Buffer> & modelCache,const ModelConfig & config,std::shared_ptr<PreparedModel> & preparedModel)142 OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector<Buffer>& modelCache,
143 const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) override
144 {
145 return OH_NN_SUCCESS;
146 };
PrepareOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,const ModelConfig & config,std::shared_ptr<PreparedModel> & preparedModel)147 OH_NN_ReturnCode PrepareOfflineModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
148 const ModelConfig& config, std::shared_ptr<PreparedModel>& preparedModel) override
149 {
150 return OH_NN_SUCCESS;
151 };
152
AllocateBuffer(size_t length)153 void *AllocateBuffer(size_t length) override
154 {
155 return nullptr;
156 };
ReleaseBuffer(const void * buffer)157 OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override
158 {
159 return OH_NN_SUCCESS;
160 };
161 };
162
163 class MockIDeviceImp : public IRegisterDevice {
164 public:
165 MOCK_METHOD1(GetDeviceName, int32_t(std::string&));
166 MOCK_METHOD1(GetVendorName, int32_t(std::string&));
167 MOCK_METHOD1(GetDeviceType, int32_t(V2_0::DeviceType&));
168 MOCK_METHOD1(GetDeviceStatus, int32_t(V2_0::DeviceStatus&));
169 MOCK_METHOD2(GetSupportedOperation, int32_t(const V2_0::Model&, std::vector<bool>&));
170 MOCK_METHOD1(IsFloat16PrecisionSupported, int32_t(bool&));
171 MOCK_METHOD1(IsPerformanceModeSupported, int32_t(bool&));
172 MOCK_METHOD1(IsPrioritySupported, int32_t(bool&));
173 MOCK_METHOD1(IsDynamicInputSupported, int32_t(bool&));
174 MOCK_METHOD3(PrepareModel,
175 int32_t(const V2_0::Model&, const V2_0::ModelConfig&, OHOS::sptr<V2_0::IPreparedModel>&));
176 MOCK_METHOD1(IsModelCacheSupported, int32_t(bool&));
177 MOCK_METHOD3(PrepareModelFromModelCache, int32_t(const std::vector<V2_0::SharedBuffer>&, const V2_0::ModelConfig&,
178 OHOS::sptr<V2_0::IPreparedModel>&));
179 MOCK_METHOD2(AllocateBuffer, int32_t(uint32_t, V2_0::SharedBuffer&));
180 MOCK_METHOD1(ReleaseBuffer, int32_t(const V2_0::SharedBuffer&));
181 MOCK_METHOD2(GetVersion, int32_t(uint32_t&, uint32_t&));
182 };
183
Get(bool isStub)184 sptr<IRegisterDevice> IRegisterDevice::Get(bool isStub)
185 {
186 return IRegisterDevice::Get("device_service", isStub);
187 }
188
Get(const std::string & serviceName,bool isStub)189 sptr<IRegisterDevice> IRegisterDevice::Get(const std::string& serviceName, bool isStub)
190 {
191 if (isStub) {
192 return nullptr;
193 }
194
195 auto mockIDevice = OHOS::NeuralNetworkRuntime::CreateSharedPtr<MockIDeviceImp>();
196 if (!mockIDevice) {
197 LOGE("Failed to new MockIDeviceImp object.");
198 return nullptr;
199 }
200
201 std::string deviceName = "MockIDeviceA";
202 EXPECT_CALL(*mockIDevice, GetDeviceName(::testing::_))
203 .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceName), ::testing::Return(HDF_SUCCESS)));
204
205 std::string vendorName = "MockVendorA";
206 EXPECT_CALL(*mockIDevice, GetVendorName(::testing::_))
207 .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(vendorName), ::testing::Return(HDF_SUCCESS)));
208
209 V2_0::DeviceStatus deviceStatus = V2_0::DeviceStatus::AVAILABLE;
210 EXPECT_CALL(*mockIDevice, GetDeviceStatus(::testing::_))
211 .WillRepeatedly(::testing::DoAll(::testing::SetArgReferee<0>(deviceStatus), ::testing::Return(HDF_SUCCESS)));
212 return mockIDevice;
213 }
214
215 class DeviceRegistrarTest : public testing::Test {
216 public:
217 DeviceRegistrarTest() = default;
218 ~DeviceRegistrarTest() = default;
219 };
220
CreateNullObjectCallback()221 std::shared_ptr<Device> CreateNullObjectCallback()
222 {
223 return nullptr;
224 }
225 } // namespace UnitTest
226 } // namespace NeuralNetworkRuntime
227 } // namespace OHOS
228