/ohos5.0/docs/zh-cn/device-dev/reference/hdi-apis/nnrt/ |
H A D | interface_i_prepared_model_v10.md | 20 …(#exportmodelcache) ([out] struct [SharedBuffer](_shared_buffer_v10.md)[] modelCache) | Exports the model cache. | 30 IPreparedModel::ExportModelCache ([out] struct SharedBuffer[] modelCache) 42 | modelCache | Model cache array; the element order is the same as the order in which the elements were exported. See [SharedBuffer](_shared_buffer_v10.md) for the SharedBuffer definition. |
|
H A D | interface_i_nnrt_device_v20.md | 33 …elfrommodelcache) ([in] struct [SharedBuffer](_shared_buffer_v20.md)[] modelCache, [in] struct [Mo… 34 …pareofflinemodel) ([in] struct [SharedBuffer](_shared_buffer_v20.md)[] modelCache, [in] struct [Mo… 343 INnrtDevice::PrepareModelFromModelCache ([in] struct SharedBuffer[] modelCache, [in] struct ModelCo… 355 | modelCache | Array of model cache files; the array order is the same as the array order at export time. For the element type, see the SharedBuffer definition [SharedBuffer](_shared_buffer_v20.m… 369 INnrtDevice::PrepareOfflineModel ([in] struct SharedBuffer[] modelCache, [in] struct ModelConfig co… 381 | modelCache | Array of offline-model file caches; the element order depends on the offline model format passed in by the user. For the element type, see the SharedBuffer definition [SharedBuffer](_shared_buffer_v2…
|
H A D | interface_i_prepared_model_v20.md | 22 …](#exportmodelcache) ([out] struct [SharedBuffer](_shared_buffer_v20.md)[] modelCache) | Exports the model cache. | 33 IPreparedModel::ExportModelCache ([out] struct SharedBuffer[] modelCache) 47 | modelCache | Model cache array; the element order is the same as the order in which the elements were exported. See [SharedBuffer](_shared_buffer_v20.md) for the SharedBuffer definition. |
|
H A D | interface_i_nnrt_device_v10.md | 33 …elfrommodelcache) ([in] struct [SharedBuffer](_shared_buffer_v10.md)[] modelCache, [in] struct [Mo… 342 INnrtDevice::PrepareModelFromModelCache ([in] struct SharedBuffer[] modelCache, [in] struct ModelCo… 354 | modelCache | Array of model cache files; the array order is the same as the array order at export time. For the element type, see the SharedBuffer definition [SharedBuffer](_shared_buffer_v10.m…
|
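Read together, the documentation entries above describe a round trip: IPreparedModel::ExportModelCache hands back an array of SharedBuffer descriptors, and INnrtDevice::PrepareModelFromModelCache rebuilds a prepared model from that array in the same order (PrepareOfflineModel takes vendor offline-model buffers instead). A minimal caller-side sketch of that round trip against the V1_0 interfaces follows; the generated header paths and the exact out-parameter of PrepareModelFromModelCache are assumptions inferred from this listing, not copied from the IDL.

    // Sketch only: export a compiled model's cache and reload it on the same device (V1_0 names).
    // Header paths are assumed; error handling is reduced to returning the HDI status code.
    #include <vector>
    #include <hdf_base.h>                 // HDF_SUCCESS
    #include <v1_0/innrt_device.h>        // assumed idl-generated headers
    #include <v1_0/iprepared_model.h>
    #include <v1_0/nnrt_types.h>

    using namespace OHOS::HDI::Nnrt::V1_0;

    int32_t RoundTripModelCache(const OHOS::sptr<INnrtDevice>& device,
                                const OHOS::sptr<IPreparedModel>& compiledModel,
                                OHOS::sptr<IPreparedModel>& reloadedModel)
    {
        // 1. Export: the driver fills modelCache with shared-memory descriptors.
        std::vector<SharedBuffer> modelCache;
        int32_t ret = compiledModel->ExportModelCache(modelCache);
        if (ret != HDF_SUCCESS) {
            return ret;
        }

        // 2. Reload: pass the buffers back in the same order they were exported.
        //    Only enableFloat16 is set here; it is the one ModelConfig field visible in this listing.
        ModelConfig config;
        config.enableFloat16 = false;
        return device->PrepareModelFromModelCache(modelCache, config, reloadedModel);
    }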
/ohos5.0/foundation/ai/neural_network_runtime/test/unittest/common/v1_0/ |
H A D | compilation_mock_idevice.cpp | 165 OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector<Buffer>& modelCache) in ExportModelCache() argument 167 if (!modelCache.empty()) { in ExportModelCache() 182 modelCache.emplace_back(buffer); in ExportModelCache() 188 modelCache.emplace_back(buffer2); in ExportModelCache() 225 OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector<Buffer>& modelCache, in PrepareModelFromModelCache() argument 233 if (modelCache.size() == 0 || config.enableFloat16 == false) { in PrepareModelFromModelCache()
|
/ohos5.0/foundation/ai/neural_network_runtime/test/unittest/common/v2_0/ |
H A D | compilation_mock_idevice.cpp | 165 OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector<Buffer>& modelCache) in ExportModelCache() argument 167 if (!modelCache.empty()) { in ExportModelCache() 182 modelCache.emplace_back(buffer); in ExportModelCache() 188 modelCache.emplace_back(buffer2); in ExportModelCache() 225 OH_NN_ReturnCode HDIDeviceV2_0::PrepareModelFromModelCache(const std::vector<Buffer>& modelCache, in PrepareModelFromModelCache() argument 233 if (modelCache.size() == 0 || config.enableFloat16 == false) { in PrepareModelFromModelCache()
|
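Both mock files above stub out the HDI layer for the compilation unit tests: ExportModelCache succeeds only when the input vector is empty and then appends two fake buffers, while PrepareModelFromModelCache fails when the cache is empty or float16 is disabled. A condensed, self-contained approximation of that test double (the types are redeclared locally and the numeric enum values are illustrative, not the real OH_NN_ReturnCode values) looks like:

    // Hypothetical, self-contained stand-in for compilation_mock_idevice.cpp behaviour.
    #include <cstddef>
    #include <vector>

    // Redeclared locally for the sketch; the real definitions live in the NNRt framework headers.
    enum OH_NN_ReturnCode { OH_NN_SUCCESS = 0, OH_NN_INVALID_PARAMETER, OH_NN_FAILED };
    struct Buffer { void* data = nullptr; size_t length = 0; };
    struct ModelConfig { bool enableFloat16 = false; };

    // ExportModelCache mock: refuse a pre-populated vector, then hand back two fake buffers.
    OH_NN_ReturnCode MockExportModelCache(std::vector<Buffer>& modelCache)
    {
        if (!modelCache.empty()) {
            return OH_NN_INVALID_PARAMETER;
        }
        static char fakeCache1[16] = {};
        static char fakeCache2[16] = {};
        modelCache.emplace_back(Buffer{fakeCache1, sizeof(fakeCache1)});
        modelCache.emplace_back(Buffer{fakeCache2, sizeof(fakeCache2)});
        return OH_NN_SUCCESS;
    }

    // PrepareModelFromModelCache mock: reject an empty cache or a config without float16 enabled.
    OH_NN_ReturnCode MockPrepareModelFromModelCache(const std::vector<Buffer>& modelCache,
                                                    const ModelConfig& config)
    {
        if (modelCache.empty() || !config.enableFloat16) {
            return OH_NN_FAILED;
        }
        return OH_NN_SUCCESS;
    }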
/ohos5.0/foundation/ai/neural_network_runtime/test/unittest/components/v1_0/hdi_prepared_model/ |
H A D | hdi_prepared_model_test.cpp | 139 std::vector<Buffer> modelCache; variable 149 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 166 std::vector<Buffer> modelCache; variable 176 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 192 std::vector<Buffer> modelCache; variable 193 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 210 std::vector<Buffer> modelCache; variable 220 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 239 std::vector<Buffer> modelCache; variable 241 modelCache.emplace_back(buffer); [all …]
|
/ohos5.0/foundation/ai/neural_network_runtime/test/unittest/components/v2_0/hdi_prepared_model/ |
H A D | hdi_prepared_model_test.cpp | 137 std::vector<Buffer> modelCache; variable 147 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 164 std::vector<Buffer> modelCache; variable 174 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 190 std::vector<Buffer> modelCache; variable 191 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 208 std::vector<Buffer> modelCache; variable 218 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 237 std::vector<Buffer> modelCache; variable 239 modelCache.emplace_back(buffer); [all …]
|
/ohos5.0/foundation/ai/neural_network_runtime/test/unittest/components/v2_1/hdi_prepared_model/ |
H A D | hdi_prepared_model_test.cpp | 137 std::vector<Buffer> modelCache; variable 147 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 164 std::vector<Buffer> modelCache; variable 174 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 190 std::vector<Buffer> modelCache; variable 191 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 208 std::vector<Buffer> modelCache; variable 218 OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache); 237 std::vector<Buffer> modelCache; variable 239 modelCache.emplace_back(buffer); [all …]
|
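The three hdi_prepared_model_test.cpp files above (V1_0, V2_0, V2_1) exercise ExportModelCache through the framework wrapper with the same shape: declare a std::vector<Buffer>, call ExportModelCache, and check the returned OH_NN_ReturnCode, including one negative case that pre-fills the vector. A minimal gtest sketch of that pattern follows; the included header and the MakePreparedModel() fixture helper are assumptions, and the exact error code for the negative case is not asserted.

    // Sketch of the ExportModelCache test pattern; MakePreparedModel() and the header are assumed.
    #include <gtest/gtest.h>
    #include <vector>
    #include "hdi_prepared_model_v1_0.h"   // assumed framework header providing Buffer and the wrapper

    TEST(HdiPreparedModelTest, ExportModelCacheEmptyInputSucceeds)
    {
        auto preparedModel = MakePreparedModel();              // hypothetical fixture helper
        std::vector<Buffer> modelCache;                        // must start out empty
        OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache);
        EXPECT_EQ(OH_NN_SUCCESS, result);
        EXPECT_FALSE(modelCache.empty());                      // the wrapper appends the exported buffers
    }

    TEST(HdiPreparedModelTest, ExportModelCacheNonEmptyInputRejected)
    {
        auto preparedModel = MakePreparedModel();
        char dummy[8] = {};
        std::vector<Buffer> modelCache = { {dummy, sizeof(dummy)} };
        // A pre-populated vector is treated as an invalid argument by the wrapper (see the
        // frameworks/native entries further down in this listing).
        OH_NN_ReturnCode result = preparedModel->ExportModelCache(modelCache);
        EXPECT_NE(OH_NN_SUCCESS, result);
    }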
/ohos5.0/foundation/ability/idl_tool/test/hdi_unittest/nnrt_v1_0/cpp_target/nnrt/v1_0/ |
H A D | prepared_model_proxy.cpp.txt | 56 …:PreparedModelProxy::ExportModelCache(std::vector<OHOS::HDI::Nnrt::V1_0::SharedBuffer>& modelCache) 58 return OHOS::HDI::Nnrt::V1_0::PreparedModelProxy::ExportModelCache_(modelCache, Remote()); 72 …y::ExportModelCache_(std::vector<OHOS::HDI::Nnrt::V1_0::SharedBuffer>& modelCache, const sptr<IRem… 106 modelCache.clear(); 107 modelCache.reserve(modelCacheSize); 114 modelCache.push_back(value0);
|
H A D | prepared_model_stub.cpp.txt | 101 std::vector<OHOS::HDI::Nnrt::V1_0::SharedBuffer> modelCache; 109 modelCache.reserve(capacity); 117 int32_t preparedModelRet = impl->ExportModelCache(modelCache); 123 if (!preparedModelReply.WriteUint32(modelCache.size())) { 124 HDF_LOGE("%{public}s: write modelCache size failed!", __func__); 127 for (const auto& it0 : modelCache) {
|
H A D | prepared_model_proxy.h.txt | 38 int32_t ExportModelCache(std::vector<OHOS::HDI::Nnrt::V1_0::SharedBuffer>& modelCache) override; 45 static int32_t ExportModelCache_(std::vector<OHOS::HDI::Nnrt::V1_0::SharedBuffer>& modelCache,
|
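prepared_model_stub.cpp.txt above is the send side of the IPC boundary: the stub reserves the caller-supplied capacity, invokes the implementation's ExportModelCache, then writes the element count followed by each SharedBuffer into the reply parcel. A reduced sketch of that pattern is below; WriteSharedBuffer stands in for the idl-generated per-struct marshalling routine, whose real name is not visible in this listing.

    // Condensed stub-side marshalling pattern (sketch; WriteSharedBuffer is hypothetical).
    #include <vector>
    #include <hdf_base.h>
    #include <hdf_log.h>
    #include <message_parcel.h>

    static int32_t StubExportModelCache(IPreparedModel& impl, OHOS::MessageParcel& reply,
                                        uint32_t capacity)
    {
        std::vector<SharedBuffer> modelCache;
        modelCache.reserve(capacity);                  // capacity was read earlier from the request parcel

        int32_t preparedModelRet = impl.ExportModelCache(modelCache);
        if (preparedModelRet != HDF_SUCCESS) {
            return preparedModelRet;
        }

        // Write the element count first so the proxy knows how many structs to read back.
        if (!reply.WriteUint32(static_cast<uint32_t>(modelCache.size()))) {
            HDF_LOGE("%{public}s: write modelCache size failed!", __func__);
            return HDF_ERR_INVALID_PARAM;
        }
        for (const auto& it0 : modelCache) {
            if (!WriteSharedBuffer(reply, it0)) {      // hypothetical stand-in for generated marshalling
                return HDF_ERR_INVALID_PARAM;
            }
        }
        return HDF_SUCCESS;
    }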
/ohos5.0/foundation/ai/neural_network_runtime/frameworks/native/neural_network_runtime/ |
H A D | hdi_prepared_model_v1_0.cpp | 179 OH_NN_ReturnCode HDIPreparedModelV1_0::ExportModelCache(std::vector<Buffer>& modelCache) in ExportModelCache() argument 181 if (!modelCache.empty()) { in ExportModelCache() 182 LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size()); in ExportModelCache() 203 modelCache.emplace_back(modelbuffer); in ExportModelCache()
|
H A D | hdi_prepared_model_v2_0.cpp | 180 OH_NN_ReturnCode HDIPreparedModelV2_0::ExportModelCache(std::vector<Buffer>& modelCache) in ExportModelCache() argument 182 if (!modelCache.empty()) { in ExportModelCache() 183 LOGE("The vector of modelCache should be empty. size=%{public}zu", modelCache.size()); in ExportModelCache() 203 modelCache.emplace_back(modelbuffer); in ExportModelCache()
|
H A D | hdi_prepared_model_v2_1.cpp | 180 OH_NN_ReturnCode HDIPreparedModelV2_1::ExportModelCache(std::vector<Buffer>& modelCache) in ExportModelCache() argument 182 if (!modelCache.empty()) { in ExportModelCache() 183 LOGE("The vector of modelCache should be empty. size=%{public}zu", modelCache.size()); in ExportModelCache() 203 modelCache.emplace_back(modelbuffer); in ExportModelCache()
|
H A D | hdi_device_v1_0.cpp | 301 OH_NN_ReturnCode HDIDeviceV1_0::PrepareModelFromModelCache(const std::vector<Buffer>& modelCache, in PrepareModelFromModelCache() argument 308 size_t modelCacheSize = modelCache.size(); in PrepareModelFromModelCache() 310 ret = memManager->GetMemory(modelCache[i].data, memory); in PrepareModelFromModelCache()
|
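The framework wrappers above sit between the NNRt runtime and the HDI proxies: HDIPreparedModelV*::ExportModelCache requires the output vector to be empty, forwards the call over HDI, and converts each returned SharedBuffer (an ashmem fd plus sizes) into a plain Buffer {data, length}; hdi_device_v1_0.cpp walks the opposite direction through a MemoryManager when rebuilding a cache. The MemoryManager API is only partially visible here, so the sketch below substitutes a direct mmap of the shared-memory fd and treats the SharedBuffer field names (fd, dataSize) as assumptions.

    // Sketch: map each exported SharedBuffer into host memory and expose it as Buffer {data, length}.
    // mmap stands in for the framework's MemoryManager, and the SharedBuffer field names are assumed.
    #include <sys/mman.h>
    #include <vector>

    OH_NN_ReturnCode ExportModelCacheToBuffers(OHOS::sptr<V1_0::IPreparedModel>& hdiPreparedModel,
                                               std::vector<Buffer>& modelCache)
    {
        if (!modelCache.empty()) {
            LOGE("The vector of modelCache should be empty. size=%zu", modelCache.size());
            return OH_NN_INVALID_PARAMETER;
        }

        std::vector<V1_0::SharedBuffer> iBuffers;
        int32_t ret = hdiPreparedModel->ExportModelCache(iBuffers);
        if (ret != HDF_SUCCESS) {
            return OH_NN_FAILED;
        }

        for (const auto& iBuffer : iBuffers) {
            // Map the shared-memory region described by the driver; dataSize is the valid length.
            void* data = mmap(nullptr, iBuffer.dataSize, PROT_READ, MAP_SHARED, iBuffer.fd, 0);
            if (data == MAP_FAILED) {
                return OH_NN_MEMORY_ERROR;
            }
            modelCache.emplace_back(Buffer{data, static_cast<size_t>(iBuffer.dataSize)});
        }
        return OH_NN_SUCCESS;
    }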
/ohos5.0/foundation/ability/idl_tool/test/hdi_unittest/nnrt_v2_0/cpp_target/nnrt/v2_0/ |
H A D | prepared_model_proxy.cpp.txt | 56 …:PreparedModelProxy::ExportModelCache(std::vector<OHOS::HDI::Nnrt::V2_0::SharedBuffer>& modelCache) 58 return OHOS::HDI::Nnrt::V2_0::PreparedModelProxy::ExportModelCache_(modelCache, Remote()); 78 …y::ExportModelCache_(std::vector<OHOS::HDI::Nnrt::V2_0::SharedBuffer>& modelCache, const sptr<IRem… 112 modelCache.clear(); 113 modelCache.reserve(modelCacheSize); 120 modelCache.push_back(value0);
|
H A D | prepared_model_stub.cpp.txt | 108 std::vector<OHOS::HDI::Nnrt::V2_0::SharedBuffer> modelCache; 116 modelCache.reserve(capacity); 124 int32_t preparedModelRet = impl->ExportModelCache(modelCache); 130 if (!preparedModelReply.WriteUint32(modelCache.size())) { 131 HDF_LOGE("%{public}s: write modelCache size failed!", __func__); 134 for (const auto& it0 : modelCache) {
|
H A D | prepared_model_proxy.h.txt | 38 int32_t ExportModelCache(std::vector<OHOS::HDI::Nnrt::V2_0::SharedBuffer>& modelCache) override; 48 static int32_t ExportModelCache_(std::vector<OHOS::HDI::Nnrt::V2_0::SharedBuffer>& modelCache,
|
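The V2_0 generated code mirrors the V1_0 files higher up. prepared_model_proxy.cpp.txt is the receive side of the same IPC call: after the transaction returns, the proxy reads the element count, clears and reserves the vector, and reads the SharedBuffer structs back in wire order. A reduced sketch follows; ReadSharedBuffer stands in for the generated per-struct unmarshalling routine.

    // Condensed proxy-side unmarshalling pattern (sketch; ReadSharedBuffer is hypothetical).
    #include <vector>
    #include <hdf_base.h>
    #include <hdf_log.h>
    #include <message_parcel.h>

    static int32_t ReadModelCacheFromReply(OHOS::MessageParcel& reply,
                                           std::vector<SharedBuffer>& modelCache)
    {
        uint32_t modelCacheSize = 0;
        if (!reply.ReadUint32(modelCacheSize)) {
            HDF_LOGE("%{public}s: read modelCache size failed!", __func__);
            return HDF_ERR_INVALID_PARAM;
        }

        modelCache.clear();
        modelCache.reserve(modelCacheSize);
        for (uint32_t i = 0; i < modelCacheSize; ++i) {
            SharedBuffer value0;
            if (!ReadSharedBuffer(reply, value0)) {   // hypothetical stand-in for generated unmarshalling
                return HDF_ERR_INVALID_PARAM;
            }
            modelCache.push_back(value0);             // wire order == export order
        }
        return HDF_SUCCESS;
    }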
/ohos5.0/foundation/ai/neural_network_runtime/example/drivers/nnrt/v1_0/hdi_cpu_service/src/ |
H A D | prepared_model_service.cpp | 48 int32_t PreparedModelService::ExportModelCache(std::vector<SharedBuffer>& modelCache) in ExportModelCache() argument 50 if (!modelCache.empty()) { in ExportModelCache() 60 modelCache.emplace_back(SharedBuffer {fd, size, 0, size}); in ExportModelCache() 89 …modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetA… in ExportModelCache()
|
H A D | nnrt_device_service.cpp | 149 int32_t NnrtDeviceService::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, in PrepareModelFromModelCache() argument 155 if (modelCache.size() != 1) { in PrepareModelFromModelCache() 161 auto ret = parser.Init(modelCache[0]); in PrepareModelFromModelCache() 175 ret = service->Compile(modelBuffer, modelCache[0].dataSize); in PrepareModelFromModelCache()
|
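In the example CPU driver above, the service side of ExportModelCache publishes the compiled cache through ashmem and describes each region as a SharedBuffer (the listing shows both a raw-fd variant and one built from an OHOS::Ashmem object). A sketch of the Ashmem variant follows; the SharedBuffer field order {fd, bufferSize, offset, dataSize} and the cacheBytes/keepAlive parameters are assumptions introduced for illustration.

    // Sketch: service-side ExportModelCache that publishes the compiled cache through ashmem.
    // cacheBytes (the serialized cache) and keepAlive (owner of the ashmem object) are hypothetical.
    #include <vector>
    #include <ashmem.h>
    #include <hdf_base.h>

    int32_t ExportModelCacheViaAshmem(const std::vector<char>& cacheBytes,
                                      std::vector<OHOS::sptr<OHOS::Ashmem>>& keepAlive,
                                      std::vector<SharedBuffer>& modelCache)
    {
        if (!modelCache.empty()) {
            return HDF_ERR_INVALID_PARAM;     // the output array must start out empty, as in the service
        }

        int32_t size = static_cast<int32_t>(cacheBytes.size());
        OHOS::sptr<OHOS::Ashmem> cache = OHOS::Ashmem::CreateAshmem("model_cache", size);
        if (cache == nullptr || !cache->MapReadAndWriteAshmem() ||
            !cache->WriteToAshmem(cacheBytes.data(), size, 0)) {
            return HDF_FAILURE;
        }
        keepAlive.push_back(cache);           // the real service keeps the Ashmem alive as a member

        // Field order {fd, bufferSize, offset, dataSize} is assumed from the listing above.
        modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(),
                                              0, cache->GetAshmemSize()});
        return HDF_SUCCESS;
    }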
/ohos5.0/foundation/ai/neural_network_runtime/example/drivers/nnrt/v2_0/hdi_cpu_service/src/ |
H A D | prepared_model_service.cpp | 50 int32_t PreparedModelService::ExportModelCache(std::vector<SharedBuffer>& modelCache) in ExportModelCache() argument 52 if (!modelCache.empty()) { in ExportModelCache() 62 modelCache.emplace_back(SharedBuffer {fd, size, 0, size}); in ExportModelCache() 91 …modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetA… in ExportModelCache()
|
H A D | nnrt_device_service.cpp | 171 int32_t NnrtDeviceService::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, in PrepareModelFromModelCache() argument 177 if (modelCache.size() != 1) { in PrepareModelFromModelCache() 183 auto result = parser.Init(modelCache[0]); in PrepareModelFromModelCache() 209 ret = service->Compile(modelBuffer, modelCache[0].dataSize); in PrepareModelFromModelCache()
|
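nnrt_device_service.cpp above is the mirror operation on the device service: it accepts exactly one SharedBuffer in the cache array, parses it, and recompiles the model from the cached bytes. A trimmed sketch of that control flow follows; ParseCache and CompileFromBuffer are hypothetical stand-ins for the example driver's parser and compile step.

    // Sketch of the service-side cache-reload flow; ParseCache and CompileFromBuffer are hypothetical.
    #include <vector>
    #include <hdf_base.h>
    #include <hdf_log.h>

    int32_t PrepareModelFromModelCacheSketch(const std::vector<SharedBuffer>& modelCache,
                                             const ModelConfig& config,
                                             OHOS::sptr<IPreparedModel>& preparedModel)
    {
        // The example CPU driver exports exactly one cache buffer, so reload expects one as well.
        if (modelCache.size() != 1) {
            HDF_LOGE("%{public}s: expected exactly one cache buffer, got %{public}zu",
                     __func__, modelCache.size());
            return HDF_ERR_INVALID_PARAM;
        }

        void* modelBuffer = nullptr;
        int32_t ret = ParseCache(modelCache[0], &modelBuffer);   // map the fd and validate (assumed helper)
        if (ret != HDF_SUCCESS) {
            return ret;
        }

        // Recompile from the cached bytes; dataSize is the valid length inside the shared buffer.
        return CompileFromBuffer(modelBuffer, modelCache[0].dataSize, config, preparedModel);
    }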
/ohos5.0/foundation/ai/neural_network_runtime/test/unittest/components/v1_0/hdi_device/ |
H A D | hdi_device_test.cpp | 848 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 865 …OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel,… 888 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 902 …OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel,… 917 std::vector<Buffer> modelCache = { { nullptr, 0 } }; variable 921 …OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel,… 943 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 959 …OH_NN_ReturnCode result = hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel,… 981 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 1019 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable [all …]
|
/ohos5.0/foundation/ai/neural_network_runtime/test/unittest/components/v2_0/hdi_device/ |
H A D | hdi_device_test.cpp | 990 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 1030 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 1059 std::vector<Buffer> modelCache = { { nullptr, 0 } }; variable 1085 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 1123 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 1161 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 1199 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 2362 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 2402 std::vector<Buffer> modelCache = { { buffer, 100 } }; variable 2431 std::vector<Buffer> modelCache = { { nullptr, 0 } }; variable [all …]
|
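The hdi_device tests above build a one-element cache as { { buffer, 100 } } (or { { nullptr, 0 } } for the failure cases) and drive the framework's PrepareModelFromModelCache with varying mock behaviour. A minimal sketch of that test shape follows; CreateHDIDevice is a hypothetical fixture helper, and the fourth argument of the call is truncated in this listing, so the trailing bool here is an assumption.

    // Sketch of the hdi_device-level test shape; CreateHDIDevice and the trailing bool are assumptions.
    #include <gtest/gtest.h>
    #include <memory>
    #include <vector>

    TEST(HdiDeviceTest, PrepareModelFromModelCacheRejectsNullBuffer)
    {
        auto hdiDevice = CreateHDIDevice();                    // hypothetical fixture helper
        std::vector<Buffer> modelCache = { { nullptr, 0 } };   // invalid cache entry from the listing
        ModelConfig config;
        std::shared_ptr<PreparedModel> preparedModel;
        bool extraOutArg = false;                              // stands in for the truncated fourth argument
        OH_NN_ReturnCode result =
            hdiDevice->PrepareModelFromModelCache(modelCache, config, preparedModel, extraOutArg);
        EXPECT_NE(OH_NN_SUCCESS, result);
    }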