Searched refs:tensorSize (Results 1 – 8 of 8) sorted by relevance
/ohos5.0/foundation/ai/neural_network_runtime/frameworks/native/neural_network_runtime/
hdi_device_v1_0.cpp
    158  size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());  in GetSupportedOperation() [local]
    160  if (tensorSize > 0) {  in GetSupportedOperation()
    161  hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);  in GetSupportedOperation()
    249  size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());  in PrepareModel() [local]
    251  if (tensorSize > 0) {  in PrepareModel()
    252  hdiRet = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);  in PrepareModel()

hdi_device_v2_0.cpp
    209  size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());  in GetSupportedOperation() [local]
    211  if (tensorSize > 0) {  in GetSupportedOperation()
    212  ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);  in GetSupportedOperation()
    293  size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());  in PrepareModel() [local]
    295  if (tensorSize > 0) {  in PrepareModel()
    296  ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);  in PrepareModel()

hdi_device_v2_1.cpp
    204  size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());  in GetSupportedOperation() [local]
    206  if (tensorSize > 0) {  in GetSupportedOperation()
    207  ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);  in GetSupportedOperation()
    288  size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get());  in PrepareModel() [local]
    290  if (tensorSize > 0) {  in PrepareModel()
    291  ret = m_iDevice->AllocateBuffer(tensorSize, tensorBuffer);  in PrepareModel()

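All three HDI device versions follow the same pattern in GetSupportedOperation() and PrepareModel(): query the LiteGraph for the total size of its constant tensors and call AllocateBuffer only when that size is non-zero. A minimal C++ sketch of that pattern is below; SharedBuffer, IDevice, and AllocateConstTensorBuffer are stand-ins introduced for the sketch, not the real HDI types, and in the real code tensorSize comes from mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(model.get()).

#include <cstddef>
#include <cstdint>

// Stand-in for the HDI shared-memory buffer descriptor.
struct SharedBuffer {
    int fd = -1;
    size_t bufferSize = 0;
};

// Stand-in for the m_iDevice proxy used in the hits above.
struct IDevice {
    virtual ~IDevice() = default;
    virtual int32_t AllocateBuffer(size_t length, SharedBuffer& buffer) = 0;
};

// Hypothetical helper: allocate a device buffer sized for all constant
// tensors of the graph, skipping the HDI call when there are none.
int32_t AllocateConstTensorBuffer(IDevice& device, size_t tensorSize, SharedBuffer& tensorBuffer)
{
    if (tensorSize == 0) {
        return 0;  // no constant tensors, nothing to allocate
    }
    return device.AllocateBuffer(tensorSize, tensorBuffer);
}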
/ohos5.0/foundation/ai/neural_network_runtime/example/drivers/nnrt/v1_0/hdi_cpu_service/src/ |
nnrt_device_service.cpp
    267  size_t tensorSize = model.allTensors.size();  in ValidateModel() [local]
    268  …auto inputIt = std::find_if(model.inputIndex.begin(), model.inputIndex.end(), [tensorSize](size_t …  in ValidateModel()
    269  return inputIndex > tensorSize;  in ValidateModel()
    276  …auto outputIt = std::find_if(model.outputIndex.begin(), model.outputIndex.end(), [tensorSize](size…  in ValidateModel()
    277  return outputIndex > tensorSize;  in ValidateModel()

/ohos5.0/foundation/ai/neural_network_runtime/example/drivers/nnrt/v2_0/hdi_cpu_service/src/ |
nnrt_device_service.cpp
    302  size_t tensorSize = model.allTensors.size();  in ValidateModel() [local]
    303  …auto inputIt = std::find_if(model.inputIndex.begin(), model.inputIndex.end(), [tensorSize](size_t …  in ValidateModel()
    304  return inputIndex > tensorSize;  in ValidateModel()
    311  …auto outputIt = std::find_if(model.outputIndex.begin(), model.outputIndex.end(), [tensorSize](size…  in ValidateModel()
    312  return outputIndex > tensorSize;  in ValidateModel()

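Both example drivers validate the model the same way: tensorSize is the number of entries in allTensors, and std::find_if scans inputIndex and outputIndex for any index outside that range. A rough sketch of that check follows, with hypothetical Tensor/Model types and function name; note the hits above compare with '>', while the sketch uses '>=' so an index equal to the tensor count is also rejected.

#include <algorithm>
#include <cstdint>
#include <vector>

struct Tensor {};

// Hypothetical model type with the three members referenced in the hits.
struct Model {
    std::vector<Tensor> allTensors;
    std::vector<uint32_t> inputIndex;
    std::vector<uint32_t> outputIndex;
};

bool ValidateTensorIndices(const Model& model)
{
    const size_t tensorSize = model.allTensors.size();
    // '>=' also rejects index == tensorSize; the hits above use '>'.
    auto outOfRange = [tensorSize](uint32_t index) { return index >= tensorSize; };

    auto inputIt = std::find_if(model.inputIndex.begin(), model.inputIndex.end(), outOfRange);
    auto outputIt = std::find_if(model.outputIndex.begin(), model.outputIndex.end(), outOfRange);
    return inputIt == model.inputIndex.end() && outputIt == model.outputIndex.end();
}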
/ohos5.0/foundation/ai/neural_network_runtime/example/deep_learning_framework/tflite/delegates/nnrt_delegate/ |
nnrt_utils.cpp
    163  …s GetTensorSize(TfLiteContext* context, const int32_t* dims, int32_t dimCount, int64_t& tensorSize)  in GetTensorSize() [argument]
    173  tensorSize = 1;  in GetTensorSize()
    179  tensorSize *= *(dims + i);  in GetTensorSize()

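GetTensorSize() in nnrt_utils.cpp computes a tensor's element count as the product of its dimensions (its return type is truncated in the hit above). A simplified sketch is below; the TfLiteContext parameter from the real signature is omitted since this sketch does no TfLite error reporting, the bool return and the negative-dimension handling are assumptions, and the function is renamed to make clear it is not the original.

#include <cstdint>

// Simplified stand-in for GetTensorSize(): multiply the dimensions together.
bool GetTensorElementCount(const int32_t* dims, int32_t dimCount, int64_t& tensorSize)
{
    if (dims == nullptr || dimCount < 0) {
        return false;
    }
    tensorSize = 1;
    for (int32_t i = 0; i < dimCount; ++i) {
        if (dims[i] < 0) {
            return false;  // unknown/dynamic dimension, size cannot be computed
        }
        tensorSize *= dims[i];  // mirrors 'tensorSize *= *(dims + i);' in the hit
    }
    return true;
}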
nnrt_utils.h
    104  … GetTensorSize(TfLiteContext* context, const int32_t* dims, int32_t dimCount, int64_t& tensorSize);

nnrt_op_builder.cpp
    365  int64_t tensorSize = 0;  in AddTensor() [local]
    367  …_ENSURE_STATUS(GetTensorSize(m_context, nnTensor.dimensions, nnTensor.dimensionCount, tensorSize));  in AddTensor()
    369  depthwiseTensorData.assign(tensorSize * typeBytes, 0);  in AddTensor()

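AddTensor() uses the element count returned by GetTensorSize() to size a zero-filled staging buffer: tensorSize elements times typeBytes bytes per element. A small sketch of just that sizing step is below; the function name is hypothetical and the surrounding delegate and tensor plumbing is omitted.

#include <cstdint>
#include <vector>

// Allocate a zero-initialized byte buffer for a tensor of 'tensorSize'
// elements, each 'typeBytes' bytes wide, as done for depthwiseTensorData above.
std::vector<uint8_t> MakeZeroedTensorData(int64_t tensorSize, size_t typeBytes)
{
    std::vector<uint8_t> data;
    if (tensorSize > 0 && typeBytes > 0) {
        data.assign(static_cast<size_t>(tensorSize) * typeBytes, 0);
    }
    return data;
}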