/ohos5.0/foundation/ai/neural_network_runtime/example/deep_learning_framework/tflite/tools/

utils.h | 30 namespace tflite { 35 …std::unique_ptr<tflite::Interpreter>& interpreter, std::map<int, std::vector<int>>& neededInputSha… 36 bool IsEqualShape(int tensorIndex, const std::vector<int>& dim, std::unique_ptr<tflite::Interpreter… 38 void PrintResult(std::unique_ptr<tflite::Interpreter>& interpreter); 39 void AnalysisResults(Settings& settings, std::unique_ptr<tflite::Interpreter>& interpreter); 40 void ImportData(Settings& settings, std::vector<int>& imageSize, std::unique_ptr<tflite::Interprete…

utils.cpp | 26 namespace tflite { namespace 80 TfLiteStatus FilterDynamicInputs(Settings& settings, std::unique_ptr<tflite::Interpreter>& interpre… in FilterDynamicInputs() 131 void PrintResult(std::unique_ptr<tflite::Interpreter>& interpreter) in PrintResult() 163 void AnalysisResults(Settings& settings, std::unique_ptr<tflite::Interpreter>& interpreter) in AnalysisResults() 214 void ImportData(Settings& settings, std::vector<int>& imageSize, std::unique_ptr<tflite::Interprete… in ImportData() 256 bool IsEqualShape(int tensorIndex, const std::vector<int>& dims, std::unique_ptr<tflite::Interprete… in IsEqualShape()
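The helpers declared in utils.h all operate on a std::unique_ptr<tflite::Interpreter>&. Purely as a reading aid, here is a hedged sketch of what a shape check with the IsEqualShape() signature plausibly does using the public TFLite tensor API; the real implementation at utils.cpp:256 may differ.

```cpp
// Hedged sketch, not the repository's code: compare a tensor's dims against an expected shape.
#include <memory>
#include <vector>

#include "tensorflow/lite/interpreter.h"

bool IsEqualShapeSketch(int tensorIndex, const std::vector<int>& dims,
                        std::unique_ptr<tflite::Interpreter>& interpreter)
{
    const TfLiteTensor* tensor = interpreter->tensor(tensorIndex);
    if (tensor == nullptr || tensor->dims == nullptr ||
        tensor->dims->size != static_cast<int>(dims.size())) {
        return false;
    }
    for (int i = 0; i < tensor->dims->size; ++i) {
        if (tensor->dims->data[i] != dims[i]) {
            return false;  // mismatch at dimension i
        }
    }
    return true;
}
```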
log.h | 22 namespace tflite { 42 #define LOG(severity) tflite::label_classify::Log(#severity).Stream()
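log.h reduces to a single LOG(severity) macro that stringizes the severity and returns a stream. A minimal usage sketch follows; only the macro shape is taken from the excerpt, and the Log class body below is an assumption (the real one in log.h may buffer and flush differently).

```cpp
#include <iostream>
#include <ostream>
#include <string>

namespace tflite {
namespace label_classify {
// Assumed stand-in for the real Log class: prints the severity tag, then hands back a stream.
class Log {
public:
    explicit Log(const std::string& severity) : severity_(severity) {}
    std::ostream& Stream() { return std::cerr << severity_ << ": "; }

private:
    std::string severity_;
};
}  // namespace label_classify
}  // namespace tflite

// Macro shape as shown in log.h line 42.
#define LOG(severity) tflite::label_classify::Log(#severity).Stream()

int main()
{
    LOG(INFO) << "interpreter ready" << std::endl;    // prints "INFO: interpreter ready"
    LOG(ERROR) << "failed to load model" << std::endl;
    return 0;
}
```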
get_topn.h | 25 namespace tflite {

bitmap_helpers.h | 27 namespace tflite {

bitmap_helpers.cpp | 23 namespace tflite { namespace
/ohos5.0/foundation/ai/neural_network_runtime/example/deep_learning_framework/tflite/label_classify/

label_classify.cpp | 45 namespace tflite { namespace 47 using TfLiteDelegatePtr = tflite::Interpreter::TfLiteDelegatePtr; 48 using ProvidedDelegateList = tflite::tools::ProvidedDelegateList; 79 std::vector<tflite::Flag> flags; in InitFromCmdlineArgs() 109 tflite::tools::ToolParams params; 201 std::unique_ptr<tflite::FlatBufferModel> model; in InferenceModel() 202 std::unique_ptr<tflite::Interpreter> interpreter; in InferenceModel() 203 model = tflite::FlatBufferModel::BuildFromFile(settings.modelName.c_str()); in InferenceModel() 211 tflite::ops::builtin::BuiltinOpResolver resolver; in InferenceModel() 212 tflite::InterpreterBuilder(*model, resolver)(&interpreter); in InferenceModel() [all …]

label_classify.h | 25 namespace tflite { 28 tflite::FlatBufferModel* model;

CMakeLists.txt | 18 set(TOOLS_INC ${LOCAL_DIRECTORY_PATH}/tflite/tools)
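The label_classify.cpp matches show the standard TFLite construction sequence: FlatBufferModel::BuildFromFile, a BuiltinOpResolver, then InterpreterBuilder. The sketch below strings those calls together in one place; it omits the demo's command-line flags, NNRT delegate wiring, and logging, and the input-filling step is only indicated by a comment.

```cpp
#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

// Minimal sketch of the InferenceModel() flow visible in the excerpt above.
bool RunModel(const char* modelPath)
{
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(modelPath);
    if (model == nullptr) {
        return false;  // missing or malformed .tflite file
    }

    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);
    if (interpreter == nullptr || interpreter->AllocateTensors() != kTfLiteOk) {
        return false;
    }

    // ... fill the input tensors here (the demo does this via ImportData() in tflite/tools) ...

    // model must stay alive while the interpreter runs; both go out of scope together here.
    return interpreter->Invoke() == kTfLiteOk;
}
```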
/ohos5.0/foundation/ai/neural_network_runtime/example/deep_learning_framework/tflite/

CMakeLists.txt | 16 set(NNRT_INTERFACE_HOME ${LOCAL_DIRECTORY_PATH}/tflite/nnrt) 17 set(NNRT_DELEGATE_HOME ${LOCAL_DIRECTORY_PATH}/tflite/delegates/nnrt_delegate) 18 set(NNRT_DEMO_HOME ${LOCAL_DIRECTORY_PATH}/tflite/label_classify)
/ohos5.0/foundation/ai/neural_network_runtime/example/deep_learning_framework/

README_zh.md | 54 …graph compilation. Nodes that can run on NNRtDelegate call NnrtDelegateKernel's prepare interface to finish compilation; unsupported nodes call the tflite operation kernels'… 98 …y-sig/neural_network_runtime/blob/master/example/deep_learning_framework/tflite/delegates/nnrt_del… 249 # Modify ```tflite/CMakeLists.txt``` 260 # Modify ```tflite/CMakeLists.txt``` 301 …sify, libneural_network_runtime.z.so, tensorflow-lite.so and the libraries they depend on, the mobilenetv2.tflite model, the labels.txt label file, the test image… 321 # Run the demo: -m tflite model, -i test image, -l data labels, -a 1 = run inference with nnrt, 0 = without nnrt, -z 1 = print the sizes of the output tensors 322 ./label_classify -m mobilenetv2.tflite -i grace_hopper.bmp -l labels.txt -a 1 -z 1

CMakeLists.txt | 28 add_subdirectory(${LOCAL_DIRECTORY_PATH}/tflite)
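README_zh.md (line 54 above) says graph compilation is split: nodes that NnrtDelegate supports are compiled through NnrtDelegateKernel's prepare step, the rest stay on the built-in tflite operation kernels. That split happens when the delegate is attached to the interpreter. The sketch below shows only the generic attachment call; constructing the NNRT delegate itself is specific to nnrt_delegate.h and is not reproduced here.

```cpp
#include "tensorflow/lite/interpreter.h"

// Hedged sketch: hand an already-constructed delegate to the interpreter. TFLite then
// partitions the graph, compiling supported partitions through the delegate (the
// NnrtDelegateKernel prepare path described in the README) and keeping the rest on CPU.
TfLiteStatus AttachDelegate(tflite::Interpreter& interpreter, TfLiteDelegate* delegate)
{
    // The caller keeps the delegate alive for the interpreter's whole lifetime.
    return interpreter.ModifyGraphWithDelegate(delegate);
}
```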
/ohos5.0/foundation/ai/neural_network_runtime/example/deep_learning_framework/tflite/delegates/nnrt_delegate/

nnrt_delegate.h | 29 namespace tflite { 36 using tflite::delegate::nnrt::NnrtDelegateKernel;

CMakeLists.txt | 22 set(TOOLS_INC ${LOCAL_DIRECTORY_PATH}/tflite/tools)

nnrt_delegate_kernel.h | 26 namespace tflite {

nnrt_delegate.cpp | 25 namespace tflite { namespace 245 if (tflite::IsUseTargetDevice(delegateOptions)) { in CheckDeviceValid()

nnrt_utils.h | 25 namespace tflite {

nnrt_delegate_kernel.cpp | 39 namespace tflite { namespace 81 TF_LITE_ENSURE_STATUS(tflite::GetTargetDevice(context, params->delegate, m_nnrt, m_nnrtDevice)); in Init()

tensor_mapping.h | 24 namespace tflite {

nnrt_delegate_provider.cpp | 24 namespace tflite { namespace

nnrt_op_builder.h | 29 namespace tflite {

nnrt_utils.cpp | 27 namespace tflite { namespace

nnrt_op_builder.cpp | 27 namespace tflite { namespace
/ohos5.0/foundation/ai/neural_network_runtime/example/deep_learning_framework/tflite/nnrt/

nnrt_implementation.cpp | 26 namespace tflite { namespace

nnrt_implementation.h | 28 namespace tflite {
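nnrt_implementation.h/.cpp sit between the delegate and the Neural Network Runtime library (libneural_network_runtime.z.so, which the README lists among the files pushed to the device). How they bind to it is not visible in the excerpts; the sketch below is only the generic dlopen/dlsym loading pattern that such *_implementation files commonly use. NnrtApi, LoadSymbol, and the single resolved symbol are illustrative assumptions, not the file's actual contents.

```cpp
#include <dlfcn.h>

#include <cstdio>

// Illustrative function-pointer table; the real struct in nnrt_implementation.h will differ.
struct NnrtApi {
    bool loaded = false;
    void* (*nnModelConstruct)() = nullptr;  // hypothetical slot for one NNRT entry point
};

template <typename Fn>
static void LoadSymbol(void* handle, const char* name, Fn* target)
{
    *target = reinterpret_cast<Fn>(dlsym(handle, name));
    if (*target == nullptr) {
        std::fprintf(stderr, "symbol %s not found\n", name);
    }
}

static NnrtApi LoadNnrt()
{
    NnrtApi api;
    void* handle = dlopen("libneural_network_runtime.z.so", RTLD_LAZY | RTLD_LOCAL);
    if (handle == nullptr) {
        return api;  // NNRT is not available; callers fall back to plain TFLite
    }
    LoadSymbol(handle, "OH_NNModel_Construct", &api.nnModelConstruct);  // assumed symbol name
    api.loaded = true;
    return api;
}
```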