/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H
#define TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H

#include <dlfcn.h>
#include <fcntl.h>

#include <cstdio>
#include <cstdlib>
#include <memory>

#include "neural_network_runtime_type.h"

namespace tflite {
#define NNRT_LOG(format, ...) fprintf(stderr, format "\n", __VA_ARGS__)

struct NnrtApi {
    // Indicates whether the NNRt library is available. If false, loading the NNRt shared
    // library failed and TFLite will not use NNRt to run the model; if true, it will.
    bool nnrtExists;

    // Model construction interface
    OH_NNModel* (*OH_NNModel_Construct)(void);
    OH_NN_ReturnCode (*OH_NNModel_AddTensor)(OH_NNModel* model, const OH_NN_Tensor* nnTensor);
    OH_NN_ReturnCode (*OH_NNModel_SetTensorData)(OH_NNModel* model, uint32_t index, const void* buffer,
        size_t length);
    OH_NN_ReturnCode (*OH_NNModel_AddOperation)(OH_NNModel* model, OH_NN_OperationType op,
        const OH_NN_UInt32Array* paramIndices, const OH_NN_UInt32Array* inputIndices,
        const OH_NN_UInt32Array* outputIndices);
    OH_NN_ReturnCode (*OH_NNModel_SpecifyInputsAndOutputs)(OH_NNModel* model, const OH_NN_UInt32Array* inputIndices,
        const OH_NN_UInt32Array* outputIndices);
    OH_NN_ReturnCode (*OH_NNModel_Finish)(OH_NNModel* model);
    void (*OH_NNModel_Destroy)(OH_NNModel** model);
    OH_NN_ReturnCode (*OH_NNModel_GetAvailableOperations)(OH_NNModel* model, size_t deviceID, const bool** isSupported,
        uint32_t* opCount);
    // Compilation interface
    OH_NNCompilation* (*OH_NNCompilation_Construct)(const OH_NNModel* model);
    OH_NN_ReturnCode (*OH_NNCompilation_SetCache)(OH_NNCompilation* compilation, const char* cacheDir,
        uint32_t version);
    OH_NN_ReturnCode (*OH_NNCompilation_SetPerformanceMode)(OH_NNCompilation* compilation,
        OH_NN_PerformanceMode performanceMode);
    OH_NN_ReturnCode (*OH_NNCompilation_SetPriority)(OH_NNCompilation* compilation, OH_NN_Priority priority);
    OH_NN_ReturnCode (*OH_NNCompilation_EnableFloat16)(OH_NNCompilation* compilation, bool enableFloat16);
    OH_NN_ReturnCode (*OH_NNCompilation_SetDevice)(OH_NNCompilation* compilation, size_t deviceID);
    OH_NN_ReturnCode (*OH_NNCompilation_Build)(OH_NNCompilation* compilation);
    void (*OH_NNCompilation_Destroy)(OH_NNCompilation** compilation);
    // Executor interface
    OH_NNExecutor* (*OH_NNExecutor_Construct)(OH_NNCompilation* compilation);
    OH_NN_ReturnCode (*OH_NNExecutor_SetInput)(OH_NNExecutor* executor, uint32_t inputIndex,
        const OH_NN_Tensor* nnTensor, const void* buffer, size_t length);
    OH_NN_ReturnCode (*OH_NNExecutor_SetOutput)(const OH_NNExecutor* executor, uint32_t outputIndex, void* buffer,
        size_t length);
    OH_NN_ReturnCode (*OH_NNExecutor_GetOutputShape)(const OH_NNExecutor* executor, uint32_t outputIndex,
        const uint32_t** dimensions, uint32_t* dimensionCount);
    OH_NN_ReturnCode (*OH_NNExecutor_Run)(OH_NNExecutor* executor);
    OH_NN_Memory* (*OH_NNExecutor_AllocateInputMemory)(OH_NNExecutor* executor, uint32_t inputIndex, size_t length);
    OH_NN_Memory* (*OH_NNExecutor_AllocateOutputMemory)(OH_NNExecutor* executor, uint32_t outputIndex, size_t length);
    void (*OH_NNExecutor_DestroyOutputMemory)(OH_NNExecutor* executor, uint32_t outputIndex, OH_NN_Memory** memory);
    void (*OH_NNExecutor_DestroyInputMemory)(OH_NNExecutor* executor, uint32_t inputIndex, OH_NN_Memory** memory);
    OH_NN_ReturnCode (*OH_NNExecutor_SetInputWithMemory)(OH_NNExecutor* executor, uint32_t inputIndex,
        const OH_NN_Tensor* nnTensor, const OH_NN_Memory* memory);
    OH_NN_ReturnCode (*OH_NNExecutor_SetOutputWithMemory)(OH_NNExecutor* executor, uint32_t outputIndex,
        const OH_NN_Memory* memory);
    void (*OH_NNExecutor_Destroy)(OH_NNExecutor** executor);
    // Device interface
    OH_NN_ReturnCode (*OH_NNDevice_GetAllDevicesID)(const size_t** allDevicesID, uint32_t* deviceCount);
    OH_NN_ReturnCode (*OH_NNDevice_GetName)(size_t deviceID, const char** name);
    OH_NN_ReturnCode (*OH_NNDevice_GetType)(size_t deviceID, OH_NN_DeviceType* deviceType);
};

const NnrtApi* NnrtImplementation();
}  // namespace tflite

#endif  // TENSORFLOW_LITE_NNRT_IMPLEMENTATION_H
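
/*
 * Illustrative usage sketch: a caller obtains the API table from NnrtImplementation() and
 * checks nnrtExists before touching any of the function pointers, since they are only
 * meaningful when the NNRt shared library was loaded successfully. The helper name and the
 * "pick the first reported device" policy below are hypothetical; they only demonstrate the
 * calling convention of the table declared above.
 *
 *     #include "nnrt_implementation.h"
 *
 *     bool SelectFirstNnrtDevice(size_t* selectedDeviceId) {
 *         const tflite::NnrtApi* nnrt = tflite::NnrtImplementation();
 *         if (nnrt == nullptr || !nnrt->nnrtExists) {
 *             return false;  // NNRt is unavailable; fall back to the default execution path.
 *         }
 *         const size_t* allDevicesId = nullptr;
 *         uint32_t deviceCount = 0;
 *         if (nnrt->OH_NNDevice_GetAllDevicesID(&allDevicesId, &deviceCount) != OH_NN_SUCCESS ||
 *             deviceCount == 0) {
 *             return false;  // The runtime reported no usable NNRt device.
 *         }
 *         *selectedDeviceId = allDevicesId[0];  // Hypothetical policy: use the first device.
 *         return true;
 *     }
 */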