/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_H
#define NEURAL_NETWORK_RUNTIME_COMPILATION_H

#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "compiler.h"
#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h"
27 namespace OHOS {
28 namespace NeuralNetworkRuntime {
29 struct Compilation {
30     size_t backendID {0};
31     void* nnModel {nullptr};
32     char* offlineModelPath {nullptr};
33     std::pair<void*, size_t> offlineModelBuffer;
34     char* cachePath {nullptr};
35     uint32_t cacheVersion {0};
36     std::pair<void*, size_t> cacheBuffer;
37     OH_NN_Priority priority {OH_NN_PRIORITY_NONE};
38     OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_NONE};
39     bool enableFp16 {false};
40     Compiler* compiler {nullptr};
41     std::vector<std::shared_ptr<void>> options;
42     std::unordered_map<std::string, std::vector<char>> configs;
43     size_t nnrtModelID {0};
44     uint32_t hiaiModelId {0};
45     int32_t callingPid {-1};
46     bool isNeedModelLatency {false};
47 
~CompilationCompilation48     ~Compilation()
49     {
50         options.clear();
51     }
52 };
53 } // namespace NeuralNetworkRuntime
54 } // namespace OHOS

#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_H