1 /*
2  * Copyright (c) 2023 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef NEURAL_NETWORK_BACKEND_NNCOMPILED_CACHE_H
17 #define NEURAL_NETWORK_BACKEND_NNCOMPILED_CACHE_H
18 
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <memory>
#include <vector>

#include "device.h"
#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime.h"
#include "tensor_desc.h"
26 
27 namespace OHOS {
28 namespace NeuralNetworkRuntime {
// UINT32_MAX is reserved as the sentinel value for an invalid cache version.
// NOTE(review): "CAHCE" is a typo for "CACHE", but the identifier is kept
// unchanged to preserve source compatibility with existing callers.
constexpr uint32_t INVALID_CAHCE_VERSION = UINT32_MAX;
// Upper bound on the number of cache files (at most 100 files are allowed).
constexpr size_t NN_CACHE_FILE_NUMBER_MAX = 100;
31 
// Metadata record for an on-disk compiled-model cache. Presumably this mirrors
// the layout of the cache-info file handled by WriteCacheInfo/CheckCacheInfo
// (which operate on raw int64_t arrays) — confirm against the .cpp before
// reordering or resizing any member.
struct NNCompiledCacheInfo {
    int64_t fileNumber{0};                     // number of cached model files — TODO confirm
    int64_t version{0};                        // cache version the files were written with
    int64_t deviceId{0};                       // backend/device that produced the cache — TODO confirm
    std::vector<unsigned short> modelCheckSum; // per-file checksums; unsigned short matches GetCrc16's return type
    int64_t opVersion{0};                      // operator-set version — TODO confirm semantics
};
39 
// Persists and restores compiled-model buffers for a selected backend.
// Typical flow: SetBackend() (and optionally SetModelName()) first, then
// Save() to write caches to a directory or Restore() to read them back.
// All method bodies are defined out of line; comments below describe the
// declared contract as far as this header shows it.
class NNCompiledCache {
public:
    NNCompiledCache() = default;
    ~NNCompiledCache() = default;

    // Writes the given cache buffers into cacheDir, tagged with version.
    // Returns an OH_NN_ReturnCode indicating success or the failure reason.
    OH_NN_ReturnCode Save(const std::vector<Buffer>& caches,
                          const std::string& cacheDir,
                          uint32_t version);
    // Loads cache buffers from cacheDir into caches; version is presumably
    // validated against the stored cache info — confirm in the .cpp.
    OH_NN_ReturnCode Restore(const std::string& cacheDir,
                             uint32_t version,
                             std::vector<Buffer>& caches);

    // Selects the backend (device) whose caches this object manages.
    OH_NN_ReturnCode SetBackend(size_t backendID);
    // Sets the model name used when composing cache file names — TODO confirm.
    void SetModelName(const std::string& modelName);
    // Writes the cache-info metadata block (cacheSize int64_t entries from
    // cacheInfo) into cacheDir.
    OH_NN_ReturnCode WriteCacheInfo(uint32_t cacheSize,
                                    std::unique_ptr<int64_t[]>& cacheInfo,
                                    const std::string& cacheDir) const;
    // Parses the cache-info file at cacheInfoPath into modelCacheInfo and
    // validates it.
    OH_NN_ReturnCode CheckCacheInfo(NNCompiledCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const;

private:
    // Writes both the model cache files and the cache-info file for caches.
    OH_NN_ReturnCode GenerateCacheFiles(const std::vector<Buffer>& caches,
                                        const std::string& cacheDir,
                                        uint32_t version) const;
    // Writes the cache model files and fills cacheInfo with their metadata.
    OH_NN_ReturnCode GenerateCacheModel(const std::vector<Buffer>& caches,
                                        std::unique_ptr<int64_t[]>& cacheInfo,
                                        const std::string& cacheDir,
                                        uint32_t version) const;
    // Reads one cache model file into cache.
    OH_NN_ReturnCode ReadCacheModelFile(const std::string& file, Buffer& cache) const;
    // Computes a CRC-16 checksum over buffer[0..length).
    // NOTE(review): buffer could be const char* — kept as-is to match the
    // out-of-line definition.
    unsigned short GetCrc16(char* buffer, size_t length) const;
    // Determines the size in bytes of the open stream ifs, returned via fileSize.
    OH_NN_ReturnCode GetCacheFileLength(std::ifstream& ifs, int& fileSize) const;
    // Validates that cachePath is an acceptable cache directory — TODO confirm
    // exact checks (existence/permissions) in the .cpp.
    OH_NN_ReturnCode VerifyCachePath(const std::string& cachePath) const;

private:
    size_t m_backendID {0};                   // backend selected via SetBackend
    std::string m_modelName;                  // model name set via SetModelName
    std::shared_ptr<Device> m_device {nullptr}; // device handle for m_backendID — TODO confirm where it is resolved
};
77 
78 } // namespace NeuralNetworkRuntime
79 } // namespace OHOS
80 
81 #endif // NEURAL_NETWORK_BACKEND_NNCOMPILED_CACHE_H
82