/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "skia_gpu_context.h"
#include "include/gpu/gl/GrGLInterface.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "include/core/SkTypes.h"

#include "skia_data.h"
#include "utils/data.h"
#include "utils/log.h"
#include "skia_trace_memory_dump.h"
#include "utils/system_properties.h"
#include "skia_task_executor.h"

namespace OHOS {
namespace Rosen {
namespace Drawing {
static std::mutex g_registrarMutex;

SkiaPersistentCache::SkiaPersistentCache(GPUContextOptions::PersistentCache* cache) : cache_(cache) {}

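// Bridges Skia's persistent shader-cache callbacks onto the Drawing layer:
// SkData keys and payloads are wrapped into Drawing::Data without copying
// (via sk_ref_sp), and the actual lookup/storage is delegated to cache_.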
sk_sp<SkData> SkiaPersistentCache::load(const SkData& key)
{
    if (!cache_) {
        LOGD("SkiaPersistentCache::load, failed! cache_ is nullptr");
        return nullptr;
    }
    Data keyData;
    auto skiaKeyDataImpl = keyData.GetImpl<SkiaData>();
    skiaKeyDataImpl->SetSkData(sk_ref_sp(&key));

    auto retData = cache_->Load(keyData);
    if (retData == nullptr) {
        LOGD("SkiaPersistentCache::load, failed! load data invalid");
        return nullptr;
    }

    return retData->GetImpl<SkiaData>()->GetSkData();
}

void SkiaPersistentCache::store(const SkData& key, const SkData& data)
{
    if (!cache_) {
        LOGD("SkiaPersistentCache::store, failed! cache_ is nullptr");
        return;
    }

    Data keyData;
    Data storeData;
    keyData.GetImpl<SkiaData>()->SetSkData(sk_ref_sp(&key));
    storeData.GetImpl<SkiaData>()->SetSkData(sk_ref_sp(&data));
    cache_->Store(keyData, storeData);
}

SkiaGPUContext::SkiaGPUContext() : grContext_(nullptr), skiaPersistentCache_(nullptr) {}

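// Adapter that routes tasks Skia schedules through GrContextOptions::fExecutor
// onto the shared Drawing task pool instead of Skia-owned threads.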
class CommonPoolExecutor : public SkExecutor {
public:
    void add(std::function<void(void)> func) override
    {
        TaskPoolExecutor::PostTask(std::move(func));
    }
};

static CommonPoolExecutor g_defaultExecutor;

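// Builds a GL-backed GrDirectContext from the platform's native GL interface.
// The coverage-counting and atlas path renderers are disabled (the latter to
// work around an SVG antialiasing bug), and the optional persistent shader
// cache plus the shared executor are wired into the context options.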
bool SkiaGPUContext::BuildFromGL(const GPUContextOptions& options)
{
    sk_sp<const GrGLInterface> glInterface(GrGLCreateNativeInterface());
    if (options.GetPersistentCache() != nullptr) {
        skiaPersistentCache_ = std::make_shared<SkiaPersistentCache>(options.GetPersistentCache());
    }

    GrContextOptions grOptions;
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kCoverageCounting;
    // fix svg antialiasing bug
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kAtlas;
    grOptions.fPreferExternalImagesOverES3 = true;
    grOptions.fDisableDistanceFieldPaths = true;
    grOptions.fAllowPathMaskCaching = options.GetAllowPathMaskCaching();
    grOptions.fPersistentCache = skiaPersistentCache_.get();
    grOptions.fExecutor = &g_defaultExecutor;
    grContext_ = GrDirectContext::MakeGL(std::move(glInterface), grOptions);
    return grContext_ != nullptr;
}

#ifdef RS_ENABLE_VK
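// Vulkan-backed construction. Both overloads bail out early when the system
// is not configured to use Vulkan; the overload taking GPUContextOptions
// mirrors the renderer and cache configuration of the GL path.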
bool SkiaGPUContext::BuildFromVK(const GrVkBackendContext& context)
{
    if (!SystemProperties::IsUseVulkan()) {
        return false;
    }
    GrContextOptions grOptions;
    grOptions.fExecutor = &g_defaultExecutor;
    grContext_ = GrDirectContext::MakeVulkan(context, grOptions);
    return grContext_ != nullptr;
}

bool SkiaGPUContext::BuildFromVK(const GrVkBackendContext& context, const GPUContextOptions& options)
{
    if (!SystemProperties::IsUseVulkan()) {
        return false;
    }
    if (options.GetPersistentCache() != nullptr) {
        skiaPersistentCache_ = std::make_shared<SkiaPersistentCache>(options.GetPersistentCache());
    }
    GrContextOptions grOptions;
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kCoverageCounting;
    // fix svg antialiasing bug
    grOptions.fGpuPathRenderers &= ~GpuPathRenderers::kAtlas;
    grOptions.fPreferExternalImagesOverES3 = true;
    grOptions.fDisableDistanceFieldPaths = true;
    grOptions.fAllowPathMaskCaching = options.GetAllowPathMaskCaching();
    grOptions.fPersistentCache = skiaPersistentCache_.get();
    grOptions.fExecutor = &g_defaultExecutor;
    grContext_ = GrDirectContext::MakeVulkan(context, grOptions);
    return grContext_ != nullptr;
}
#endif

void SkiaGPUContext::Flush()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::Flush, grContext_ is nullptr");
        return;
    }
    grContext_->flush();
}

void SkiaGPUContext::FlushAndSubmit(bool syncCpu)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::FlushAndSubmit, grContext_ is nullptr");
        return;
    }
    grContext_->flushAndSubmit(syncCpu);
}

void SkiaGPUContext::Submit()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::Submit, grContext_ is nullptr");
        return;
    }
    grContext_->submit();
}

void SkiaGPUContext::PerformDeferredCleanup(std::chrono::milliseconds msNotUsed)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PerformDeferredCleanup, grContext_ is nullptr");
        return;
    }
    grContext_->performDeferredCleanup(msNotUsed);
}

void SkiaGPUContext::GetResourceCacheLimits(int* maxResource, size_t* maxResourceBytes) const
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::GetResourceCacheLimits, grContext_ is nullptr");
        return;
    }
    grContext_->getResourceCacheLimits(maxResource, maxResourceBytes);
}

void SkiaGPUContext::SetResourceCacheLimits(int maxResource, size_t maxResourceBytes)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetResourceCacheLimits, grContext_ is nullptr");
        return;
    }
    grContext_->setResourceCacheLimits(maxResource, maxResourceBytes);
}

void SkiaGPUContext::GetResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::GetResourceCacheUsage, grContext_ is nullptr");
        return;
    }
    grContext_->getResourceCacheUsage(resourceCount, resourceBytes);
}

void SkiaGPUContext::FreeGpuResources()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::FreeGpuResources, grContext_ is nullptr");
        return;
    }
    grContext_->freeGpuResources();
}

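// Combines Skia's internal GPU statistics and the allocator (VMA) statistics
// into a single human-readable string.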
void SkiaGPUContext::DumpGpuStats(std::string& out)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::DumpGpuStats, grContext_ is nullptr");
        return;
    }
    SkString stat;
    grContext_->priv().dumpGpuStats(&stat);
    grContext_->dumpVmaStats(&stat);
    out = stat.c_str();
}

void SkiaGPUContext::ReleaseResourcesAndAbandonContext()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::ReleaseResourcesAndAbandonContext, grContext_ is nullptr");
        return;
    }
    grContext_->releaseResourcesAndAbandonContext();
}

void SkiaGPUContext::PurgeUnlockedResources(bool scratchResourcesOnly)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockedResources, grContext_ is nullptr");
        return;
    }
    grContext_->purgeUnlockedResources(scratchResourcesOnly);
}

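// The tag-based cache operations below convert the Drawing-layer
// GPUResourceTag (fPid, fTid, fWid, fFid, fName) into Skia's GrGpuResourceTag
// before forwarding the call.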
void SkiaGPUContext::PurgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockedResourcesByTag, grContext_ is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->purgeUnlockedResourcesByTag(scratchResourcesOnly, grTag);
}

void SkiaGPUContext::PurgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockedResourcesByPid, grContext_ is nullptr");
        return;
    }
    grContext_->purgeUnlockedResourcesByPid(scratchResourcesOnly, exitedPidSet);
}

void SkiaGPUContext::PurgeUnlockAndSafeCacheGpuResources()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeUnlockAndSafeCacheGpuResources, grContext_ is nullptr");
        return;
    }
    grContext_->purgeUnlockAndSafeCacheGpuResources();
}

void SkiaGPUContext::PurgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<pid_t>& exitedPidSet,
    const std::set<pid_t>& protectedPidSet)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::PurgeCacheBetweenFrames, grContext_ is nullptr");
        return;
    }
    grContext_->purgeCacheBetweenFrames(scratchResourcesOnly, exitedPidSet, protectedPidSet);
}

void SkiaGPUContext::ReleaseByTag(const GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::ReleaseByTag, grContext_ is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->releaseByTag(grTag);
}

void SkiaGPUContext::ResetContext()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::ResetContext, grContext_ is nullptr");
        return;
    }
    grContext_->resetContext();
}

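// The memory-statistics dumps unwrap the Drawing TraceMemoryDump to the
// underlying SkTraceMemoryDump, validating both the context and the dump
// pointers before forwarding to Skia.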
void SkiaGPUContext::DumpMemoryStatisticsByTag(TraceMemoryDump* traceMemoryDump, GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::DumpMemoryStatisticsByTag, grContext_ is nullptr");
        return;
    }

    if (!traceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatisticsByTag, traceMemoryDump is nullptr");
        return;
    }
    SkTraceMemoryDump* skTraceMemoryDump = traceMemoryDump->GetImpl<SkiaTraceMemoryDump>()->GetTraceMemoryDump().get();
    if (!skTraceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatisticsByTag, skTraceMemoryDump is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->dumpMemoryStatisticsByTag(skTraceMemoryDump, grTag);
}

void SkiaGPUContext::DumpMemoryStatistics(TraceMemoryDump* traceMemoryDump)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::DumpMemoryStatistics, grContext_ is nullptr");
        return;
    }

    if (!traceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatistics, traceMemoryDump is nullptr");
        return;
    }
    SkTraceMemoryDump* skTraceMemoryDump = traceMemoryDump->GetImpl<SkiaTraceMemoryDump>()->GetTraceMemoryDump().get();
    if (!skTraceMemoryDump) {
        LOGD("SkiaGPUContext::DumpMemoryStatistics, skTraceMemoryDump is nullptr");
        return;
    }
    grContext_->dumpMemoryStatistics(skTraceMemoryDump);
}

void SkiaGPUContext::SetCurrentGpuResourceTag(const GPUResourceTag& tag)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetCurrentGpuResourceTag, grContext_ is nullptr");
        return;
    }
    GrGpuResourceTag grTag(tag.fPid, tag.fTid, tag.fWid, tag.fFid, tag.fName);
    grContext_->setCurrentGrResourceTag(grTag);
}

sk_sp<GrDirectContext> SkiaGPUContext::GetGrContext() const
{
    return grContext_;
}

void SkiaGPUContext::SetGrContext(const sk_sp<GrDirectContext>& grContext)
{
    grContext_ = grContext;
}

void SkiaGPUContext::GetUpdatedMemoryMap(std::unordered_map<pid_t, size_t>& out)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::GetUpdatedMemoryMap, grContext_ is nullptr");
        return;
    }
    grContext_->getUpdatedMemoryMap(out);
}

void SkiaGPUContext::InitGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::InitGpuMemoryLimit, grContext_ is nullptr");
        return;
    }
    grContext_->initGpuMemoryLimit(callback, size);
}

#ifdef RS_ENABLE_VK
void SkiaGPUContext::StoreVkPipelineCacheData()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::StoreVkPipelineCacheData, grContext_ is nullptr");
        return;
    }
    grContext_->storeVkPipelineCacheData();
}
#endif

void SkiaGPUContext::BeginFrame()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::BeginFrame, grContext_ is nullptr");
        return;
    }
    grContext_->beginFrame();
}

void SkiaGPUContext::EndFrame()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::EndFrame, grContext_ is nullptr");
        return;
    }
    grContext_->endFrame();
}

void SkiaGPUContext::SetGpuCacheSuppressWindowSwitch(bool enabled)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetGpuCacheSuppressWindowSwitch, grContext_ is nullptr");
        return;
    }
    grContext_->setGpuCacheSuppressWindowSwitch(enabled);
}

void SkiaGPUContext::SetGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SetGpuMemoryAsyncReclaimerSwitch, grContext_ is nullptr");
        return;
    }
    grContext_->setGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
}

void SkiaGPUContext::FlushGpuMemoryInWaitQueue()
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::FlushGpuMemoryInWaitQueue, grContext_ is nullptr");
        return;
    }
    grContext_->flushGpuMemoryInWaitQueue();
}

void SkiaGPUContext::SuppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
{
    if (!grContext_) {
        LOGD("SkiaGPUContext::SuppressGpuCacheBelowCertainRatio, grContext_ is nullptr");
        return;
    }
    grContext_->suppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
}

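// Registry mapping each GrDirectContext (keyed by its raw pointer) to a
// task-posting callback, so that GetPostFunc can recover the callback for a
// given context. Access is serialized by g_registrarMutex.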
std::unordered_map<uintptr_t, std::function<void(const std::function<void()>& task)>>
    SkiaGPUContext::contextPostMap_ = {};

void SkiaGPUContext::RegisterPostFunc(const std::function<void(const std::function<void()>& task)>& func)
{
    std::unique_lock lock(g_registrarMutex);
    if (grContext_ != nullptr) {
        contextPostMap_[uintptr_t(grContext_.get())] = func;
    }
}

std::function<void(const std::function<void()>& task)> SkiaGPUContext::GetPostFunc(sk_sp<GrDirectContext> grContext)
{
    std::unique_lock lock(g_registrarMutex);
    if (grContext != nullptr && contextPostMap_.count(uintptr_t(grContext.get())) > 0) {
        return contextPostMap_[uintptr_t(grContext.get())];
    }
    return nullptr;
}

void SkiaGPUContext::VmaDefragment()
{
    if (grContext_ != nullptr) {
        grContext_->vmaDefragment();
    }
}
} // namespace Drawing
} // namespace Rosen
} // namespace OHOS