/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "platform/ohos/backend/rs_vulkan_context.h"
#include <memory>
#include <mutex>
#include <set>
#include <dlfcn.h>
#include <vector>
#include "common/rs_optional_trace.h"
#include "platform/common/rs_log.h"
#include "render_context/memory_handler.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "unistd.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_ohos.h"
#include "sync_fence.h"

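// Resolves the Vulkan entry point "vk<name>" via AcquireProc() and stores it in the matching
// member function pointer (vk<name>). Failure is only logged, so callers must still check the
// resulting pointer before use.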
#define ACQUIRE_PROC(name, context)                        \
    if (!(vk##name = AcquireProc("vk" #name, context))) {  \
        ROSEN_LOGE("Could not acquire proc: vk" #name);    \
    }

namespace OHOS {
namespace Rosen {

thread_local std::shared_ptr<Drawing::GPUContext> RsVulkanContext::drawingContext_ = nullptr;
thread_local std::shared_ptr<Drawing::GPUContext> RsVulkanContext::protectedDrawingContext_ = nullptr;
thread_local bool RsVulkanContext::isProtected_ = false;
static RsVulkanInterface rsVulkanInterface;
static RsVulkanInterface rsProtectedVulkanInterface;
static RsVulkanInterface& GetRsVulkanInterfaceInternal(bool isProtected)
{
    return isProtected ? rsProtectedVulkanInterface : rsVulkanInterface;
}

static std::vector<const char*> gInstanceExtensions = {
    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
};

static std::vector<const char*> gDeviceExtensions = {
    VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
    VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
    VK_KHR_MAINTENANCE1_EXTENSION_NAME,
    VK_KHR_MAINTENANCE2_EXTENSION_NAME,
    VK_KHR_MAINTENANCE3_EXTENSION_NAME,
    VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
    VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
    VK_OHOS_NATIVE_BUFFER_EXTENSION_NAME,
    VK_OHOS_EXTERNAL_MEMORY_EXTENSION_NAME,
};

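// Bounds applied to Skia's GPU resource cache when a drawing context is built; see
// CreateDrawingContext() and CreateNewDrawingContext() below.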
static const int GR_CACHE_MAX_COUNT = 8192;
static const size_t GR_CACHE_MAX_BYTE_SIZE = 96 * (1 << 20);
static const int32_t CACHE_LIMITS_TIMES = 5; // this will change RS memory!

void RsVulkanInterface::Init(bool isProtected)
{
    handle_ = nullptr;
    acquiredMandatoryProcAddresses_ = false;
    memHandler_ = nullptr;
    acquiredMandatoryProcAddresses_ = OpenLibraryHandle() && SetupLoaderProcAddresses();
    CreateInstance();
    SelectPhysicalDevice(isProtected);
    CreateDevice(isProtected);
    std::unique_lock<std::mutex> lock(vkMutex_);
    if (RSSystemProperties::GetVkQueueDividedEnable()) {
        if (!isProtected) {
            CreateSkiaBackendContext(&backendContext_, false, isProtected);
        }
        CreateSkiaBackendContext(&hbackendContext_, true, isProtected);
    } else {
        CreateSkiaBackendContext(&backendContext_, false, isProtected);
    }
}

RsVulkanInterface::~RsVulkanInterface()
{
    for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
        if (semaphoreFence.fence != nullptr) {
            semaphoreFence.fence->Wait(-1);
        }
        vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
    }
    usedSemaphoreFenceList_.clear();
    if (protectedMemoryFeatures_) {
        delete protectedMemoryFeatures_;
    }
    CloseLibraryHandle();
}

bool RsVulkanInterface::IsValid() const
{
    return instance_ != VK_NULL_HANDLE && device_ != VK_NULL_HANDLE;
}

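// Looks up the loader-level entry points (vkGetInstanceProcAddr, vkGetDeviceProcAddr, etc.)
// from the dlopen()ed libvulkan handle. A null handle_ is treated as success, presumably
// because the symbols are then expected to be resolved through direct linkage.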
bool RsVulkanInterface::SetupLoaderProcAddresses()
{
    if (handle_ == nullptr) {
        return true;
    }
    vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(dlsym(handle_, "vkGetInstanceProcAddr"));
    vkGetDeviceProcAddr = reinterpret_cast<PFN_vkGetDeviceProcAddr>(dlsym(handle_, "vkGetDeviceProcAddr"));
    vkEnumerateInstanceExtensionProperties = reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
        dlsym(handle_, "vkEnumerateInstanceExtensionProperties"));
    vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(dlsym(handle_, "vkCreateInstance"));

    if (!vkGetInstanceProcAddr) {
        ROSEN_LOGE("Could not acquire vkGetInstanceProcAddr");
        return false;
    }

    VkInstance null_instance = VK_NULL_HANDLE;
    ACQUIRE_PROC(EnumerateInstanceLayerProperties, null_instance);
    return true;
}

bool RsVulkanInterface::CreateInstance()
{
    if (!acquiredMandatoryProcAddresses_) {
        return false;
    }

    const VkApplicationInfo info = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .pNext = nullptr,
        .pApplicationName = "OHOS",
        .applicationVersion = 0,
        .pEngineName = "Rosen",
        .engineVersion = VK_MAKE_VERSION(1, 0, 0),
        .apiVersion = VK_API_VERSION_1_2,
    };

    const VkInstanceCreateInfo create_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .pApplicationInfo = &info,
        .enabledLayerCount = 0,
        .ppEnabledLayerNames = nullptr,
        .enabledExtensionCount = static_cast<uint32_t>(gInstanceExtensions.size()),
        .ppEnabledExtensionNames = gInstanceExtensions.data(),
    };
    if (vkCreateInstance(&create_info, nullptr, &instance_) != VK_SUCCESS) {
        ROSEN_LOGE("Could not create vulkan instance");
        return false;
    }

    ACQUIRE_PROC(CreateDevice, instance_);
    ACQUIRE_PROC(DestroyDevice, instance_);
    ACQUIRE_PROC(DestroyInstance, instance_);
    ACQUIRE_PROC(EnumerateDeviceLayerProperties, instance_);
    ACQUIRE_PROC(EnumeratePhysicalDevices, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceQueueFamilyProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties2, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures2, instance_);

    return true;
}

bool RsVulkanInterface::SelectPhysicalDevice(bool isProtected)
{
    if (!instance_) {
        return false;
    }
    uint32_t deviceCount = 0;
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, nullptr) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }

    std::vector<VkPhysicalDevice> physicalDevices;
    physicalDevices.resize(deviceCount);
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, physicalDevices.data()) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }
    physicalDevice_ = physicalDevices[0];
    VkPhysicalDeviceProperties2 physDevProps = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
        0,
        {},
    };
    VkPhysicalDeviceProtectedMemoryProperties protMemProps = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES,
        0,
        {},
    };
    if (isProtected) {
        physDevProps.pNext = &protMemProps;
    }
    vkGetPhysicalDeviceProperties2(physicalDevice_, &physDevProps);
    return true;
}

bool RsVulkanInterface::CreateDevice(bool isProtected)
{
    if (!physicalDevice_) {
        return false;
    }
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, nullptr);

    std::vector<VkQueueFamilyProperties> queueProps(queueCount);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, queueProps.data());

    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueFamilyIndex_ = i;
            break;
        }
    }

    if (graphicsQueueFamilyIndex_ == UINT32_MAX) {
        ROSEN_LOGE("graphicsQueueFamilyIndex_ is not valid");
        return false;
    }
    // Queue priority is relative within the same device: a value greater than 0.5 indicates
    // high priority and a value less than 0.5 indicates low priority.
    const float priorities[2] = {1.0f, 0.2f};
    VkDeviceQueueCreateFlags deviceQueueCreateFlags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    std::vector<VkDeviceQueueCreateInfo> queueCreate {{
        .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = nullptr,
        .flags = deviceQueueCreateFlags, .queueFamilyIndex = graphicsQueueFamilyIndex_, .queueCount = 2,
        .pQueuePriorities = priorities,
    }};
    ycbcrFeature_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature_.pNext = nullptr;
    physicalDeviceFeatures2_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    physicalDeviceFeatures2_.pNext = &ycbcrFeature_;
    void** tailPnext = &ycbcrFeature_.pNext;
    protectedMemoryFeatures_ = new VkPhysicalDeviceProtectedMemoryFeatures;
    if (isProtected) {
        protectedMemoryFeatures_->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures_->pNext = nullptr;
        *tailPnext = protectedMemoryFeatures_;
        tailPnext = &protectedMemoryFeatures_->pNext;
    }

    vkGetPhysicalDeviceFeatures2(physicalDevice_, &physicalDeviceFeatures2_);

    const VkDeviceCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = &physicalDeviceFeatures2_,
        .flags = 0, .queueCreateInfoCount = static_cast<uint32_t>(queueCreate.size()),
        .pQueueCreateInfos = queueCreate.data(),
        .enabledLayerCount = 0, .ppEnabledLayerNames = nullptr,
        .enabledExtensionCount = static_cast<uint32_t>(gDeviceExtensions.size()),
        .ppEnabledExtensionNames = gDeviceExtensions.data(), .pEnabledFeatures = nullptr,
    };
    if (vkCreateDevice(physicalDevice_, &createInfo, nullptr, &device_) != VK_SUCCESS) {
        ROSEN_LOGE("vkCreateDevice failed");
        return false;
    }
#ifdef RS_ENABLE_VKQUEUE_PRIORITY
    if (createInfo.pQueueCreateInfos != nullptr) {
        RS_LOGI("%{public}s queue priority[%{public}f], hardware queue priority[%{public}f]",
            __func__, createInfo.pQueueCreateInfos->pQueuePriorities[0],
            createInfo.pQueueCreateInfos->pQueuePriorities[1]);
    }
#endif
    if (!SetupDeviceProcAddresses(device_)) {
        return false;
    }

    const VkDeviceQueueInfo2 deviceQueueInfo2 = {VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, nullptr,
        deviceQueueCreateFlags, static_cast<uint32_t>(graphicsQueueFamilyIndex_), 0};
    const VkDeviceQueueInfo2 deviceQueueInfo2HardW = {VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, nullptr,
        deviceQueueCreateFlags, static_cast<uint32_t>(graphicsQueueFamilyIndex_), 1};
    vkGetDeviceQueue2(device_, &deviceQueueInfo2, &queue_);
    vkGetDeviceQueue2(device_, &deviceQueueInfo2HardW, &hardwareQueue_);
    return true;
}

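// Fills a GrVkBackendContext that Skia uses to drive this device. "createNew" selects the
// second (hardware) queue obtained in CreateDevice(); otherwise the primary graphics queue is
// used. Skia does not take ownership of the instance or device.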
bool RsVulkanInterface::CreateSkiaBackendContext(GrVkBackendContext* context, bool createNew, bool isProtected)
{
    auto getProc = CreateSkiaGetProc();
    if (getProc == nullptr) {
        ROSEN_LOGE("CreateSkiaBackendContext getProc is null");
        return false;
    }

    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(physicalDevice_, &features);

    uint32_t fFeatures = 0;
    if (features.geometryShader) {
        fFeatures |= kGeometryShader_GrVkFeatureFlag;
    }
    if (features.dualSrcBlend) {
        fFeatures |= kDualSrcBlend_GrVkFeatureFlag;
    }
    if (features.sampleRateShading) {
        fFeatures |= kSampleRateShading_GrVkFeatureFlag;
    }

    context->fInstance = instance_;
    context->fPhysicalDevice = physicalDevice_;
    context->fDevice = device_;
    if (createNew) {
        context->fQueue = hardwareQueue_;
    } else {
        context->fQueue = queue_;
    }
    context->fGraphicsQueueIndex = graphicsQueueFamilyIndex_;
    context->fMinAPIVersion = VK_API_VERSION_1_2;

    uint32_t extensionFlags = kKHR_surface_GrVkExtensionFlag;
    extensionFlags |= kKHR_ohos_surface_GrVkExtensionFlag;

    context->fExtensions = extensionFlags;

    skVkExtensions_.init(getProc, instance_, physicalDevice_,
        gInstanceExtensions.size(), gInstanceExtensions.data(),
        gDeviceExtensions.size(), gDeviceExtensions.data());

    context->fVkExtensions = &skVkExtensions_;
    context->fDeviceFeatures2 = &physicalDeviceFeatures2_;
    context->fFeatures = fFeatures;
    context->fGetProc = std::move(getProc);
    context->fOwnsInstanceAndDevice = false;
    context->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}

bool RsVulkanInterface::SetupDeviceProcAddresses(VkDevice device)
{
    ACQUIRE_PROC(AllocateCommandBuffers, device_);
    ACQUIRE_PROC(AllocateMemory, device_);
    ACQUIRE_PROC(BeginCommandBuffer, device_);
    ACQUIRE_PROC(BindImageMemory, device_);
    ACQUIRE_PROC(BindImageMemory2, device_);
    ACQUIRE_PROC(CmdPipelineBarrier, device_);
    ACQUIRE_PROC(CreateCommandPool, device_);
    ACQUIRE_PROC(CreateFence, device_);
    ACQUIRE_PROC(CreateImage, device_);
    ACQUIRE_PROC(CreateImageView, device_);
    ACQUIRE_PROC(CreateSemaphore, device_);
    ACQUIRE_PROC(DestroyCommandPool, device_);
    ACQUIRE_PROC(DestroyFence, device_);
    ACQUIRE_PROC(DestroyImage, device_);
    ACQUIRE_PROC(DestroyImageView, device_);
    ACQUIRE_PROC(DestroySemaphore, device_);
    ACQUIRE_PROC(DeviceWaitIdle, device_);
    ACQUIRE_PROC(EndCommandBuffer, device_);
    ACQUIRE_PROC(FreeCommandBuffers, device_);
    ACQUIRE_PROC(FreeMemory, device_);
    ACQUIRE_PROC(GetDeviceQueue, device_);
    ACQUIRE_PROC(GetImageMemoryRequirements, device_);
    ACQUIRE_PROC(QueueSubmit, device_);
    ACQUIRE_PROC(QueueWaitIdle, device_);
    ACQUIRE_PROC(ResetCommandBuffer, device_);
    ACQUIRE_PROC(ResetFences, device_);
    ACQUIRE_PROC(WaitForFences, device_);
    ACQUIRE_PROC(GetNativeBufferPropertiesOHOS, device_);
    ACQUIRE_PROC(QueueSignalReleaseImageOHOS, device_);
    ACQUIRE_PROC(ImportSemaphoreFdKHR, device_);
    ACQUIRE_PROC(SetFreqAdjustEnable, device_);

    return true;
}

bool RsVulkanInterface::OpenLibraryHandle()
{
    ROSEN_LOGI("VulkanProcTable OpenLibraryHandle: dlopen libvulkan.so.");
    dlerror();
    handle_ = dlopen("/system/lib64/platformsdk/libvulkan.so", RTLD_NOW | RTLD_LOCAL);
    if (handle_ == nullptr) {
        ROSEN_LOGE("Could not open the vulkan library: %{public}s", dlerror());
        return false;
    }
    return true;
}

bool RsVulkanInterface::CloseLibraryHandle()
{
    if (handle_ != nullptr) {
        dlerror();
        if (dlclose(handle_) != 0) {
            ROSEN_LOGE("Could not close the vulkan lib handle. This indicates a leak. %{public}s", dlerror());
        }
        handle_ = nullptr;
    }
    return handle_ == nullptr;
}

PFN_vkVoidFunction RsVulkanInterface::AcquireProc(
    const char* procName,
    const VkInstance& instance) const
{
    if (procName == nullptr || !vkGetInstanceProcAddr) {
        return nullptr;
    }

    return vkGetInstanceProcAddr(instance, procName);
}

PFN_vkVoidFunction RsVulkanInterface::AcquireProc(
    const char* procName,
    const VkDevice& device) const
{
    if (procName == nullptr || !device || !vkGetDeviceProcAddr) {
        return nullptr;
    }
    return vkGetDeviceProcAddr(device, procName);
}

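// Builds the GrVkGetProc callback handed to Skia. Device-level lookups are answered with
// vkGetDeviceProcAddr, except that any "vkQueueSubmit" request is redirected to
// RsVulkanContext::HookedVkQueueSubmit so submissions can be serialized per queue.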
GrVkGetProc RsVulkanInterface::CreateSkiaGetProc() const
{
    if (!IsValid()) {
        return nullptr;
    }

    return [this](const char* procName, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            std::string_view s{procName};
            if (s.find("vkQueueSubmit") == 0) {
                return (PFN_vkVoidFunction)RsVulkanContext::HookedVkQueueSubmit;
            }
            auto result = AcquireProc(procName, device);
            if (result != nullptr) {
                return result;
            }
        }
        return AcquireProc(procName, instance);
    };
}

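// Builds the Skia GPUContext for the calling thread. Non-independent contexts share this
// interface's backendContext_; independent ones go through CreateNewDrawingContext(), which
// builds on hbackendContext_. The resource cache limits are scaled by CACHE_LIMITS_TIMES,
// with the byte size capped at GR_CACHE_MAX_BYTE_SIZE before scaling.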
std::shared_ptr<Drawing::GPUContext> RsVulkanInterface::CreateDrawingContext(bool independentContext, bool isProtected)
{
    std::unique_lock<std::mutex> lock(vkMutex_);
    if (independentContext) {
        return CreateNewDrawingContext(isProtected);
    }

    auto drawingContext = std::make_shared<Drawing::GPUContext>();
    Drawing::GPUContextOptions options;
    memHandler_ = std::make_shared<MemoryHandler>();
    std::string vkVersion = std::to_string(VK_API_VERSION_1_2);
    auto size = vkVersion.size();
    memHandler_->ConfigureContext(&options, vkVersion.c_str(), size);
    drawingContext->BuildFromVK(backendContext_, options);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    drawingContext->GetResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        drawingContext->SetResourceCacheLimits(cacheLimitsTimes * maxResources,
            cacheLimitsTimes * std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        drawingContext->SetResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    return drawingContext;
}

void RsVulkanInterface::DestroyAllSemaphoreFence()
{
    std::lock_guard<std::mutex> lock(semaphoreLock_);
    RS_LOGE("Device lost clear all semaphore fences, count [%{public}zu] ", usedSemaphoreFenceList_.size());
    for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
        vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
    }
    usedSemaphoreFenceList_.clear();
}

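// Creates a binary semaphore for a queue submission. Before allocating, semaphores whose
// release fence has signalled (or that never had one) are destroyed, and the whole list is
// flushed if it grows past 3000 entries to bound memory use.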
VkSemaphore RsVulkanInterface::RequireSemaphore()
{
    {
        std::lock_guard<std::mutex> lock(semaphoreLock_);
        // 3000 means too many used semaphore fences
        if (usedSemaphoreFenceList_.size() >= 3000) {
            RS_LOGE("Too many used semaphore fences, count [%{public}zu] ", usedSemaphoreFenceList_.size());
            for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
                if (semaphoreFence.fence != nullptr) {
                    semaphoreFence.fence->Wait(-1);
                }
                vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
            }
            usedSemaphoreFenceList_.clear();
        }
        for (auto it = usedSemaphoreFenceList_.begin(); it != usedSemaphoreFenceList_.end();) {
            auto& fence = it->fence;
            if (fence == nullptr || fence->GetStatus() == FenceStatus::SIGNALED) {
                vkDestroySemaphore(device_, it->semaphore, nullptr);
                it->semaphore = VK_NULL_HANDLE;
                it = usedSemaphoreFenceList_.erase(it);
            } else {
                it++;
            }
        }
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    auto err = vkCreateSemaphore(device_, &semaphoreInfo, nullptr, &semaphore);
    if (err != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }
    return semaphore;
}

void RsVulkanInterface::SendSemaphoreWithFd(VkSemaphore semaphore, int fenceFd)
{
    std::lock_guard<std::mutex> lock(semaphoreLock_);
    auto& semaphoreFence = usedSemaphoreFenceList_.emplace_back();
    semaphoreFence.semaphore = semaphore;
    semaphoreFence.fence = (fenceFd != -1 ? std::make_unique<SyncFence>(fenceFd) : nullptr);
}

std::shared_ptr<Drawing::GPUContext> RsVulkanInterface::CreateNewDrawingContext(bool isProtected)
{
    if (hcontext_ != nullptr) {
        return hcontext_;
    }
    if (!RSSystemProperties::GetVkQueueDividedEnable()) {
        CreateSkiaBackendContext(&hbackendContext_, true, isProtected);
    }
    auto drawingContext = std::make_shared<Drawing::GPUContext>();
    Drawing::GPUContextOptions options;
    memHandler_ = std::make_shared<MemoryHandler>();
    std::string vkVersion = std::to_string(VK_API_VERSION_1_2);
    auto size = vkVersion.size();
    memHandler_->ConfigureContext(&options, vkVersion.c_str(), size);
    drawingContext->BuildFromVK(hbackendContext_, options);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    drawingContext->GetResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        drawingContext->SetResourceCacheLimits(cacheLimitsTimes * maxResources, cacheLimitsTimes *
            std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        drawingContext->SetResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    hcontext_ = drawingContext;
    return drawingContext;
}

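// Initializes both the default and the protected Vulkan interface and creates the per-thread
// drawing contexts bound to them. isProtected_ is toggled around the protected setup so that
// lookups through GetRsVulkanInterface() target the protected interface during that phase.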
RsVulkanContext::RsVulkanContext()
{
    rsVulkanInterface.Init();
    // Create drawingContext_ bound to backendContext_
    drawingContext_ = rsVulkanInterface.CreateDrawingContext();
    isProtected_ = true;
    rsProtectedVulkanInterface.Init(isProtected_);
    // Create protectedDrawingContext_ bound to hbackendContext_
    protectedDrawingContext_ = rsProtectedVulkanInterface.CreateDrawingContext(
        RSSystemProperties::GetVkQueueDividedEnable(), true);
    isProtected_ = false;
}

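// Typical usage (illustrative sketch, not taken from callers in this file): render threads
// fetch the process-wide singleton and then a thread-local Skia context, e.g.
//   auto& vkContext = RsVulkanContext::GetSingleton();
//   auto gpuContext = vkContext.GetDrawingContext();
//   // ... record and flush Drawing/Skia work on gpuContext ...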
RsVulkanContext& RsVulkanContext::GetSingleton()
{
    static RsVulkanContext singleton {};
    return singleton;
}

RsVulkanInterface& RsVulkanContext::GetRsVulkanInterface()
{
    return GetRsVulkanInterfaceInternal(isProtected_);
}

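// Replacement for vkQueueSubmit installed through CreateSkiaGetProc(). It serializes access to
// the shared graphics/hardware queues with per-queue mutexes and, for protected contexts,
// chains a VkProtectedSubmitInfo onto the submit info.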
VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSubmit(VkQueue queue, uint32_t submitCount,
    VkSubmitInfo* pSubmits, VkFence fence)
{
    bool isProtected = RsVulkanContext::GetSingleton().GetIsProtected();
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (isProtected) {
        memset_s(&protectedSubmitInfo, sizeof(VkProtectedSubmitInfo), 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
        pSubmits->pNext = &protectedSubmitInfo;
    }

    RsVulkanInterface& vkInterface = RsVulkanContext::GetSingleton().GetRsVulkanInterface();
    if (queue == vkInterface.GetHardwareQueue()) {
        std::lock_guard<std::mutex> lock(vkInterface.hGraphicsQueueMutex_);
        RS_LOGD("%{public}s hardware queue", __func__);
        RS_OPTIONAL_TRACE_NAME_FMT("%s hardware queue", __func__);
        return vkInterface.vkQueueSubmit(queue, submitCount, pSubmits, fence);
    }
    std::lock_guard<std::mutex> lock(vkInterface.graphicsQueueMutex_);
    RS_LOGD("%{public}s queue", __func__);
    RS_OPTIONAL_TRACE_NAME_FMT("%s queue", __func__);
    return vkInterface.vkQueueSubmit(queue, submitCount, pSubmits, fence);
}

VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSignalReleaseImageOHOS(VkQueue queue, uint32_t waitSemaphoreCount,
    const VkSemaphore* pWaitSemaphores, VkImage image, int32_t* pNativeFenceFd)
{
    RsVulkanInterface& vkInterface = RsVulkanContext::GetSingleton().GetRsVulkanInterface();
    if (queue == vkInterface.GetHardwareQueue()) {
        std::lock_guard<std::mutex> lock(vkInterface.hGraphicsQueueMutex_);
        RS_LOGD("%{public}s hardware queue", __func__);
        RS_OPTIONAL_TRACE_NAME_FMT("%s hardware queue", __func__);
        return vkInterface.vkQueueSignalReleaseImageOHOS(queue, waitSemaphoreCount,
            pWaitSemaphores, image, pNativeFenceFd);
    }
    std::lock_guard<std::mutex> lock(vkInterface.graphicsQueueMutex_);
    RS_LOGD("%{public}s queue", __func__);
    RS_OPTIONAL_TRACE_NAME_FMT("%s queue", __func__);
    return vkInterface.vkQueueSignalReleaseImageOHOS(queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
}

std::shared_ptr<Drawing::GPUContext> RsVulkanContext::CreateDrawingContext(bool independentContext)
{
    auto& drawingContext = isProtected_ ? protectedDrawingContext_ : drawingContext_;
    if (drawingContext != nullptr && !independentContext) {
        return drawingContext;
    }
    drawingContext = GetRsVulkanInterface().CreateDrawingContext(independentContext, isProtected_);
    return drawingContext;
}

std::shared_ptr<Drawing::GPUContext> RsVulkanContext::GetDrawingContext()
{
    auto& drawingContext = isProtected_ ? protectedDrawingContext_ : drawingContext_;
    if (drawingContext != nullptr) {
        return drawingContext;
    }
    drawingContext = GetRsVulkanInterface().CreateDrawingContext(false, isProtected_);
    return drawingContext;
}

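// Switching protected mode purges unlocked GPU resources of the current context before the
// thread-local flag flips, then resets the newly selected context so Skia rebuilds its cached
// GPU state.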
void RsVulkanContext::SetIsProtected(bool isProtected)
{
    if (isProtected_ != isProtected) {
        ClearGrContext(isProtected);
    }
}

void RsVulkanContext::ClearGrContext(bool isProtected)
{
    GetDrawingContext()->PurgeUnlockedResources(true);
    isProtected_ = isProtected;
    GetDrawingContext()->ResetContext();
}
} // namespace Rosen
} // namespace OHOS