/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrDirectContext.h>
#include <GrTypes.h>
#include <android/sync.h>
#include <gui/TraceUtils.h>
#include <ui/FatVector.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

#include "Properties.h"
#include "RenderThread.h"
#include "pipeline/skia/ShaderCache.h"
#include "renderstate/RenderState.h"

#undef LOG_TAG
#define LOG_TAG "VulkanManager"

namespace android {
namespace uirenderer {
namespace renderthread {

static std::array<std::string_view, 20> sEnableExtensions{
        VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
        VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
        VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
        VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE1_EXTENSION_NAME,
        VK_KHR_MAINTENANCE2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE3_EXTENSION_NAME,
        VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
        VK_KHR_SURFACE_EXTENSION_NAME,
        VK_KHR_SWAPCHAIN_EXTENSION_NAME,
        VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME,
        VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
        VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
        VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
        VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
        VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
        VK_KHR_ANDROID_SURFACE_EXTENSION_NAME,
        VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME,
};

static bool shouldEnableExtension(const std::string_view& extension) {
    for (const auto& it : sEnableExtensions) {
        if (it == extension) {
            return true;
        }
    }
    return false;
}

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

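// These macros cache Vulkan entry points in the matching m-prefixed member
// function pointers: e.g. GET_DEV_PROC(QueueSubmit) resolves vkQueueSubmit via
// vkGetDeviceProcAddr and stores it in mQueueSubmit.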
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

// Cache a weak pointer to the VulkanManager so a second thread can share the
// same Vulkan state.
static wp<VulkanManager> sWeakInstance = nullptr;
static std::mutex sLock;
sp<VulkanManager> VulkanManager::getInstance() {
    std::lock_guard _lock{sLock};
    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
    if (!vulkanManager.get()) {
        vulkanManager = new VulkanManager();
        sWeakInstance = vulkanManager;
    }

    return vulkanManager;
}

sp<VulkanManager> VulkanManager::peekInstance() {
    std::lock_guard _lock{sLock};
    return sWeakInstance.promote();
}

VulkanManager::~VulkanManager() {
    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mAHBUploadQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

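        // Standard Vulkan enumerate-twice idiom: the first call only fetches
        // the count, the second fills the preallocated storage.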
        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling instance extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling instance extension %s", extension.extensionName);
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }


    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t)mInstanceExtensions.size(),    // enabledExtensionCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of getting the whole array,
    // since there should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // query to get the initial queue props size
    uint32_t queueCount = 0;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

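    // Two queues are requested from the graphics family: initialize() binds
    // queue 0 as mGraphicsQueue for rendering and queue 1 as mAHBUploadQueue
    // for AHardwareBuffer uploads.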
    constexpr auto kRequestedQueueCount = 2;

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            LOG_ALWAYS_FATAL_IF(queueProps[i].queueCount < kRequestedQueueCount);
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling device extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling device extension %s", extension.extensionName);
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                      mInstanceExtensions.data(), mDeviceExtensions.size(),
                      mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Set up all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;
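    // tailPNext always points at the pNext slot of the last struct in the
    // chain, so each feature struct below is appended in constant time. The
    // structs are heap-allocated and freed later by
    // free_features_extensions_structs().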

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down, and
    // we can't depend on it being supported on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[kRequestedQueueCount] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            queueNextPtr,                                // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                         // queueFamilyIndex
            kRequestedQueueCount,                        // queueCount
            queuePriorities,                             // pQueuePriorities
    };

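    // The features chain is handed to the driver through pNext, so
    // pEnabledFeatures below must stay null: Vulkan forbids setting both when
    // a VkPhysicalDeviceFeatures2 struct is chained.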
    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            1,                                     // queueCreateInfoCount
            &queueInfo,                            // pQueueCreateInfos
            0,                                     // enabledLayerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t)mDeviceExtensions.size(),    // enabledExtensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // pEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(ResetFences);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(FrameBoundaryANDROID);
}

void VulkanManager::initialize() {
    std::lock_guard _lock{mInitializeLock};

    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 1, &mAHBUploadQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

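// Balances the incStrong() in createContext(): Skia invokes this callback when
// the GrDirectContext is destroyed, releasing the strong reference that kept
// the VulkanManager alive for the context's lifetime.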
static void onGrContextReleased(void* context) {
    VulkanManager* manager = (VulkanManager*)context;
    manager->decStrong((void*)onGrContextReleased);
}

sk_sp<GrDirectContext> VulkanManager::createContext(GrContextOptions& options,
                                                    ContextType contextType) {
    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue =
            (contextType == ContextType::kRenderThread) ? mGraphicsQueue : mAHBUploadQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);

    LOG_ALWAYS_FATAL_IF(options.fContextDeleteProc != nullptr, "Conflicting fContextDeleteProcs!");
    this->incStrong((void*)onGrContextReleased);
    options.fContextDeleteContext = this;
    options.fContextDeleteProc = onGrContextReleased;

    return GrDirectContext::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

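    // If the dequeue fence has not signaled yet, import it as a temporary
    // VkSemaphore and make the GPU wait on it instead of stalling the CPU;
    // each failure path below falls back to a blocking sync_wait().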
    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                if (err != VK_SUCCESS) {
                    ALOGE("Failed to create import semaphore, err: %d", err);
                    close(fence_clone);
                    sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                } else {
                    VkImportSemaphoreFdInfoKHR importInfo;
                    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                    importInfo.pNext = nullptr;
                    importInfo.semaphore = semaphore;
                    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                    importInfo.fd = fence_clone;

                    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                    if (err != VK_SUCCESS) {
                        ALOGE("Failed to import semaphore, err: %d", err);
                        mDestroySemaphore(mDevice, semaphore, nullptr);
                        close(fence_clone);
                        sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                    } else {
                        GrBackendSemaphore backendSemaphore;
                        backendSemaphore.initVulkan(semaphore);
                        // Skia will take ownership of the VkSemaphore and delete it once the wait
                        // has finished. The VkSemaphore also owns the imported fd, so it will
                        // close the fd when it is deleted.
                        bufferInfo->skSurface->wait(1, &backendSemaphore);
                        // The following flush blocks the GPU immediately instead of waiting for
                        // other drawing ops. It seems dequeue_fence is not respected otherwise.
                        // TODO: remove the flush after finding why backendSemaphore is not working.
                        bufferInfo->skSurface->flushAndSubmit();
                    }
                }
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

struct DestroySemaphoreInfo {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    // We need to make sure we don't delete the VkSemaphore until it is done being used by both Skia
    // (including by the GPU) and inside the VulkanManager. So we always start with two refs, one
    // owned by Skia and one owned by the VulkanManager. The refs are decremented each time
    // destroy_semaphore is called with this object. Skia will call destroy_semaphore once it is
    // done with the semaphore and the GPU has finished work on the semaphore. The VulkanManager
    // calls destroy_semaphore after sending the semaphore to Skia and exporting it if need be.
    int mRefs = 2;

    DestroySemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                         VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {}
};

static void destroy_semaphore(void* context) {
    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(context);
    --info->mRefs;
    if (!info->mRefs) {
        info->mDestroyFunction(info->mDevice, info->mSemaphore, nullptr);
        delete info;
    }
}

nsecs_t VulkanManager::finishFrame(SkSurface* surface) {
    ATRACE_NAME("Vulkan finish frame");
    ALOGE_IF(mSwapSemaphore != VK_NULL_HANDLE || mDestroySemaphoreContext != nullptr,
             "finishFrame already has an outstanding semaphore");

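    // Create an exportable semaphore that is signaled when this frame's GPU
    // work completes; swapBuffers() exports it as a sync_fd and hands it to
    // the compositor along with the presented buffer.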
    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::finishFrame(): Failed to create semaphore");

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    GrFlushInfo flushInfo;
    if (err == VK_SUCCESS) {
        mDestroySemaphoreContext = new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
        flushInfo.fNumSemaphores = 1;
        flushInfo.fSignalSemaphores = &backendSemaphore;
        flushInfo.fFinishedProc = destroy_semaphore;
        flushInfo.fFinishedContext = mDestroySemaphoreContext;
    } else {
        semaphore = VK_NULL_HANDLE;
    }
    GrSemaphoresSubmitted submitted =
            surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);
    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
    ALOGE_IF(!context, "Surface is not backed by gpu");
    context->submit();
    const nsecs_t submissionTime = systemTime();
    if (semaphore != VK_NULL_HANDLE) {
        if (submitted == GrSemaphoresSubmitted::kYes) {
            mSwapSemaphore = semaphore;
            if (mFrameBoundaryANDROID) {
                // retrieve VkImage used as render target
                VkImage image = VK_NULL_HANDLE;
                GrBackendRenderTarget backendRenderTarget =
                        surface->getBackendRenderTarget(SkSurface::kFlushRead_BackendHandleAccess);
                if (backendRenderTarget.isValid()) {
                    GrVkImageInfo info;
                    if (backendRenderTarget.getVkImageInfo(&info)) {
                        image = info.fImage;
                    } else {
                        ALOGE("Frame boundary: backend is not vulkan");
                    }
                } else {
                    ALOGE("Frame boundary: invalid backend render target");
                }
                // frameBoundaryANDROID needs to know about mSwapSemaphore, but
                // it won't wait on it.
                mFrameBoundaryANDROID(mDevice, mSwapSemaphore, image);
            }
        } else {
            destroy_semaphore(mDestroySemaphoreContext);
            mDestroySemaphoreContext = nullptr;
        }
    }
    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);

    return submissionTime;
}

void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    int fenceFd = -1;
    if (mSwapSemaphore != VK_NULL_HANDLE) {
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = mSwapSemaphore;
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
    } else {
        ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");
        mQueueWaitIdle(mGraphicsQueue);
    }
    if (mDestroySemaphoreContext) {
        destroy_semaphore(mDestroySemaphoreContext);
    }

    surface->presentCurrentBuffer(dirtyRect, fenceFd);
    mSwapSemaphore = VK_NULL_HANDLE;
    mDestroySemaphoreContext = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mGraphicsQueue) {
        mQueueWaitIdle(mGraphicsQueue);
    }

    delete surface;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window,
                                            ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrDirectContext* grContext,
                                            uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = ::dup(fence);
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        close(fenceFd);
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        close(fenceFd);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore;
    beSemaphore.initVulkan(semaphore);

    // Skia will take ownership of the VkSemaphore and delete it once the wait has finished. The
    // VkSemaphore also owns the imported fd, so it will close the fd when it is deleted.
    grContext->wait(1, &beSemaphore);
    grContext->flushAndSubmit();

    return OK;
}

status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
    *nativeFence = -1;
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    DestroySemaphoreInfo* destroyInfo =
            new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore
    // callback, which will remove its ref to the semaphore. The VulkanManager must still release
    // its own ref when it is done with the semaphore.
    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = &backendSemaphore;
    flushInfo.fFinishedProc = destroy_semaphore;
    flushInfo.fFinishedContext = destroyInfo;
    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
    grContext->submit();

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        destroy_semaphore(destroyInfo);
        return INVALID_OPERATION;
    }

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    destroy_semaphore(destroyInfo);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    *nativeFence = fenceFd;

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */