/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "node_context_pool_manager_vk.h"

#include <cstdint>
#include <vulkan/vulkan_core.h>

#include <base/containers/fixed_string.h>
#include <base/math/mathf.h>
#include <base/util/compile_time_hashes.h>
#include <render/device/pipeline_state_desc.h>
#include <render/namespace.h>

#include "device/device.h"
#include "device/gpu_image.h"
#include "device/gpu_resource_handle_util.h"
#include "device/gpu_resource_manager.h"
#include "nodecontext/node_context_pool_manager.h"
#include "nodecontext/render_command_list.h"
#include "util/log.h"
#include "vulkan/device_vk.h"
#include "vulkan/gpu_image_vk.h"
#include "vulkan/gpu_resource_util_vk.h"
#include "vulkan/pipeline_create_functions_vk.h"
#include "vulkan/validate_vk.h"

using namespace BASE_NS;

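// Hash specializations used by HashCombine() / HashRange() below when building the
// render pass compatibility, render pass, and framebuffer cache keys.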
template<>
uint64_t BASE_NS::hash(const RENDER_NS::ImageLayout& val)
{
    return static_cast<uint64_t>(val);
}
template<>
uint64_t BASE_NS::hash(const RENDER_NS::RenderPassSubpassDesc& subpass)
{
    uint64_t seed = 0;
    HashRange(seed, subpass.inputAttachmentIndices, subpass.inputAttachmentIndices + subpass.inputAttachmentCount);
    HashRange(seed, subpass.colorAttachmentIndices, subpass.colorAttachmentIndices + subpass.colorAttachmentCount);
    HashRange(
        seed, subpass.resolveAttachmentIndices, subpass.resolveAttachmentIndices + subpass.resolveAttachmentCount);
    if (subpass.depthAttachmentCount) {
        HashCombine(seed, static_cast<uint64_t>(subpass.depthAttachmentIndex));
    }
    if (subpass.viewMask > 1U) {
        HashCombine(seed, subpass.viewMask);
    }
    return seed;
}

RENDER_BEGIN_NAMESPACE()
namespace {
struct FBSize {
    uint32_t width { 0 };
    uint32_t height { 0 };
    uint32_t layers { 1 };
};

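// Hashes only the properties that affect Vulkan render pass compatibility:
// attachment formats, sample counts, pipeline stage flags, the first subpass layout,
// and (with multi-view) the per-attachment layer/mip selection.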
inline void HashRenderPassCompatibility(uint64_t& hash, const RenderPassDesc& renderPassDesc,
    const LowLevelRenderPassCompatibilityDescVk& renderPassCompatibilityDesc, const RenderPassSubpassDesc& subpasses,
    const RenderPassAttachmentResourceStates& inputResourceStates)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const LowLevelRenderPassCompatibilityDescVk::Attachment& atCompatibilityDesc =
            renderPassCompatibilityDesc.attachments[idx];
        HashCombine(hash, static_cast<uint64_t>(atCompatibilityDesc.format),
            static_cast<uint64_t>(atCompatibilityDesc.sampleCountFlags));
        // the render pass needs to have matching stage masks
        HashCombine(hash, static_cast<uint64_t>(inputResourceStates.states[idx].pipelineStageFlags));
        if (subpasses.viewMask > 1U) {
            // with the multi-view extension, the render pass is updated for all mips
            HashCombine(hash, (static_cast<uint64_t>(renderPassDesc.attachments[idx].layer) << 32ULL) |
                                  (static_cast<uint64_t>(renderPassDesc.attachments[idx].mipLevel)));
        }
    }
    // NOTE: subpass resource states are not hashed
    HashRange(hash, &subpasses, &subpasses + renderPassDesc.subpassCount);
}

inline void HashRenderPassLayouts(
    uint64_t& hash, const RenderPassDesc& renderPassDesc, const RenderPassImageLayouts& renderPassImageLayouts)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        HashCombine(hash, renderPassImageLayouts.attachmentInitialLayouts[idx],
            renderPassImageLayouts.attachmentFinalLayouts[idx]);
    }
}

inline void HashFramebuffer(
    uint64_t& hash, const RenderPassDesc& renderPassDesc, const GpuResourceManager& gpuResourceMgr)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const RenderPassDesc::AttachmentDesc& atDesc = renderPassDesc.attachments[idx];
        // with generation index
        const EngineResourceHandle gpuHandle = gpuResourceMgr.GetGpuHandle(renderPassDesc.attachmentHandles[idx]);
        HashCombine(hash, gpuHandle.id, static_cast<uint64_t>(atDesc.layer), static_cast<uint64_t>(atDesc.mipLevel));
    }
}

inline void HashRenderPassOps(uint64_t& hash, const RenderPassDesc& renderPassDesc)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const auto& attachRef = renderPassDesc.attachments[idx];
        const uint64_t opHash = (static_cast<uint64_t>(attachRef.loadOp) << 48ULL) |
                                (static_cast<uint64_t>(attachRef.storeOp) << 32ULL) |
                                (static_cast<uint64_t>(attachRef.stencilLoadOp) << 16ULL) |
                                (static_cast<uint64_t>(attachRef.stencilStoreOp));
        HashCombine(hash, opHash);
    }
}

struct RenderPassHashes {
    uint64_t renderPassCompatibilityHash { 0 };
    uint64_t renderPassHash { 0 };  // continued hashing from compatibility
    uint64_t frameBufferHash { 0 }; // only framebuffer related hash
};

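// Builds all three cache keys for a render pass: the compatibility hash, the full render pass
// hash (compatibility + layouts + load/store ops), and the framebuffer hash
// (compatibility + attachment handles with layer/mip).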
inline RenderPassHashes HashBeginRenderPass(const RenderCommandBeginRenderPass& beginRenderPass,
    const LowLevelRenderPassCompatibilityDescVk& renderPassCompatibilityDesc, const GpuResourceManager& gpuResourceMgr)
{
    RenderPassHashes rpHashes;

    const auto& renderPassDesc = beginRenderPass.renderPassDesc;

    PLUGIN_ASSERT(renderPassDesc.subpassCount > 0);
    HashRenderPassCompatibility(rpHashes.renderPassCompatibilityHash, renderPassDesc, renderPassCompatibilityDesc,
        beginRenderPass.subpasses[0], beginRenderPass.inputResourceStates);

    rpHashes.renderPassHash = rpHashes.renderPassCompatibilityHash; // for starting point
    HashRenderPassLayouts(rpHashes.renderPassHash, renderPassDesc, beginRenderPass.imageLayouts);
    HashRenderPassOps(rpHashes.renderPassHash, renderPassDesc);

    rpHashes.frameBufferHash = rpHashes.renderPassCompatibilityHash; // depends on the compatible render pass
    HashFramebuffer(rpHashes.frameBufferHash, renderPassDesc, gpuResourceMgr);

    return rpHashes;
}

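// Creates a VkFramebuffer for the given render pass. The image view for each attachment is
// selected based on multi-view / mip level / layer; returns VK_NULL_HANDLE if any attachment
// is missing a valid view.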
VkFramebuffer CreateFramebuffer(const GpuResourceManager& gpuResourceMgr, const RenderPassDesc& renderPassDesc,
    const LowLevelRenderPassDataVk& renderPassData, const VkDevice device)
{
    const uint32_t attachmentCount = renderPassDesc.attachmentCount;
    PLUGIN_ASSERT(attachmentCount <= PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT);

    // the size is taken from the render pass data
    // there might be e.g. fragment shading rate images whose size differs
    FBSize size { renderPassData.framebufferSize.width, renderPassData.framebufferSize.height, 1u };
    VkImageView imageViews[PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT] {};
    uint32_t viewIndex = 0;

    bool validImageViews = true;
    for (uint32_t idx = 0; idx < attachmentCount; ++idx) {
        const RenderHandle handle = renderPassDesc.attachmentHandles[idx];
        const RenderPassDesc::AttachmentDesc& attachmentDesc = renderPassDesc.attachments[idx];
        if (const GpuImageVk* image = gpuResourceMgr.GetImage<GpuImageVk>(handle); image) {
            const GpuImagePlatformDataVk& plat = image->GetPlatformData();
            const GpuImagePlatformDataViewsVk& imagePlat = image->GetPlatformDataViews();
            imageViews[viewIndex] = plat.imageViewBase;
            if ((renderPassData.viewMask > 1u) && (plat.arrayLayers > 1u)) {
                // multi-view: select the view with all the layers, but keep the framebuffer layer count at 1
                if ((!imagePlat.mipImageAllLayerViews.empty()) &&
                    (attachmentDesc.mipLevel < static_cast<uint32_t>(imagePlat.mipImageAllLayerViews.size()))) {
                    imageViews[viewIndex] = imagePlat.mipImageAllLayerViews[attachmentDesc.mipLevel];
                } else {
                    imageViews[viewIndex] = plat.imageView;
                }
                size.layers = 1u;
            } else if ((attachmentDesc.mipLevel >= 1) && (attachmentDesc.mipLevel < imagePlat.mipImageViews.size())) {
                imageViews[viewIndex] = imagePlat.mipImageViews[attachmentDesc.mipLevel];
            } else if ((attachmentDesc.layer >= 1) && (attachmentDesc.layer < imagePlat.layerImageViews.size())) {
                imageViews[viewIndex] = imagePlat.layerImageViews[attachmentDesc.layer];
            }
            viewIndex++;
        }
        if (!imageViews[idx]) {
            validImageViews = false;
        }
    }
#if (RENDER_VALIDATION_ENABLED == 1)
    if (!validImageViews || (viewIndex != attachmentCount)) {
        PLUGIN_LOG_E("RENDER_VALIDATION: invalid image attachment in FBO creation");
    }
#endif
    VkFramebuffer framebuffer = VK_NULL_HANDLE;
    if (validImageViews && (viewIndex == attachmentCount)) {
        const VkFramebufferCreateInfo framebufferCreateInfo {
            VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
            nullptr,                                   // pNext
            VkFramebufferCreateFlags { 0 },            // flags
            renderPassData.renderPassCompatibility,    // renderPass
            attachmentCount,                           // attachmentCount
            imageViews,                                // pAttachments
            size.width,                                // width
            size.height,                               // height
            size.layers,                               // layers
        };

        VALIDATE_VK_RESULT(vkCreateFramebuffer(device, // device
            &framebufferCreateInfo,                    // pCreateInfo
            nullptr,                                   // pAllocator
            &framebuffer));                            // pFramebuffer
    }

    return framebuffer;
}

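// Creates a command pool with a single pre-allocated command buffer (primary or secondary) and a
// signalling semaphore; one such pool per buffering frame is created in the constructor below.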
ContextCommandPoolVk CreateContextCommandPool(
    const VkDevice device, const VkCommandBufferLevel cmdBufferLevel, const uint32_t queueFamilyIndex)
{
    constexpr VkCommandPoolCreateFlags commandPoolCreateFlags { 0u };
    const VkCommandPoolCreateInfo commandPoolCreateInfo {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        commandPoolCreateFlags,                     // flags
        queueFamilyIndex,                           // queueFamilyIndex
    };
    constexpr VkSemaphoreCreateFlags semaphoreCreateFlags { 0 };
    constexpr VkSemaphoreCreateInfo semaphoreCreateInfo {
        VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, // sType
        nullptr,                                 // pNext
        semaphoreCreateFlags,                    // flags
    };

    ContextCommandPoolVk ctxPool;
    VALIDATE_VK_RESULT(vkCreateCommandPool(device, // device
        &commandPoolCreateInfo,                    // pCreateInfo
        nullptr,                                   // pAllocator
        &ctxPool.commandPool));                    // pCommandPool

    // pre-create command buffers and semaphores
    const VkCommandBufferAllocateInfo commandBufferAllocateInfo {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
        nullptr,                                        // pNext
        ctxPool.commandPool,                            // commandPool
        cmdBufferLevel,                                 // level
        1,                                              // commandBufferCount
    };

    VALIDATE_VK_RESULT(vkAllocateCommandBuffers(device, // device
        &commandBufferAllocateInfo,                     // pAllocateInfo
        &ctxPool.commandBuffer.commandBuffer));         // pCommandBuffers

    VALIDATE_VK_RESULT(vkCreateSemaphore(device, // device
        &semaphoreCreateInfo,                    // pCreateInfo
        nullptr,                                 // pAllocator
        &ctxPool.commandBuffer.semaphore));      // pSemaphore

    return ctxPool;
}
} // namespace

NodeContextPoolManagerVk::NodeContextPoolManagerVk(
    Device& device, GpuResourceManager& gpuResourceManager, const GpuQueue& gpuQueue)
    : NodeContextPoolManager(), device_ { device }, gpuResourceMgr_ { gpuResourceManager }, gpuQueue_(gpuQueue)
{
    const DeviceVk& deviceVk = static_cast<const DeviceVk&>(device_);
    const VkDevice vkDevice = static_cast<const DevicePlatformDataVk&>(device_.GetPlatformData()).device;

    const LowLevelGpuQueueVk lowLevelGpuQueue = deviceVk.GetGpuQueue(gpuQueue);
    const uint32_t bufferingCount = device_.GetCommandBufferingCount();
    if (bufferingCount > 0) {
        // prepare and create command buffers
        commandPools_.resize(bufferingCount);
        commandSecondaryPools_.resize(bufferingCount);
        const uint32_t queueFamilyIndex = lowLevelGpuQueue.queueInfo.queueFamilyIndex;
        for (uint32_t frameIdx = 0; frameIdx < commandPools_.size(); ++frameIdx) {
            commandPools_[frameIdx] = CreateContextCommandPool(
                vkDevice, VkCommandBufferLevel::VK_COMMAND_BUFFER_LEVEL_PRIMARY, queueFamilyIndex);
            commandSecondaryPools_[frameIdx] = CreateContextCommandPool(
                vkDevice, VkCommandBufferLevel::VK_COMMAND_BUFFER_LEVEL_SECONDARY, queueFamilyIndex);
        }
        // NOTE: cmd buffers tagged in first beginFrame
    }
}

NodeContextPoolManagerVk::~NodeContextPoolManagerVk()
{
    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;

    auto DestroyContextCommandPool = [](const auto& device, const auto& commandPools) {
        for (auto& cmdPoolRef : commandPools) {
            if (cmdPoolRef.commandBuffer.semaphore) {
                vkDestroySemaphore(device,              // device
                    cmdPoolRef.commandBuffer.semaphore, // semaphore
                    nullptr);                           // pAllocator
            }
            if (cmdPoolRef.commandPool) {
                vkDestroyCommandPool(device, // device
                    cmdPoolRef.commandPool,  // commandPool
                    nullptr);                // pAllocator
            }
        }
    };
    DestroyContextCommandPool(device, commandPools_);
    DestroyContextCommandPool(device, commandSecondaryPools_);

    for (auto& ref : framebufferCache_.hashToElement) {
        if (ref.second.frameBuffer != VK_NULL_HANDLE) {
            vkDestroyFramebuffer(device, // device
                ref.second.frameBuffer,  // framebuffer
                nullptr);                // pAllocator
        }
    }
    for (auto& ref : renderPassCache_.hashToElement) {
        if (ref.second.renderPass != VK_NULL_HANDLE) {
            renderPassCreator_.DestroyRenderPass(device, ref.second.renderPass);
        }
    }
    for (auto& ref : renderPassCompatibilityCache_.hashToElement) {
        if (ref.second.renderPass != VK_NULL_HANDLE) {
            renderPassCreator_.DestroyRenderPass(device, ref.second.renderPass);
        }
    }
}

void NodeContextPoolManagerVk::BeginFrame()
{
#if (RENDER_VALIDATION_ENABLED == 1)
    frameIndexFront_ = device_.GetFrameCount();
#endif
}

void NodeContextPoolManagerVk::BeginBackendFrame()
{
    const uint64_t frameCount = device_.GetFrameCount();

#if (RENDER_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexBack_ != frameCount); // prevent multiple calls per frame
    frameIndexBack_ = frameCount;
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
    if (firstFrame_) {
        firstFrame_ = false;
        for (const auto& cmdPoolRef : commandPools_) {
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_COMMAND_BUFFER,
                VulkanHandleCast<uint64_t>(cmdPoolRef.commandBuffer.commandBuffer), debugName_ + "_cmd_buf");
        }
        // TODO: deferred creation
        for (const auto& cmdPoolRef : commandSecondaryPools_) {
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_COMMAND_BUFFER,
                VulkanHandleCast<uint64_t>(cmdPoolRef.commandBuffer.commandBuffer), debugName_ + "_secondary_cmd_buf");
        }
    }
#endif

    bufferingIndex_ = (bufferingIndex_ + 1) % static_cast<uint32_t>(commandPools_.size());

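    // age out cached framebuffers and render passes which have not been used within the
    // buffering window (command buffering count + two extra frames of safety margin)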
    constexpr uint64_t additionalFrameCount { 2u };
    const auto minAge = device_.GetCommandBufferingCount() + additionalFrameCount;
    const auto ageLimit = (frameCount < minAge) ? 0 : (frameCount - minAge);

    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;
    {
        auto& cache = framebufferCache_.hashToElement;
        for (auto iter = cache.begin(); iter != cache.end();) {
            if (iter->second.frameUseIndex < ageLimit) {
                if (iter->second.frameBuffer) {
                    vkDestroyFramebuffer(device, iter->second.frameBuffer, nullptr);
                }
                iter = cache.erase(iter);
            } else {
                ++iter;
            }
        }
    }
    {
        auto& cache = renderPassCache_.hashToElement;
        for (auto iter = cache.begin(); iter != cache.end();) {
            if (iter->second.frameUseIndex < ageLimit) {
                if (iter->second.renderPass) {
                    renderPassCreator_.DestroyRenderPass(device, iter->second.renderPass);
                }
                iter = cache.erase(iter);
            } else {
                ++iter;
            }
        }
    }
}

const ContextCommandPoolVk& NodeContextPoolManagerVk::GetContextCommandPool() const
{
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
    return commandPools_[bufferingIndex_];
}

const ContextCommandPoolVk& NodeContextPoolManagerVk::GetContextSecondaryCommandPool() const
{
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
    PLUGIN_ASSERT(bufferingIndex_ < static_cast<uint32_t>(commandSecondaryPools_.size()));
    return commandSecondaryPools_[bufferingIndex_];
}

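// Resolves (and caches) the compatible render pass, framebuffer, and full render pass for a
// RenderCommandBeginRenderPass; also fills in the default viewport/scissor and framebuffer size
// from the first attachment.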
LowLevelRenderPassDataVk NodeContextPoolManagerVk::GetRenderPassData(
    const RenderCommandBeginRenderPass& beginRenderPass)
{
    LowLevelRenderPassDataVk renderPassData;
    renderPassData.subpassIndex = beginRenderPass.subpassStartIndex;

    PLUGIN_ASSERT(renderPassData.subpassIndex < static_cast<uint32_t>(beginRenderPass.subpasses.size()));
    const DeviceVk& deviceVk = (const DeviceVk&)device_;
    if (deviceVk.GetCommonDeviceExtensions().multiView) {
        renderPassData.viewMask = beginRenderPass.subpasses[renderPassData.subpassIndex].viewMask;
    }

    // collect render pass attachment compatibility info and default viewport/scissor
    for (uint32_t idx = 0; idx < beginRenderPass.renderPassDesc.attachmentCount; ++idx) {
        if (const GpuImageVk* image =
                gpuResourceMgr_.GetImage<const GpuImageVk>(beginRenderPass.renderPassDesc.attachmentHandles[idx]);
            image) {
            const auto& platData = image->GetPlatformData();
            renderPassData.renderPassCompatibilityDesc.attachments[idx] = { platData.format, platData.samples,
                platData.aspectFlags };
            if (idx == 0) {
                uint32_t maxFbWidth = platData.extent.width;
                uint32_t maxFbHeight = platData.extent.height;
                const auto& attachmentRef = beginRenderPass.renderPassDesc.attachments[idx];
                if ((attachmentRef.mipLevel >= 1) && (attachmentRef.mipLevel < platData.mipLevels)) {
                    maxFbWidth = Math::max(1u, maxFbWidth >> attachmentRef.mipLevel);
                    maxFbHeight = Math::max(1u, maxFbHeight >> attachmentRef.mipLevel);
                }
                renderPassData.viewport = { 0.0f, 0.0f, static_cast<float>(maxFbWidth), static_cast<float>(maxFbHeight),
                    0.0f, 1.0f };
                renderPassData.scissor = { { 0, 0 }, { maxFbWidth, maxFbHeight } };
                renderPassData.framebufferSize = { maxFbWidth, maxFbHeight };
            }
        }
    }

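    // compute the cache keys: compatibility, render pass, and framebuffer hashes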
    {
        const RenderPassHashes rpHashes =
            HashBeginRenderPass(beginRenderPass, renderPassData.renderPassCompatibilityDesc, gpuResourceMgr_);
        renderPassData.renderPassCompatibilityHash = rpHashes.renderPassCompatibilityHash;
        renderPassData.renderPassHash = rpHashes.renderPassHash;
        renderPassData.frameBufferHash = rpHashes.frameBufferHash;
    }

    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;
    const uint64_t frameCount = device_.GetFrameCount();

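    // NOTE: compatibility render passes are created once per compatibility hash and kept until
    // destruction (they are not aged out in BeginBackendFrame, unlike framebuffers and full render passes)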
    {
        auto& cache = renderPassCompatibilityCache_;
        if (const auto iter = cache.hashToElement.find(renderPassData.renderPassCompatibilityHash);
            iter != cache.hashToElement.cend()) {
            renderPassData.renderPassCompatibility = iter->second.renderPass;
        } else { // new
            renderPassData.renderPassCompatibility =
                renderPassCreator_.CreateRenderPassCompatibility(deviceVk, beginRenderPass, renderPassData);
            cache.hashToElement[renderPassData.renderPassCompatibilityHash] = { 0,
                renderPassData.renderPassCompatibility };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_RENDER_PASS,
                VulkanHandleCast<uint64_t>(renderPassData.renderPassCompatibility), debugName_ + "_rp_compatibility");
#endif
        }
    }

    {
        auto& cache = framebufferCache_;
        if (auto iter = cache.hashToElement.find(renderPassData.frameBufferHash); iter != cache.hashToElement.cend()) {
            iter->second.frameUseIndex = frameCount;
            renderPassData.framebuffer = iter->second.frameBuffer;
        } else { // new
            renderPassData.framebuffer =
                CreateFramebuffer(gpuResourceMgr_, beginRenderPass.renderPassDesc, renderPassData, device);
            cache.hashToElement[renderPassData.frameBufferHash] = { frameCount, renderPassData.framebuffer };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_FRAMEBUFFER,
                VulkanHandleCast<uint64_t>(renderPassData.framebuffer),
                debugName_ + "_fbo_" + to_string(renderPassData.framebufferSize.width) + "_" +
                    to_string(renderPassData.framebufferSize.height));
#endif
        }
    }

    {
        auto& cache = renderPassCache_;
        if (const auto iter = cache.hashToElement.find(renderPassData.renderPassHash);
            iter != cache.hashToElement.cend()) {
            iter->second.frameUseIndex = frameCount;
            renderPassData.renderPass = iter->second.renderPass;
        } else { // new
            renderPassData.renderPass = renderPassCreator_.CreateRenderPass(deviceVk, beginRenderPass, renderPassData);
            cache.hashToElement[renderPassData.renderPassHash] = { frameCount, renderPassData.renderPass };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_RENDER_PASS,
                VulkanHandleCast<uint64_t>(renderPassData.renderPass), debugName_ + "_rp");
#endif
        }
    }

    return renderPassData;
}

#if ((RENDER_VALIDATION_ENABLED == 1) || (RENDER_VULKAN_VALIDATION_ENABLED == 1))
void NodeContextPoolManagerVk::SetValidationDebugName(const string_view debugName)
{
    debugName_ = debugName;
}
#endif
RENDER_END_NAMESPACE()