1 /*
2  * Copyright (c) 2024 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "gpu_resource_manager.h"
17 
18 #include <algorithm>
19 #include <cinttypes>
20 #if !defined(NDEBUG) || (defined(PLUGIN_LOG_DEBUG) && (PLUGIN_LOG_DEBUG == 1))
21 #include <sstream>
22 #include <thread>
23 #endif
24 
25 #include <base/containers/fixed_string.h>
26 #include <base/math/mathf.h>
27 #include <render/namespace.h>
28 
29 #include "device/device.h"
30 #include "device/gpu_buffer.h"
31 #include "device/gpu_image.h"
32 #include "device/gpu_resource_cache.h"
33 #include "device/gpu_resource_desc_flag_validation.h"
34 #include "device/gpu_resource_manager_base.h"
35 #include "device/gpu_sampler.h"
36 #include "resource_handle_impl.h"
37 #include "util/log.h"
38 
39 #if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
40 #include "device/gpu_resource_util.h"
41 #endif
42 
43 using namespace BASE_NS;
44 using namespace CORE_NS;
45 
46 RENDER_BEGIN_NAMESPACE()
47 namespace {
48 static constexpr uint32_t INVALID_PENDING_INDEX { ~0u };
49 static constexpr uint32_t MAX_IMAGE_EXTENT { 32768u }; // should be fetched from the device
50 
51 static constexpr MemoryPropertyFlags NEEDED_DEVICE_MEMORY_PROPERTY_FLAGS_FOR_STAGING_MEM_OPT {
52     CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT
53 };
54 static constexpr MemoryPropertyFlags ADD_STAGING_MEM_OPT_FLAGS { CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
55                                                                  CORE_MEMORY_PROPERTY_HOST_COHERENT_BIT };
56 
57 // make sure that generation is valid
58 static constexpr uint64_t INVALIDATE_GENERATION_SHIFT { 32 };
InvalidateWithGeneration(const EngineResourceHandle handle)59 EngineResourceHandle InvalidateWithGeneration(const EngineResourceHandle handle)
60 {
61     return { handle.id | ~RenderHandleUtil::RES_HANDLE_GENERATION_MASK };
62 }
63 
UnpackNewHandle(const EngineResourceHandle & handle,const RenderHandleType type,const uint32_t arrayIndex)64 EngineResourceHandle UnpackNewHandle(
65     const EngineResourceHandle& handle, const RenderHandleType type, const uint32_t arrayIndex)
66 {
67     // increments generation counter
68     if (RenderHandleUtil::IsValid(handle)) {
69         const uint32_t gpuGenIndex = RenderHandleUtil::GetGenerationIndexPart(handle) + 1;
70         return RenderHandleUtil::CreateEngineResourceHandle(type, arrayIndex, gpuGenIndex);
71     } else {
72         const uint32_t gpuGenIndex = uint32_t(handle.id >> INVALIDATE_GENERATION_SHIFT) + 1;
73         return RenderHandleUtil::CreateEngineResourceHandle(type, arrayIndex, gpuGenIndex);
74     }
75 }
76 
77 // we need to know if image is a depth format when binding to descriptor set as read only
GetAdditionalImageFlagsFromFormat(const Format format)78 constexpr RenderHandleInfoFlags GetAdditionalImageFlagsFromFormat(const Format format)
79 {
80     RenderHandleInfoFlags flags {};
81 
82     const bool isDepthFormat =
83         ((format == Format::BASE_FORMAT_D16_UNORM) || (format == Format::BASE_FORMAT_X8_D24_UNORM_PACK32) ||
84             (format == Format::BASE_FORMAT_D32_SFLOAT) || (format == Format::BASE_FORMAT_D24_UNORM_S8_UINT))
85             ? true
86             : false;
87     if (isDepthFormat) {
88         flags |= CORE_RESOURCE_HANDLE_DEPTH_IMAGE;
89     }
90 
91     return flags;
92 }
93 
94 #if (RENDER_VALIDATION_ENABLED == 1)
ValidateGpuBufferDesc(const GpuBufferDesc & desc)95 void ValidateGpuBufferDesc(const GpuBufferDesc& desc)
96 {
97     if (desc.usageFlags == 0) {
98         PLUGIN_LOG_E("RENDER_VALIDATION: BufferUsageFlags must not be 0");
99     }
100     if ((desc.usageFlags & (~GpuResourceDescFlagValidation::ALL_GPU_BUFFER_USAGE_FLAGS)) != 0) {
101         PLUGIN_LOG_E("RENDER_VALIDATION: Invalid BufferUsageFlags (%u)", desc.usageFlags);
102     }
103     if (desc.memoryPropertyFlags == 0) {
104         PLUGIN_LOG_E("RENDER_VALIDATION: MemoryPropertyFlags must not be 0");
105     }
106     if ((desc.memoryPropertyFlags & (~GpuResourceDescFlagValidation::ALL_MEMORY_PROPERTY_FLAGS)) != 0) {
107         PLUGIN_LOG_E("RENDER_VALIDATION: Invalid MemoryPropertyFlags (%u)", desc.memoryPropertyFlags);
108     }
109     if ((desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_MAP_OUTSIDE_RENDERER) &&
110         ((desc.memoryPropertyFlags & MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)) {
111         PLUGIN_LOG_E(
112             "RENDER_VALIDATION: Invalid MemoryPropertyFlags for CORE_ENGINE_BUFFER_CREATION_MAP_OUTSIDE_RENDERER (%u)",
113             desc.memoryPropertyFlags);
114     }
115     if (desc.byteSize == 0) {
116         PLUGIN_LOG_E("RENDER_VALIDATION: Buffer byteSize must larger than zero");
117     }
118 }
119 
ValidateGpuImageDesc(const GpuImageDesc & desc,const string_view name)120 void ValidateGpuImageDesc(const GpuImageDesc& desc, const string_view name)
121 {
122     bool valid = true;
123     if (desc.format == Format::BASE_FORMAT_UNDEFINED) {
124         PLUGIN_LOG_E("RENDER_VALIDATION: Undefined image format");
125         valid = false;
126     }
127     if (desc.imageType > ImageType::CORE_IMAGE_TYPE_3D) {
128         PLUGIN_LOG_E("RENDER_VALIDATION: Unsupported image type");
129         valid = false;
130     }
131     if ((desc.imageViewType == ImageViewType::CORE_IMAGE_VIEW_TYPE_2D) && (desc.layerCount > 1u)) {
132         PLUGIN_LOG_E(
133             "RENDER_VALIDATION: Default image view is done for sampling / shader resource access and needs to be "
134             "CORE_IMAGE_VIEW_TYPE_2D_ARRAY with multiple layers");
135         valid = false;
136     }
137     if (desc.imageTiling > ImageTiling::CORE_IMAGE_TILING_LINEAR) {
138         PLUGIN_LOG_E("RENDER_VALIDATION: Unsupported image tiling mode (%u)", static_cast<uint32_t>(desc.imageTiling));
139         valid = false;
140     }
141     if (desc.usageFlags == 0) {
142         PLUGIN_LOG_E("RENDER_VALIDATION: ImageUsageFlags must not be 0");
143         valid = false;
144     }
145     if ((desc.usageFlags & (~GpuResourceDescFlagValidation::ALL_GPU_IMAGE_USAGE_FLAGS)) != 0) {
146         PLUGIN_LOG_E("RENDER_VALIDATION: Invalid ImageUsageFlags (%u)", desc.usageFlags);
147         valid = false;
148     }
149     if (desc.memoryPropertyFlags == 0) {
150         PLUGIN_LOG_E("RENDER_VALIDATION: MemoryPropertyFlags must not be 0");
151         valid = false;
152     }
153     if ((desc.memoryPropertyFlags & (~GpuResourceDescFlagValidation::ALL_MEMORY_PROPERTY_FLAGS)) != 0) {
154         PLUGIN_LOG_E("RENDER_VALIDATION: Invalid MemoryPropertyFlags (%u)", desc.memoryPropertyFlags);
155         valid = false;
156     }
157     if (desc.width == 0 || desc.height == 0 || desc.depth == 0) {
158         PLUGIN_LOG_E("RENDER_VALIDATION: Image extents must not be zero (x:%u, y:%u, z:%u)", desc.width, desc.height,
159             desc.depth);
160         valid = false;
161     }
162     if (desc.width > MAX_IMAGE_EXTENT || desc.height > MAX_IMAGE_EXTENT || desc.depth > MAX_IMAGE_EXTENT) {
163         PLUGIN_LOG_E("RENDER_VALIDATION: Image extents must not be bigger than (%u) (x:%u, y:%u, z:%u)",
164             MAX_IMAGE_EXTENT, desc.width, desc.height, desc.depth);
165         valid = false;
166     }
167     if (desc.mipCount == 0 || desc.layerCount == 0) {
168         PLUGIN_LOG_E("RENDER_VALIDATION: Image mip and layer count must be >=1 (mipCount:%u, layerCount:%u)",
169             desc.mipCount, desc.layerCount);
170         valid = false;
171     }
172     if ((desc.createFlags & (~GpuResourceDescFlagValidation::ALL_IMAGE_CREATE_FLAGS)) != 0) {
173         PLUGIN_LOG_E("RENDER_VALIDATION: Invalid ImageCreateFlags (%u)", desc.createFlags);
174         valid = false;
175     }
176     if ((desc.engineCreationFlags & CORE_ENGINE_IMAGE_CREATION_GENERATE_MIPS) &&
177         ((desc.usageFlags & CORE_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0)) {
178         PLUGIN_LOG_E(
179             "RENDER_VALIDATION: Must use usage flags CORE_IMAGE_USAGE_TRANSFER_SRC_BIT when generating mip maps");
180         valid = false;
181     }
182     if (desc.usageFlags & CORE_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) {
183         const ImageUsageFlags usageFlags =
184             desc.usageFlags &
185             ~(CORE_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | CORE_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
186                 CORE_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | CORE_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
187         if (usageFlags != 0) {
188             PLUGIN_LOG_E(
189                 "RENDER_VALIDATION: If image usage flags contain CORE_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, only "
190                 "DEPTH_STENCIL_ATTACHMENT_BIT, INPUT_ATTACHMENT_BIT, and COLOR_ATTACHMENT_BIT can be set.");
191             valid = false;
192         }
193     }
194     if ((desc.layerCount > 1u) && (desc.imageViewType <= CORE_IMAGE_VIEW_TYPE_3D)) {
195         PLUGIN_LOG_E("RENDER_VALIDATION: If image layer count (%u) is larger than 1, then image view type must be "
196                      "CORE_IMAGE_VIEW_TYPE_XX_ARRAY",
197             desc.layerCount);
198         valid = false;
199     }
200 
201     if ((!valid) && (!name.empty())) {
202         PLUGIN_LOG_E("RENDER_VALIDATION: validation issue(s) with image (name: %s)", name.data());
203     }
204 }
205 
ValidateGpuAccelerationStructureDesc(const GpuAccelerationStructureDesc & desc)206 void ValidateGpuAccelerationStructureDesc(const GpuAccelerationStructureDesc& desc)
207 {
208     ValidateGpuBufferDesc(desc.bufferDesc);
209     if ((desc.bufferDesc.usageFlags & CORE_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT) == 0) {
210         PLUGIN_LOG_E("RENDER_VALIDATION: usageFlags must include CORE_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT "
211                      "for acceleration structures");
212     }
213 }
214 
ValidateGpuImageCopy(const GpuImageDesc & desc,const BufferImageCopy & copy,const string_view name)215 void ValidateGpuImageCopy(const GpuImageDesc& desc, const BufferImageCopy& copy, const string_view name)
216 {
217     const uint32_t mip = copy.imageSubresource.mipLevel;
218     const Size3D imageSize { desc.width >> mip, desc.height >> mip, desc.depth };
219     if ((copy.imageOffset.width >= imageSize.width) || (copy.imageOffset.width >= imageSize.height) ||
220         (copy.imageOffset.depth >= imageSize.depth)) {
221         PLUGIN_LOG_E(
222             "RENDER_VALIDATION: BufferImageCopy offset exceeds GpuImageDesc. Mipsize: %u, %u, %u, offset: %u %u %u. "
223             "(name: %s)",
224             imageSize.width, imageSize.height, imageSize.depth, copy.imageOffset.width, copy.imageOffset.height,
225             copy.imageOffset.depth, name.data());
226     }
227 }
228 #endif
229 
GetValidGpuBufferDesc(const GpuBufferDesc & desc)230 GpuBufferDesc GetValidGpuBufferDesc(const GpuBufferDesc& desc)
231 {
232     return GpuBufferDesc {
233         desc.usageFlags & GpuResourceDescFlagValidation::ALL_GPU_BUFFER_USAGE_FLAGS,
234         desc.memoryPropertyFlags & GpuResourceDescFlagValidation::ALL_MEMORY_PROPERTY_FLAGS,
235         desc.engineCreationFlags,
236         desc.byteSize,
237     };
238 }
239 
CheckAndEnableMemoryOptimizations(const uint32_t gpuResourceMgrFlags,GpuBufferDesc & desc)240 void CheckAndEnableMemoryOptimizations(const uint32_t gpuResourceMgrFlags, GpuBufferDesc& desc)
241 {
242     if (gpuResourceMgrFlags & GpuResourceManager::GPU_RESOURCE_MANAGER_OPTIMIZE_STAGING_MEMORY) {
243         if ((desc.memoryPropertyFlags == CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
244             (desc.usageFlags & CORE_BUFFER_USAGE_TRANSFER_DST_BIT) &&
245             (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_ENABLE_MEMORY_OPTIMIZATIONS)) {
246             desc.memoryPropertyFlags |= ADD_STAGING_MEM_OPT_FLAGS;
247         }
248     }
249 }
250 
GetScalingImageNeed(const GpuImageDesc & desc,const array_view<const IImageContainer::SubImageDesc> & copies)251 bool GetScalingImageNeed(const GpuImageDesc& desc, const array_view<const IImageContainer::SubImageDesc>& copies)
252 {
253     bool scale = false;
254     if (desc.engineCreationFlags & EngineImageCreationFlagBits::CORE_ENGINE_IMAGE_CREATION_SCALE) {
255         // we only support single image (single buffer image copy) scaling
256         if (copies.size() == 1) {
257             scale = (copies[0].width != desc.width) || (copies[0].height != desc.height);
258         }
259     }
260     return scale;
261 }
262 
GetScalingImageNeed(const GpuImageDesc & desc,const array_view<const BufferImageCopy> & copies)263 bool GetScalingImageNeed(const GpuImageDesc& desc, const array_view<const BufferImageCopy>& copies)
264 {
265     bool scale = false;
266     if (desc.engineCreationFlags & EngineImageCreationFlagBits::CORE_ENGINE_IMAGE_CREATION_SCALE) {
267         // we only support single image (single buffer image copy) scaling
268         if (copies.size() == 1) {
269             scale = (copies[0].imageExtent.width != desc.width) || (copies[0].imageExtent.height != desc.height);
270         }
271     }
272     return scale;
273 }
274 
275 // staging needs to be locked when called with the input resources
UpdateStagingScaling(const Format format,const array_view<const IImageContainer::SubImageDesc> & copies,ScalingImageDataStruct & siData)276 void UpdateStagingScaling(
277     const Format format, const array_view<const IImageContainer::SubImageDesc>& copies, ScalingImageDataStruct& siData)
278 {
279     PLUGIN_ASSERT(copies.size() == 1);
280     if (auto iter = siData.formatToScalingImages.find(format); iter != siData.formatToScalingImages.end()) {
281         const size_t index = iter->second;
282         PLUGIN_ASSERT(index < siData.scalingImages.size());
283         auto& scaleImage = siData.scalingImages[index];
284         scaleImage.maxWidth = Math::max(scaleImage.maxWidth, copies[0].width);
285         scaleImage.maxHeight = Math::max(scaleImage.maxHeight, copies[0].height);
286     } else {
287         const size_t index = siData.scalingImages.size();
288         siData.scalingImages.push_back({ {}, format, copies[0].width, copies[0].height });
289         siData.formatToScalingImages[format] = index;
290     }
291 }
292 
UpdateStagingScaling(const Format format,const array_view<const BufferImageCopy> & copies,ScalingImageDataStruct & siData)293 void UpdateStagingScaling(
294     const Format format, const array_view<const BufferImageCopy>& copies, ScalingImageDataStruct& siData)
295 {
296     PLUGIN_ASSERT(copies.size() == 1);
297     const auto& extent = copies[0].imageExtent;
298     if (auto iter = siData.formatToScalingImages.find(format); iter != siData.formatToScalingImages.end()) {
299         const size_t index = iter->second;
300         PLUGIN_ASSERT(index < siData.scalingImages.size());
301         auto& scaleImage = siData.scalingImages[index];
302         scaleImage.maxWidth = Math::max(scaleImage.maxWidth, extent.width);
303         scaleImage.maxHeight = Math::max(scaleImage.maxHeight, extent.height);
304     } else {
305         const size_t index = siData.scalingImages.size();
306         siData.scalingImages.push_back({ {}, format, extent.width, extent.height });
307         siData.formatToScalingImages[format] = index;
308     }
309 }
310 
GetStagingScalingImageDesc(const Format format,const uint32_t width,const uint32_t height)311 GpuImageDesc GetStagingScalingImageDesc(const Format format, const uint32_t width, const uint32_t height)
312 {
313     return GpuImageDesc {
314         ImageType::CORE_IMAGE_TYPE_2D,
315         ImageViewType::CORE_IMAGE_VIEW_TYPE_2D,
316         format,
317         ImageTiling::CORE_IMAGE_TILING_OPTIMAL,
318         // NOTE sampled is not needed, but image view should not be created
319         ImageUsageFlagBits::CORE_IMAGE_USAGE_TRANSFER_SRC_BIT | ImageUsageFlagBits::CORE_IMAGE_USAGE_TRANSFER_DST_BIT |
320             ImageUsageFlagBits::CORE_IMAGE_USAGE_SAMPLED_BIT,
321         MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
322         0,
323         0, // No dynamic barrriers
324         width,
325         height,
326         1u,
327         1u,
328         1u,
329         1u,
330         {},
331     };
332 }
333 
ConvertCoreBufferImageCopy(const IImageContainer::SubImageDesc & bufferImageCopy)334 BufferImageCopy ConvertCoreBufferImageCopy(const IImageContainer::SubImageDesc& bufferImageCopy)
335 {
336     return BufferImageCopy {
337         /** Buffer offset */
338         bufferImageCopy.bufferOffset,
339         /** Buffer row length */
340         bufferImageCopy.bufferRowLength,
341         /** Buffer image height */
342         bufferImageCopy.bufferImageHeight,
343         /** Image subresource */
344         { ImageAspectFlagBits::CORE_IMAGE_ASPECT_COLOR_BIT, bufferImageCopy.mipLevel, 0, bufferImageCopy.layerCount },
345         /** Image offset */
346         { 0, 0, 0 },
347         /** Image extent */
348         { bufferImageCopy.width, bufferImageCopy.height, bufferImageCopy.depth },
349     };
350 }
351 
352 #if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
LogGpuResource(const RenderHandle & gpuHandle,const EngineResourceHandle engineHandle)353 void LogGpuResource(const RenderHandle& gpuHandle, const EngineResourceHandle engineHandle)
354 {
355     constexpr string_view names[] = { "buffer", "image", "sampler" };
356     const RenderHandleType handleType = RenderHandleUtil::GetHandleType(gpuHandle);
357     uint32_t idx = 0;
358     if (handleType == RenderHandleType::GPU_IMAGE) {
359         idx = 1u;
360     } else if (handleType == RenderHandleType::GPU_SAMPLER) {
361         idx = 2u;
362     }
363     PLUGIN_LOG_E("gpu %s > %" PRIx64 "=%" PRIx64 " generation: %u=%u", names[idx].data(), gpuHandle.id, engineHandle.id,
364         RenderHandleUtil::GetGenerationIndexPart(gpuHandle), RenderHandleUtil::GetGenerationIndexPart(engineHandle));
365 }
366 #endif
367 } // namespace
368 
GpuResourceManager(Device & device,const CreateInfo & createInfo)369 GpuResourceManager::GpuResourceManager(Device& device, const CreateInfo& createInfo)
370     : device_(device), gpuResourceMgrFlags_(createInfo.flags),
371       gpuBufferMgr_(make_unique<GpuResourceManagerTyped<GpuBuffer, GpuBufferDesc>>(device)),
372       gpuImageMgr_(make_unique<GpuResourceManagerTyped<GpuImage, GpuImageDesc>>(device)),
373       gpuSamplerMgr_(make_unique<GpuResourceManagerTyped<GpuSampler, GpuSamplerDesc>>(device))
374 {
375     gpuResourceCache_ = make_unique<GpuResourceCache>(*this);
376 
377     bufferStore_.mgr = gpuBufferMgr_.get();
378     imageStore_.mgr = gpuImageMgr_.get();
379     samplerStore_.mgr = gpuSamplerMgr_.get();
380 
381     const MemoryPropertyFlags deviceSharedMemoryPropertyFlags = device_.GetSharedMemoryPropertyFlags();
382     // remove create info flag if not really available
383     if (((gpuResourceMgrFlags_ & GPU_RESOURCE_MANAGER_OPTIMIZE_STAGING_MEMORY) == 0) ||
384         !((deviceSharedMemoryPropertyFlags & NEEDED_DEVICE_MEMORY_PROPERTY_FLAGS_FOR_STAGING_MEM_OPT) ==
385             NEEDED_DEVICE_MEMORY_PROPERTY_FLAGS_FOR_STAGING_MEM_OPT)) {
386         gpuResourceMgrFlags_ = gpuResourceMgrFlags_ & ~GPU_RESOURCE_MANAGER_OPTIMIZE_STAGING_MEMORY;
387     }
388 }
389 
~GpuResourceManager()390 GpuResourceManager::~GpuResourceManager()
391 {
392     // reset cache before logging
393     // cache logs it's own un-released resources
394     gpuResourceCache_.reset();
395 
396 #if (RENDER_VALIDATION_ENABLED == 1)
397     auto checkAndPrintValidation = [](const PerManagerStore& store, const string_view name) {
398         uint32_t aliveCounter = 0;
399         const auto clientLock = std::lock_guard(store.clientMutex);
400         for (const auto& ref : store.clientHandles) {
401             if (ref && (ref.GetRefCount() > 1)) {
402                 aliveCounter++;
403             }
404         }
405         if (aliveCounter > 0) {
406             PLUGIN_LOG_W(
407                 "RENDER_VALIDATION: Not all %s handle references released (count: %u)", name.data(), aliveCounter);
408         }
409     };
410     checkAndPrintValidation(bufferStore_, "GPU buffer");
411     checkAndPrintValidation(imageStore_, "GPU image");
412     checkAndPrintValidation(samplerStore_, "GPU sampler");
413 #endif
414 }
415 
Get(const RenderHandle & handle) const416 RenderHandleReference GpuResourceManager::Get(const RenderHandle& handle) const
417 {
418     if (RenderHandleUtil::IsValid(handle)) {
419         const RenderHandleType handleType = RenderHandleUtil::GetHandleType(handle);
420         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
421         if (handleType == RenderHandleType::GPU_BUFFER) {
422             auto& store = bufferStore_;
423             auto const clientLock = std::shared_lock(store.clientMutex);
424             if (arrayIndex < static_cast<uint32_t>(store.clientHandles.size())) {
425                 return store.clientHandles[arrayIndex];
426             }
427         } else if (handleType == RenderHandleType::GPU_IMAGE) {
428             auto& store = imageStore_;
429             auto const clientLock = std::shared_lock(store.clientMutex);
430             if (arrayIndex < static_cast<uint32_t>(store.clientHandles.size())) {
431                 return store.clientHandles[arrayIndex];
432             }
433         } else if (handleType == RenderHandleType::GPU_SAMPLER) {
434             auto& store = samplerStore_;
435             auto const clientLock = std::shared_lock(store.clientMutex);
436             if (arrayIndex < static_cast<uint32_t>(store.clientHandles.size())) {
437                 return store.clientHandles[arrayIndex];
438             }
439         }
440         PLUGIN_LOG_I(
441             "invalid gpu resource handle (id: %" PRIu64 ", type: %u)", handle.id, static_cast<uint32_t>(handleType));
442     }
443     return RenderHandleReference {};
444 }
445 
GetStagingBufferDesc(const uint32_t byteSize)446 GpuBufferDesc GpuResourceManager::GetStagingBufferDesc(const uint32_t byteSize)
447 {
448     return {
449         BufferUsageFlagBits::CORE_BUFFER_USAGE_TRANSFER_SRC_BIT,
450         MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_HOST_COHERENT_BIT |
451             MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
452         EngineBufferCreationFlagBits::CORE_ENGINE_BUFFER_CREATION_SINGLE_SHOT_STAGING,
453         byteSize,
454     };
455 }
456 
457 // call to evaluate if there's already pending resources which we will replace
458 // store.clientMutex needs to be locked
GetPendingOptionalResourceIndex(const PerManagerStore & store,const RenderHandle & handle,const string_view name)459 uint32_t GpuResourceManager::GetPendingOptionalResourceIndex(
460     const PerManagerStore& store, const RenderHandle& handle, const string_view name)
461 {
462     uint32_t optionalResourceIndex = ~0u;
463     uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
464     const bool hasReplaceHandle = (arrayIndex < static_cast<uint32_t>(store.clientHandles.size()));
465     if ((!hasReplaceHandle) && (!name.empty())) {
466         if (auto const iter = store.nameToClientIndex.find(name); iter != store.nameToClientIndex.cend()) {
467             arrayIndex = RenderHandleUtil::GetIndexPart(iter->second);
468         }
469     }
470     if (arrayIndex < static_cast<uint32_t>(store.clientHandles.size())) {
471         if (const uint32_t pendingArrIndex = store.additionalData[arrayIndex].indexToPendingData;
472             pendingArrIndex != INVALID_PENDING_INDEX) {
473             PLUGIN_ASSERT(pendingArrIndex < store.pendingData.allocations.size());
474             if (pendingArrIndex < static_cast<uint32_t>(store.pendingData.allocations.size())) {
475                 const auto& allocOp = store.pendingData.allocations[pendingArrIndex];
476                 optionalResourceIndex = allocOp.optionalResourceIndex;
477             }
478         }
479     }
480     return optionalResourceIndex;
481 }
482 
483 // needs to be locked when called
CreateStagingBuffer(const GpuBufferDesc & desc)484 RenderHandleReference GpuResourceManager::CreateStagingBuffer(const GpuBufferDesc& desc)
485 {
486     PerManagerStore& store = bufferStore_;
487     return StoreAllocation(store, { ResourceDescriptor { desc }, {}, {}, RenderHandleType::GPU_BUFFER, ~0u, 0u })
488         .handle;
489 }
490 
491 // needs to be locked when called
CreateBuffer(const string_view name,const RenderHandle & replacedHandle,const GpuBufferDesc & desc)492 GpuResourceManager::StoreAllocationData GpuResourceManager::CreateBuffer(
493     const string_view name, const RenderHandle& replacedHandle, const GpuBufferDesc& desc)
494 {
495 #if (RENDER_VALIDATION_ENABLED == 1)
496     ValidateGpuBufferDesc(desc);
497 #endif
498     MemoryPropertyFlags additionalMemPropFlags = 0U;
499     if (device_.GetBackendType() == DeviceBackendType::VULKAN) {
500         additionalMemPropFlags = (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_MAP_OUTSIDE_RENDERER)
501                                     ? (MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
502                                     : 0U;
503     } else {
504         additionalMemPropFlags = (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_MAP_OUTSIDE_RENDERER)
505                                     ? (MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_HOST_COHERENT_BIT |
506                                             MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
507                                     : 0U;
508     }
509     const GpuBufferDesc validatedDesc {
510         desc.usageFlags | defaultBufferUsageFlags_,
511         desc.memoryPropertyFlags | additionalMemPropFlags,
512         desc.engineCreationFlags,
513         Math::max(desc.byteSize, 1u),
514         desc.format,
515     };
516     PerManagerStore& store = bufferStore_;
517     if (validatedDesc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
518         // replace immediate created if still pending (i.e. not usable on the GPU)
519         // memory pointers on client become invalid
520         const uint32_t emplaceResourceIndex = static_cast<uint32_t>(store.pendingData.buffers.size());
521         const uint32_t optionalResourceIndex =
522             Math::min(emplaceResourceIndex, GetPendingOptionalResourceIndex(store, replacedHandle, name));
523 
524         if (unique_ptr<GpuBuffer> gpuBuffer = [this](const GpuBufferDesc validatedDesc) {
525             // protect GPU memory allocations
526             auto lock = std::lock_guard(allocationMutex_);
527             return device_.CreateGpuBuffer(validatedDesc);
528         }(validatedDesc)) {
529             // safety checks
530             if ((optionalResourceIndex < emplaceResourceIndex) &&
531                 (optionalResourceIndex < store.pendingData.buffers.size())) {
532                 store.pendingData.buffers[optionalResourceIndex] = move(gpuBuffer);
533             } else {
534                 store.pendingData.buffers.push_back(move(gpuBuffer));
535             }
536         }
537 
538         StoreAllocationData sad = StoreAllocation(store, { ResourceDescriptor { validatedDesc }, name, replacedHandle,
539                                                              RenderHandleType::GPU_BUFFER, optionalResourceIndex, 0u });
540         // additional data is increased in StoreAllocation
541         // there are as many additional data elements as clientHandle elements
542         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(sad.handle.GetHandle());
543         PLUGIN_ASSERT(arrayIndex < store.additionalData.size());
544         if (GpuBuffer* buffer = store.pendingData.buffers[optionalResourceIndex].get(); buffer) {
545             store.additionalData[arrayIndex].resourcePtr = reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(buffer));
546         }
547         return sad;
548     } else {
549         return StoreAllocation(store,
550             { ResourceDescriptor { validatedDesc }, name, replacedHandle, RenderHandleType::GPU_BUFFER, ~0u, 0u });
551     }
552 }
553 
Create(const string_view name,const GpuBufferDesc & desc)554 RenderHandleReference GpuResourceManager::Create(const string_view name, const GpuBufferDesc& desc)
555 {
556     RenderHandleReference handle;
557 
558     GpuBufferDesc validDesc = GetValidGpuBufferDesc(desc);
559     CheckAndEnableMemoryOptimizations(gpuResourceMgrFlags_, validDesc);
560 
561     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
562         device_.Activate();
563     }
564     PerManagerStore& store = bufferStore_;
565     {
566         const auto lock = std::lock_guard(store.clientMutex);
567 
568         handle = CreateBuffer(name, {}, validDesc).handle;
569     }
570     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
571         device_.Deactivate();
572     }
573     return handle;
574 }
575 
Create(const RenderHandleReference & replacedHandle,const GpuBufferDesc & desc)576 RenderHandleReference GpuResourceManager::Create(const RenderHandleReference& replacedHandle, const GpuBufferDesc& desc)
577 {
578     RenderHandleReference handle;
579 
580     const RenderHandle rawHandle = replacedHandle.GetHandle();
581 #if (RENDER_VALIDATION_ENABLED == 1)
582     const bool valid = RenderHandleUtil::IsValid(rawHandle);
583     const RenderHandleType type = RenderHandleUtil::GetHandleType(rawHandle);
584     if (valid && (type != RenderHandleType::GPU_BUFFER)) {
585         PLUGIN_LOG_E("RENDER_VALIDATION: trying to replace a non GPU buffer handle (type: %u) with GpuBufferDesc",
586             static_cast<uint32_t>(type));
587     }
588 #endif
589     GpuBufferDesc validDesc = GetValidGpuBufferDesc(desc);
590     CheckAndEnableMemoryOptimizations(gpuResourceMgrFlags_, validDesc);
591 
592     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
593         device_.Activate();
594     }
595     {
596         PerManagerStore& store = bufferStore_;
597         const auto lock = std::lock_guard(store.clientMutex);
598 
599         handle = CreateBuffer({}, rawHandle, validDesc).handle;
600     }
601     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
602         device_.Deactivate();
603     }
604     return handle;
605 }
606 
Create(const string_view name,const GpuBufferDesc & desc,const array_view<const uint8_t> data)607 RenderHandleReference GpuResourceManager::Create(
608     const string_view name, const GpuBufferDesc& desc, const array_view<const uint8_t> data)
609 {
610     RenderHandleReference handle;
611 
612 #if (RENDER_VALIDATION_ENABLED == 1)
613     ValidateGpuBufferDesc(desc);
614 #endif
615 
616     GpuBufferDesc validDesc = GetValidGpuBufferDesc(desc);
617     CheckAndEnableMemoryOptimizations(gpuResourceMgrFlags_, validDesc);
618     const bool useStagingBuffer =
619         (validDesc.memoryPropertyFlags & CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT) ? false : true;
620 
621     auto& store = bufferStore_;
622     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
623         device_.Activate();
624     }
625     {
626         StoreAllocationData sad;
627         const auto lock = std::lock_guard(store.clientMutex);
628 
629         sad = CreateBuffer(name, {}, validDesc);
630         const uint32_t minByteSize = std::min(validDesc.byteSize, static_cast<uint32_t>(data.size_bytes()));
631 
632         auto const stagingLock = std::lock_guard(stagingMutex_);
633 
634         stagingOperations_.bufferCopies.push_back(BufferCopy { 0, 0, minByteSize });
635         const uint32_t beginIndex = static_cast<uint32_t>(stagingOperations_.bufferCopies.size()) - 1;
636         vector<uint8_t> copiedData(data.cbegin().ptr(), data.cbegin().ptr() + minByteSize);
637 
638         // add staging vector index handle to resource handle in pending allocations
639         PLUGIN_ASSERT(sad.allocationIndex < store.pendingData.allocations.size());
640         auto& allocRef = store.pendingData.allocations[sad.allocationIndex];
641         allocRef.optionalStagingVectorIndex = static_cast<uint32_t>(stagingOperations_.bufferToBuffer.size());
642         allocRef.optionalStagingCopyType = useStagingBuffer ? StagingCopyStruct::CopyType::BUFFER_TO_BUFFER
643                                                             : StagingCopyStruct::CopyType::CPU_TO_BUFFER;
644 
645         if (useStagingBuffer) {
646             const uint32_t stagingBufferByteSize =
647                 useStagingBuffer ? static_cast<uint32_t>(copiedData.size_in_bytes()) : 0u;
648             stagingOperations_.bufferToBuffer.push_back(
649                 StagingCopyStruct { StagingCopyStruct::DataType::DATA_TYPE_VECTOR, {}, sad.handle, beginIndex, 1,
650                     move(copiedData), nullptr, Format::BASE_FORMAT_UNDEFINED, stagingBufferByteSize, false });
651         } else {
652             stagingOperations_.cpuToBuffer.push_back(StagingCopyStruct { StagingCopyStruct::DataType::DATA_TYPE_VECTOR,
653                 {}, sad.handle, beginIndex, 1, move(copiedData), nullptr, Format::BASE_FORMAT_UNDEFINED, 0u, false });
654         }
655         handle = move(sad.handle);
656     }
657     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
658         device_.Deactivate();
659     }
660     return handle;
661 }
662 
Create(const GpuBufferDesc & desc)663 RenderHandleReference GpuResourceManager::Create(const GpuBufferDesc& desc)
664 {
665     RenderHandleReference handle;
666     GpuBufferDesc validDesc = GetValidGpuBufferDesc(desc);
667     CheckAndEnableMemoryOptimizations(gpuResourceMgrFlags_, validDesc);
668     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
669         device_.Activate();
670     }
671     {
672         auto& store = bufferStore_;
673         const auto lock = std::lock_guard(store.clientMutex);
674 
675         handle = CreateBuffer({}, {}, validDesc).handle;
676     }
677     if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE) {
678         device_.Deactivate();
679     }
680     return handle;
681 }
682 
Create(const GpuBufferDesc & desc,const array_view<const uint8_t> data)683 RenderHandleReference GpuResourceManager::Create(const GpuBufferDesc& desc, const array_view<const uint8_t> data)
684 {
685     // this is a fwd-method, desc is validated inside the called method
686     return Create({}, desc, data);
687 }
688 
689 // needs to be locked when called
CreateImage(const string_view name,const RenderHandle & replacedHandle,const GpuImageDesc & desc)690 GpuResourceManager::StoreAllocationData GpuResourceManager::CreateImage(
691     const string_view name, const RenderHandle& replacedHandle, const GpuImageDesc& desc)
692 {
693 #if (RENDER_VALIDATION_ENABLED == 1)
694     ValidateGpuImageDesc(desc, name);
695 #endif
696 
697     PerManagerStore& store = imageStore_;
698     const StoreAllocationInfo info {
699         ResourceDescriptor { GpuImageDesc {
700             desc.imageType,
701             desc.imageViewType,
702             device_.GetFormatOrFallback(desc.format),
703             (desc.imageTiling > ImageTiling::CORE_IMAGE_TILING_LINEAR) ? ImageTiling::CORE_IMAGE_TILING_OPTIMAL
704                                                                        : desc.imageTiling,
705             (desc.usageFlags & GpuResourceDescFlagValidation::ALL_GPU_IMAGE_USAGE_FLAGS) | defaultImageUsageFlags_,
706             ((desc.memoryPropertyFlags != 0) ? desc.memoryPropertyFlags
707                                              : MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &
708                 GpuResourceDescFlagValidation::ALL_MEMORY_PROPERTY_FLAGS,
709             desc.createFlags & GpuResourceDescFlagValidation::ALL_IMAGE_CREATE_FLAGS,
710             desc.engineCreationFlags,
711             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.width)),
712             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.height)),
713             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.depth)),
714             Math::max(1u,
715                 Math::min(desc.mipCount,
716                     static_cast<uint32_t>(std::log2f(static_cast<float>(Math::max(desc.width, desc.height)))) + 1u)),
717             Math::max(1u, desc.layerCount),
718             Math::max(1u, desc.sampleCountFlags),
719             desc.componentMapping,
720         } },
721         name, replacedHandle, RenderHandleType::GPU_IMAGE, ~0u, 0u
722     };
723     if (info.descriptor.imageDescriptor.format == Format::BASE_FORMAT_UNDEFINED) {
724         PLUGIN_LOG_E("Undefined image BASE_FORMAT_UNDEFINED (input format %u)", static_cast<uint32_t>(desc.format));
725         return {};
726     }
727 
728     return StoreAllocation(store, info);
729 }
730 
Create(const RenderHandleReference & replacedHandle,const GpuImageDesc & desc)731 RenderHandleReference GpuResourceManager::Create(const RenderHandleReference& replacedHandle, const GpuImageDesc& desc)
732 {
733     const RenderHandle rawHandle = replacedHandle.GetHandle();
734 #if (RENDER_VALIDATION_ENABLED == 1)
735     const bool valid = RenderHandleUtil::IsValid(rawHandle);
736     const RenderHandleType type = RenderHandleUtil::GetHandleType(rawHandle);
737     if (valid && (type != RenderHandleType::GPU_IMAGE)) {
738         PLUGIN_LOG_E(
739             "RENDER_VALIDATION: trying to replace a non GPU image handle (type: %u) with GpuImageDesc",
740             static_cast<uint32_t>(type));
741     }
742 #endif
743     PerManagerStore& store = imageStore_;
744     const auto lock = std::lock_guard(store.clientMutex);
745 
746     return CreateImage({}, rawHandle, desc).handle;
747 }
748 
Create(const string_view name,const GpuImageDesc & desc)749 RenderHandleReference GpuResourceManager::Create(const string_view name, const GpuImageDesc& desc)
750 {
751     PerManagerStore& store = imageStore_;
752     const auto lock = std::lock_guard(store.clientMutex);
753 
754     return CreateImage(name, {}, desc).handle;
755 }
756 
Create(const GpuImageDesc & desc)757 RenderHandleReference GpuResourceManager::Create(const GpuImageDesc& desc)
758 {
759     PerManagerStore& store = imageStore_;
760     const auto lock = std::lock_guard(store.clientMutex);
761 
762     return CreateImage({}, {}, desc).handle;
763 }
764 
RemapGpuImageHandle(const RenderHandle & clientHandle,const RenderHandle & clientHandleGpuResource)765 void GpuResourceManager::RemapGpuImageHandle(
766     const RenderHandle& clientHandle, const RenderHandle& clientHandleGpuResource)
767 {
768     bool validClientHandles = (RenderHandleUtil::GetHandleType(clientHandle) == RenderHandleType::GPU_IMAGE) ||
769                               (RenderHandleUtil::GetHandleType(clientHandleGpuResource) == RenderHandleType::GPU_IMAGE);
770     if (validClientHandles) {
771         PerManagerStore& store = imageStore_;
772         auto const lock = std::lock_guard(store.clientMutex);
773 
774         const uint32_t clientArrayIndex = RenderHandleUtil::GetIndexPart(clientHandle);
775         const uint32_t clientResourceArrayIndex = RenderHandleUtil::GetIndexPart(clientHandleGpuResource);
776         validClientHandles =
777             validClientHandles && ((clientArrayIndex < static_cast<uint32_t>(store.clientHandles.size())) &&
778                                       (clientResourceArrayIndex < static_cast<uint32_t>(store.clientHandles.size())));
779         if (validClientHandles) {
780             store.descriptions[clientArrayIndex] = store.descriptions[clientResourceArrayIndex];
781             store.pendingData.remaps.push_back(RemapDescription { clientHandle, clientHandleGpuResource });
782         }
783     }
784 
785     if (!validClientHandles) {
786         PLUGIN_LOG_E("invalid client handles given to RemapGpuImageHandle()");
787     }
788 }
789 
Create(const string_view name,const GpuImageDesc & desc,IImageContainer::Ptr image)790 RenderHandleReference GpuResourceManager::Create(
791     const string_view name, const GpuImageDesc& desc, IImageContainer::Ptr image)
792 {
793     StoreAllocationData sad;
794     if (image) {
795         PerManagerStore& store = imageStore_;
796         auto const lockImg = std::lock_guard(store.clientMutex);
797 
798         sad = CreateImage(name, {}, desc);
799         if (IsGpuImage(sad.handle)) {
800             auto const lockStag = std::lock_guard(stagingMutex_);
801 
802             const auto& copies = image->GetBufferImageCopies();
803             const bool scaleImage = GetScalingImageNeed(desc, copies);
804 
805             Format format = Format::BASE_FORMAT_UNDEFINED;
806             if (scaleImage) { // needs to be locked
807                 UpdateStagingScaling(desc.format, copies, stagingOperations_.scalingImageData);
808                 format = desc.format;
809             }
810             for (const auto& copyRef : copies) {
811                 stagingOperations_.bufferImageCopies.push_back(ConvertCoreBufferImageCopy(copyRef));
812             }
813 
814             // add staging handle to resource handle in pending allocations
815             PLUGIN_ASSERT(sad.allocationIndex < store.pendingData.allocations.size());
816             auto& allocRef = store.pendingData.allocations[sad.allocationIndex];
817             allocRef.optionalStagingVectorIndex = static_cast<uint32_t>(stagingOperations_.bufferToImage.size());
818             allocRef.optionalStagingCopyType = StagingCopyStruct::CopyType::BUFFER_TO_IMAGE;
819 
820             const uint32_t stagingBufferByteSize = static_cast<uint32_t>(image->GetData().size_bytes());
821             const uint32_t count = static_cast<uint32_t>(copies.size());
822             const uint32_t beginIndex = static_cast<uint32_t>(stagingOperations_.bufferImageCopies.size()) - count;
823             stagingOperations_.bufferToImage.push_back(
824                 StagingCopyStruct { StagingCopyStruct::DataType::DATA_TYPE_IMAGE_CONTAINER, {}, sad.handle, beginIndex,
825                     count, {}, move(image), format, stagingBufferByteSize, false });
826         }
827     } else {
828         PLUGIN_LOG_E("invalid image pointer to Create GPU image");
829     }
830     return sad.handle;
831 }
832 
Create(const string_view name,const GpuImageDesc & desc,const array_view<const uint8_t> data,const array_view<const BufferImageCopy> bufferImageCopies)833 RenderHandleReference GpuResourceManager::Create(const string_view name, const GpuImageDesc& desc,
834     const array_view<const uint8_t> data, const array_view<const BufferImageCopy> bufferImageCopies)
835 {
836     StoreAllocationData sad;
837     {
838         PerManagerStore& store = imageStore_;
839         auto const lockImg = std::lock_guard(store.clientMutex);
840 
841         sad = CreateImage(name, {}, desc);
842         if (IsGpuImage(sad.handle)) {
843             auto const lockStag = std::lock_guard(stagingMutex_);
844 
845             Format format = Format::BASE_FORMAT_UNDEFINED;
846             if (GetScalingImageNeed(desc, bufferImageCopies)) { // needs to be locked
847                 UpdateStagingScaling(desc.format, bufferImageCopies, stagingOperations_.scalingImageData);
848                 format = desc.format;
849             }
850             for (const auto& copyRef : bufferImageCopies) {
851 #if (RENDER_VALIDATION_ENABLED == 1)
852                 ValidateGpuImageCopy(desc, copyRef, name);
853 #endif
854                 stagingOperations_.bufferImageCopies.push_back(copyRef);
855             }
856             // add staging vector index to resource alloc in pending allocations
857             PLUGIN_ASSERT(sad.allocationIndex < store.pendingData.allocations.size());
858             auto& allocRef = store.pendingData.allocations[sad.allocationIndex];
859             allocRef.optionalStagingVectorIndex = static_cast<uint32_t>(stagingOperations_.bufferToImage.size());
860             allocRef.optionalStagingCopyType = StagingCopyStruct::CopyType::BUFFER_TO_IMAGE;
861 
862             const uint32_t stagingBufferByteSize = static_cast<uint32_t>(data.size_bytes());
863             const uint32_t count = static_cast<uint32_t>(bufferImageCopies.size());
864             const uint32_t beginIndex = static_cast<uint32_t>(stagingOperations_.bufferImageCopies.size()) - count;
865 
866             vector<uint8_t> copiedData(data.cbegin().ptr(), data.cend().ptr());
867             stagingOperations_.bufferToImage.push_back(
868                 StagingCopyStruct { StagingCopyStruct::DataType::DATA_TYPE_VECTOR, {}, sad.handle, beginIndex, count,
869                     move(copiedData), nullptr, format, stagingBufferByteSize, false });
870         }
871     }
872     return sad.handle;
873 }
874 
Create(const string_view name,const GpuImageDesc & desc,const array_view<const uint8_t> data)875 RenderHandleReference GpuResourceManager::Create(
876     const string_view name, const GpuImageDesc& desc, const array_view<const uint8_t> data)
877 {
878     BufferImageCopy bufferImageCopy {
879         0,
880         desc.width,
881         desc.height,
882         { ImageAspectFlagBits::CORE_IMAGE_ASPECT_COLOR_BIT, 0, 0, desc.layerCount },
883         { 0, 0, 0 },
884         { desc.width, desc.height, desc.depth },
885     };
886 
887     const array_view<const BufferImageCopy> av(&bufferImageCopy, 1);
888     return Create(name, desc, data, av);
889 }
890 
CreateView(const string_view name,const GpuImageDesc & desc,const GpuImagePlatformData & gpuImagePlatformData)891 RenderHandleReference GpuResourceManager::CreateView(
892     const string_view name, const GpuImageDesc& desc, const GpuImagePlatformData& gpuImagePlatformData)
893 {
894     device_.Activate();
895     PerManagerStore& store = imageStore_;
896     const auto lock = std::lock_guard(store.clientMutex);
897 
898     // replace immediate created if still pending (i.e. not usable on the GPU)
899     const uint32_t emplaceResourceIndex = static_cast<uint32_t>(store.pendingData.images.size());
900     const uint32_t optionalResourceIndex =
901         Math::min(emplaceResourceIndex, GetPendingOptionalResourceIndex(store, {}, name));
902 
903     if (unique_ptr<GpuImage> gpuImage = [this](const GpuImageDesc& desc,
904         const GpuImagePlatformData& gpuImagePlatformData) {
905             // protect GPU memory allocations
906             auto lock = std::lock_guard(allocationMutex_);
907             return device_.CreateGpuImageView(desc, gpuImagePlatformData);
908         } (desc, gpuImagePlatformData)) {
909         // safety checks
910         if ((optionalResourceIndex < emplaceResourceIndex) &&
911             (optionalResourceIndex < store.pendingData.images.size())) {
912             store.pendingData.images[optionalResourceIndex] = move(gpuImage);
913         } else {
914             store.pendingData.images.push_back(move(gpuImage));
915         }
916     }
917     device_.Deactivate();
918 
919     return StoreAllocation(
920         store, { ResourceDescriptor { desc }, name, {}, RenderHandleType::GPU_IMAGE, optionalResourceIndex, 0u })
921         .handle;
922 }
923 
CreateView(const string_view name,const GpuImageDesc & desc,const BackendSpecificImageDesc & backendSpecificData)924 RenderHandleReference GpuResourceManager::CreateView(
925     const string_view name, const GpuImageDesc& desc, const BackendSpecificImageDesc& backendSpecificData)
926 {
927     device_.Activate();
928     PerManagerStore& store = imageStore_;
929     const auto lock = std::lock_guard(store.clientMutex);
930 
931     // replace immediate created if still pending (i.e. not usable on the GPU)
932     const uint32_t emplaceResourceIndex = static_cast<uint32_t>(store.pendingData.images.size());
933     const uint32_t optionalResourceIndex =
934         Math::min(emplaceResourceIndex, GetPendingOptionalResourceIndex(store, {}, name));
935 
936     // additional handle flags provide information if platform conversion is needed
937     uint32_t additionalHandleFlags = 0u;
938 
939     if (unique_ptr<GpuImage> gpuImage =
940         [this](const GpuImageDesc& desc, const BackendSpecificImageDesc& backendSpecificData) {
941             // protect GPU memory allocations
942             auto lock = std::lock_guard(allocationMutex_);
943             return device_.CreateGpuImageView(desc, backendSpecificData);
944         } (desc, backendSpecificData)) {
945         const auto additionalImageFlags = gpuImage->GetAdditionalFlags();
946         additionalHandleFlags =
947             (additionalImageFlags & GpuImage::AdditionalFlagBits::ADDITIONAL_PLATFORM_CONVERSION_BIT)
948                 ? CORE_RESOURCE_HANDLE_PLATFORM_CONVERSION
949                 : 0u;
950         // safety checks
951         if ((optionalResourceIndex < emplaceResourceIndex) &&
952             (optionalResourceIndex < store.pendingData.images.size())) {
953             store.pendingData.images[optionalResourceIndex] = move(gpuImage);
954         } else {
955             store.pendingData.images.push_back(move(gpuImage));
956         }
957     }
958     device_.Deactivate();
959 
960     const auto& images = store.pendingData.images;
961     const auto& finalDesc = (optionalResourceIndex < images.size() && images[optionalResourceIndex])
962                                 ? images[optionalResourceIndex]->GetDesc()
963                                 : desc;
964     auto handle = StoreAllocation(store, { ResourceDescriptor { finalDesc }, name, {}, RenderHandleType::GPU_IMAGE,
965                                              optionalResourceIndex, additionalHandleFlags })
966                       .handle;
967 #if (RENDER_VALIDATION_ENABLED == 1)
968     if ((additionalHandleFlags & CORE_RESOURCE_HANDLE_PLATFORM_CONVERSION) &&
969         !RenderHandleUtil::IsPlatformConversionResource(handle.GetHandle())) {
970         PLUGIN_LOG_ONCE_W("core_validation_create_view_plat_conversion",
971             "RENDER_VALIDATION: platform conversion needing resource cannot replace existing resource handle (name: "
972             "%s)",
973             name.data());
974     }
975 #endif
976     return handle;
977 }
978 
Create(const GpuImageDesc & desc,const array_view<const uint8_t> data,const array_view<const BufferImageCopy> bufferImageCopies)979 RenderHandleReference GpuResourceManager::Create(const GpuImageDesc& desc, const array_view<const uint8_t> data,
980     const array_view<const BufferImageCopy> bufferImageCopies)
981 {
982     return Create({}, desc, data, bufferImageCopies);
983 }
984 
Create(const GpuImageDesc & desc,const array_view<const uint8_t> data)985 RenderHandleReference GpuResourceManager::Create(const GpuImageDesc& desc, const array_view<const uint8_t> data)
986 {
987     return Create({}, desc, data);
988 }
989 
Create(const GpuImageDesc & desc,IImageContainer::Ptr image)990 RenderHandleReference GpuResourceManager::Create(const GpuImageDesc& desc, IImageContainer::Ptr image)
991 {
992     return Create({}, desc, move(image));
993 }
994 
Create(const string_view name,const GpuSamplerDesc & desc)995 RenderHandleReference GpuResourceManager::Create(const string_view name, const GpuSamplerDesc& desc)
996 {
997     PerManagerStore& store = samplerStore_;
998     const auto lock = std::lock_guard(store.clientMutex);
999 
1000     return StoreAllocation(store, { ResourceDescriptor { desc }, name, {}, RenderHandleType::GPU_SAMPLER, ~0u, 0u })
1001         .handle;
1002 }
1003 
Create(const RenderHandleReference & replacedHandle,const GpuSamplerDesc & desc)1004 RenderHandleReference GpuResourceManager::Create(
1005     const RenderHandleReference& replacedHandle, const GpuSamplerDesc& desc)
1006 {
1007     const RenderHandle rawHandle = replacedHandle.GetHandle();
1008 #if (RENDER_VALIDATION_ENABLED == 1)
1009     const bool valid = RenderHandleUtil::IsValid(rawHandle);
1010     const RenderHandleType type = RenderHandleUtil::GetHandleType(rawHandle);
1011     if (valid && (type != RenderHandleType::GPU_SAMPLER)) {
1012         PLUGIN_LOG_E("RENDER_VALIDATION: trying to replace a non GPU sampler handle (type: %u) with GpuSamplerDesc",
1013             static_cast<uint32_t>(type));
1014     }
1015 #endif
1016     PerManagerStore& store = samplerStore_;
1017     const auto lock = std::lock_guard(store.clientMutex);
1018 
1019     return StoreAllocation(
1020         store, { ResourceDescriptor { desc }, {}, rawHandle, RenderHandleType::GPU_SAMPLER, ~0u, 0u })
1021         .handle;
1022 }
1023 
Create(const GpuSamplerDesc & desc)1024 RenderHandleReference GpuResourceManager::Create(const GpuSamplerDesc& desc)
1025 {
1026     return Create("", desc);
1027 }
1028 
CreateAccelerationStructure(const BASE_NS::string_view name,const RenderHandle & replacedHandle,const GpuAccelerationStructureDesc & desc)1029 GpuResourceManager::StoreAllocationData GpuResourceManager::CreateAccelerationStructure(
1030     const BASE_NS::string_view name, const RenderHandle& replacedHandle, const GpuAccelerationStructureDesc& desc)
1031 {
1032     PerManagerStore& store = bufferStore_;
1033 #if (RENDER_VALIDATION_ENABLED == 1)
1034     ValidateGpuAccelerationStructureDesc(desc);
1035 #endif
1036     GpuAccelerationStructureDesc validatedDesc = desc;
1037     validatedDesc.bufferDesc.usageFlags |= defaultBufferUsageFlags_;
1038     validatedDesc.bufferDesc.byteSize = Math::max(validatedDesc.bufferDesc.byteSize, 1u),
1039     validatedDesc.bufferDesc.usageFlags |= CORE_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT;
1040 
1041     constexpr auto additionalBufferFlags = CORE_RESOURCE_HANDLE_ACCELERATION_STRUCTURE;
1042     return StoreAllocation(store, { ResourceDescriptor { validatedDesc }, name, replacedHandle,
1043                                       RenderHandleType::GPU_BUFFER, ~0u, additionalBufferFlags });
1044 }
1045 
Create(const GpuAccelerationStructureDesc & desc)1046 RenderHandleReference GpuResourceManager::Create(const GpuAccelerationStructureDesc& desc)
1047 {
1048     PerManagerStore& store = bufferStore_;
1049     const auto lock = std::lock_guard(store.clientMutex);
1050 
1051     return CreateAccelerationStructure("", {}, desc).handle;
1052 }
1053 
Create(const string_view name,const GpuAccelerationStructureDesc & desc)1054 RenderHandleReference GpuResourceManager::Create(const string_view name, const GpuAccelerationStructureDesc& desc)
1055 {
1056     PerManagerStore& store = bufferStore_;
1057     const auto lock = std::lock_guard(store.clientMutex);
1058 
1059     return CreateAccelerationStructure(name, {}, desc).handle;
1060 }
1061 
Create(const RenderHandleReference & replacedHandle,const GpuAccelerationStructureDesc & desc)1062 RenderHandleReference GpuResourceManager::Create(
1063     const RenderHandleReference& replacedHandle, const GpuAccelerationStructureDesc& desc)
1064 {
1065     const RenderHandle rawHandle = replacedHandle.GetHandle();
1066 #if (RENDER_VALIDATION_ENABLED == 1)
1067     const bool valid = RenderHandleUtil::IsValid(rawHandle);
1068     const RenderHandleType type = RenderHandleUtil::GetHandleType(rawHandle);
1069     if (valid &&
1070         ((type != RenderHandleType::GPU_BUFFER) || (!RenderHandleUtil::IsGpuAccelerationStructure(rawHandle)))) {
1071         PLUGIN_LOG_E("RENDER_VALIDATION: trying to replace a non GPU acceleration structure handle (type: %u) with "
1072                      "GpuAccelerationStructureDesc", static_cast<uint32_t>(type));
1073     }
1074 #endif
1075     PerManagerStore& store = bufferStore_;
1076     const auto lock = std::lock_guard(store.clientMutex);
1077 
1078     return CreateAccelerationStructure("", rawHandle, desc).handle;
1079 }
1080 
CreateSwapchainImage(const RenderHandleReference & replacedHandle,const BASE_NS::string_view name,const GpuImageDesc & desc)1081 RenderHandleReference GpuResourceManager::CreateSwapchainImage(
1082     const RenderHandleReference& replacedHandle, const BASE_NS::string_view name, const GpuImageDesc& desc)
1083 {
1084 #if (RENDER_VALIDATION_ENABLED == 1)
1085     ValidateGpuImageDesc(desc, "");
1086 #endif
1087 
1088     PerManagerStore& store = imageStore_;
1089     const auto lock = std::lock_guard(store.clientMutex);
1090 
1091     const uint32_t addFlags = RenderHandleInfoFlagBits::CORE_RESOURCE_HANDLE_SWAPCHAIN_RESOURCE;
1092     // NOTE: no mips for swapchains
1093     // TODO: NOTE: allocation type is undefined
1094     const StoreAllocationInfo info {
1095         ResourceDescriptor { GpuImageDesc {
1096             desc.imageType,
1097             desc.imageViewType,
1098             device_.GetFormatOrFallback(desc.format),
1099             (desc.imageTiling > ImageTiling::CORE_IMAGE_TILING_LINEAR) ? ImageTiling::CORE_IMAGE_TILING_OPTIMAL
1100                                                                        : desc.imageTiling,
1101             (desc.usageFlags & GpuResourceDescFlagValidation::ALL_GPU_IMAGE_USAGE_FLAGS) | defaultImageUsageFlags_,
1102             ((desc.memoryPropertyFlags != 0) ? desc.memoryPropertyFlags
1103                                              : MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &
1104                 GpuResourceDescFlagValidation::ALL_MEMORY_PROPERTY_FLAGS,
1105             desc.createFlags & GpuResourceDescFlagValidation::ALL_IMAGE_CREATE_FLAGS,
1106             desc.engineCreationFlags,
1107             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.width)),
1108             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.height)),
1109             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.depth)),
1110             1u, // hard-coded mip count
1111             Math::max(1u, desc.layerCount),
1112             Math::max(1u, desc.sampleCountFlags),
1113             desc.componentMapping,
1114         } },
1115         name, replacedHandle.GetHandle(), RenderHandleType::GPU_IMAGE, ~0u, addFlags, AllocType::UNDEFINED
1116     };
1117     if (info.descriptor.imageDescriptor.format == Format::BASE_FORMAT_UNDEFINED) {
1118         PLUGIN_LOG_E("Image format resolved to BASE_FORMAT_UNDEFINED (input format %u)", static_cast<uint32_t>(desc.format));
1119         return {};
1120     }
1121 
1122     return StoreAllocation(store, info).handle;
1123 }
1124 
1125 RenderHandleReference GpuResourceManager::CreateShallowHandle(const GpuImageDesc& desc)
1126 {
1127     PerManagerStore& store = imageStore_;
1128     const auto lock = std::lock_guard(store.clientMutex);
1129 
1130 #if (RENDER_VALIDATION_ENABLED == 1)
1131     ValidateGpuImageDesc(desc, "");
1132 #endif
1133 
1134     const uint32_t addFlags = RenderHandleInfoFlagBits::CORE_RESOURCE_HANDLE_SHALLOW_RESOURCE;
1135     const StoreAllocationInfo info {
1136         ResourceDescriptor { GpuImageDesc {
1137             desc.imageType,
1138             desc.imageViewType,
1139             device_.GetFormatOrFallback(desc.format),
1140             (desc.imageTiling > ImageTiling::CORE_IMAGE_TILING_LINEAR) ? ImageTiling::CORE_IMAGE_TILING_OPTIMAL
1141                                                                        : desc.imageTiling,
1142             (desc.usageFlags & GpuResourceDescFlagValidation::ALL_GPU_IMAGE_USAGE_FLAGS) | defaultImageUsageFlags_,
1143             ((desc.memoryPropertyFlags != 0) ? desc.memoryPropertyFlags
1144                                              : MemoryPropertyFlagBits::CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &
1145                 GpuResourceDescFlagValidation::ALL_MEMORY_PROPERTY_FLAGS,
1146             desc.createFlags & GpuResourceDescFlagValidation::ALL_IMAGE_CREATE_FLAGS,
1147             desc.engineCreationFlags,
1148             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.width)),
1149             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.height)),
1150             Math::min(MAX_IMAGE_EXTENT, Math::max(1u, desc.depth)),
1151             Math::max(1u,
1152                 Math::min(desc.mipCount,
1153                     static_cast<uint32_t>(std::log2f(static_cast<float>(Math::max(desc.width, desc.height)))) + 1u)),
1154             Math::max(1u, desc.layerCount),
1155             Math::max(1u, desc.sampleCountFlags),
1156             desc.componentMapping,
1157         } },
1158         "", {}, RenderHandleType::GPU_IMAGE, ~0u, addFlags, AllocType::UNDEFINED
1159     };
1160     if (info.descriptor.imageDescriptor.format == Format::BASE_FORMAT_UNDEFINED) {
1161         PLUGIN_LOG_E("Image format resolved to BASE_FORMAT_UNDEFINED (input format %u)", static_cast<uint32_t>(desc.format));
1162         return {};
1163     }
1164 
1165     return StoreAllocation(store, info).handle;
1166 }
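
// Editor's note: worked example of the mip count clamp used above (values illustrative):
// for a 1024x768 request, log2(max(1024, 768)) == 10, so at most 10 + 1 == 11 mip levels
// are allowed; a requested mipCount of 16 is therefore clamped to 11, and a mipCount of 0
// is raised to 1.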
1167 
1168 // has staging lock and possible gpu buffer lock inside
1169 void GpuResourceManager::RemoveStagingOperations(const OperationDescription& destroyAlloc)
1170 {
1171     // remove possible stagings
1172     const RenderHandleType handleType = RenderHandleUtil::GetHandleType(destroyAlloc.handle);
1173     if (((handleType == RenderHandleType::GPU_BUFFER) || (handleType == RenderHandleType::GPU_IMAGE)) &&
1174         (destroyAlloc.optionalStagingCopyType != StagingCopyStruct::CopyType::UNDEFINED)) {
1175         auto const lockStaging = std::lock_guard(stagingMutex_);
1176 
1177         auto invalidateStagingCopy = [](const OperationDescription& alloc, vector<StagingCopyStruct>& vecRef) {
1178             PLUGIN_ASSERT(alloc.optionalStagingVectorIndex < vecRef.size());
1179             vecRef[alloc.optionalStagingVectorIndex].srcHandle = {};
1180             vecRef[alloc.optionalStagingVectorIndex].dstHandle = {};
1181             vecRef[alloc.optionalStagingVectorIndex].invalidOperation = true;
1182         };
1183 
1184         if (destroyAlloc.optionalStagingCopyType == StagingCopyStruct::CopyType::BUFFER_TO_BUFFER) {
1185             invalidateStagingCopy(destroyAlloc, stagingOperations_.bufferToBuffer);
1186         } else if (destroyAlloc.optionalStagingCopyType == StagingCopyStruct::CopyType::BUFFER_TO_IMAGE) {
1187             invalidateStagingCopy(destroyAlloc, stagingOperations_.bufferToImage);
1188         } else if (destroyAlloc.optionalStagingCopyType == StagingCopyStruct::CopyType::IMAGE_TO_BUFFER) {
1189             invalidateStagingCopy(destroyAlloc, stagingOperations_.imageToBuffer);
1190         } else if (destroyAlloc.optionalStagingCopyType == StagingCopyStruct::CopyType::IMAGE_TO_IMAGE) {
1191             invalidateStagingCopy(destroyAlloc, stagingOperations_.imageToImage);
1192         } else if (destroyAlloc.optionalStagingCopyType == StagingCopyStruct::CopyType::CPU_TO_BUFFER) {
1193             invalidateStagingCopy(destroyAlloc, stagingOperations_.cpuToBuffer);
1194         } else {
1195             PLUGIN_ASSERT(false);
1196         }
1197     }
1198 
1199     // NOTE: we do not clean up/invalidate copy operations in stagingOperations_.cpuToBuffer at the moment
1200     // it is the user's responsibility not to use a handle that has already been destroyed
1201 }
1202 
1203 // needs to be locked from outside
1204 // staging cannot be locked when called
1205 void GpuResourceManager::Destroy(PerManagerStore& store, const RenderHandle& handle)
1206 {
1207 #if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
1208     PLUGIN_LOG_E("gpu resource deallocation %" PRIx64, handle.id);
1209 #endif
1210 
1211     const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1212     if (arrayIndex < store.clientHandles.size()) {
1213         if (!(store.clientHandles[arrayIndex])) {
1214             return; // early out if re-destroying the same handle
1215         }
1216 #if (RENDER_VALIDATION_ENABLED == 1)
1217         const uint32_t currGeneration =
1218             RenderHandleUtil::GetGenerationIndexPart(store.clientHandles[arrayIndex].GetHandle());
1219         const uint32_t destroyHandleGeneration = RenderHandleUtil::GetGenerationIndexPart(handle);
1220         if (currGeneration != destroyHandleGeneration) {
1221             PLUGIN_LOG_W("RENDER_VALIDATION: destroy handle is not the current generation (destroy:%u != current:%u)",
1222                 destroyHandleGeneration, currGeneration);
1223         }
1224 #endif
1225         const uint32_t hasNameId = RenderHandleUtil::GetHasNamePart(handle);
1226         if (hasNameId != 0) {
1227             // remove name if present
1228             if (auto const pos = std::find_if(store.nameToClientIndex.begin(), store.nameToClientIndex.end(),
1229                     [arrayIndex](auto const& nameToHandle) { return nameToHandle.second == arrayIndex; });
1230                 pos != store.nameToClientIndex.end()) {
1231                 store.nameToClientIndex.erase(pos);
1232             }
1233         }
1234 
1235         // we do not set default values to GpuXDesc (we leave the old data, it won't be used)
1236         // invalidate handle, noticed when trying to re-destroy (early-out in the beginning of the if)
1237         store.clientHandles[arrayIndex] = {};
1238 
1239         // if the handle is already found and it's an alloc we do not want to allocate and then deallocate
1240         if (const uint32_t pendingArrIndex = store.additionalData[arrayIndex].indexToPendingData;
1241             pendingArrIndex != INVALID_PENDING_INDEX) {
1242             // NOTE: a validity assert could be added here (for the case where this is called before resources are created)
1243             if (pendingArrIndex < store.pendingData.allocations.size()) {
1244                 auto& ref = store.pendingData.allocations[pendingArrIndex];
1245                 if (ref.allocType == AllocType::ALLOC) {
1246                     ref.allocType = AllocType::REMOVED;
1247                     RemoveStagingOperations(ref);
1248                 }
1249             }
1250         } else {
1251             PLUGIN_ASSERT(store.additionalData[arrayIndex].indexToPendingData == INVALID_PENDING_INDEX);
1252             store.additionalData[arrayIndex] = { 0, static_cast<uint32_t>(store.pendingData.allocations.size()) };
1253             store.pendingData.allocations.emplace_back(
1254                 handle, ResourceDescriptor { GpuBufferDesc {} }, AllocType::DEALLOC, static_cast<uint32_t>(~0u));
1255             // there cannot be staging operations because pendingData was not found
1256             // all other operations to destroyable handle are user's responsibility
1257         }
1258     }
1259 }
1260 
1261 // needs to be locked when called
1262 void GpuResourceManager::DestroyImmediate(PerManagerStore& store, const RenderHandle& handle)
1263 {
1264     if (RenderHandleUtil::IsValid(handle)) { // found, Destroy immediate
1265         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1266         if (arrayIndex < static_cast<uint32_t>(store.gpuHandles.size())) {
1267             store.mgr->DestroyImmediate(arrayIndex);
1268             store.clientHandles[arrayIndex] = {};
1269             store.additionalData[arrayIndex] = {};
1270             store.gpuHandles[arrayIndex] = InvalidateWithGeneration(store.gpuHandles[arrayIndex]);
1271         }
1272     }
1273 }
1274 
1275 void GpuResourceManager::Destroy(const RenderHandle& handle)
1276 {
1277     if (RenderHandleUtil::IsValid(handle)) {
1278         const RenderHandleType handleType = RenderHandleUtil::GetHandleType(handle);
1279         if (handleType == RenderHandleType::GPU_BUFFER) {
1280             const auto clientLock = std::lock_guard(bufferStore_.clientMutex);
1281             Destroy(bufferStore_, handle);
1282         } else if (handleType == RenderHandleType::GPU_IMAGE) {
1283             const auto clientLock = std::lock_guard(imageStore_.clientMutex);
1284             Destroy(imageStore_, handle);
1285         } else if (handleType == RenderHandleType::GPU_SAMPLER) {
1286             const auto clientLock = std::lock_guard(samplerStore_.clientMutex);
1287             Destroy(samplerStore_, handle);
1288         } else {
1289             PLUGIN_LOG_I("invalid gpu resource handle : %" PRIu64, handle.id);
1290         }
1291     }
1292 }
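
// Editor's note: usage sketch (not part of the original file). Resources can be destroyed
// explicitly as below, or implicitly: HandlePendingAllocationsImpl() queues destruction when
// the last RenderHandleReference to a handle goes out of scope. The handle variable is
// hypothetical.
//
//     gpuResourceMgr.Destroy(imageHandleRef.GetHandle());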
1293 
1294 RenderHandleReference GpuResourceManager::GetHandle(const PerManagerStore& store, const string_view name) const
1295 {
1296     if (name.empty()) { // early out before lock
1297         return RenderHandleReference {};
1298     }
1299 
1300     auto const clientLock = std::shared_lock(store.clientMutex);
1301     if (auto const pos = store.nameToClientIndex.find(name); pos != store.nameToClientIndex.end()) {
1302         PLUGIN_ASSERT(pos->second < static_cast<uint32_t>(store.clientHandles.size()));
1303         return store.clientHandles[pos->second];
1304     }
1305     // NOTE: This is used in some places to check if the GPU resource is found
1306     // therefore no error prints here
1307     return RenderHandleReference {};
1308 }
1309 
1310 RenderHandle GpuResourceManager::GetRawHandle(const PerManagerStore& store, const string_view name) const
1311 {
1312     if (name.empty()) { // early out before lock
1313         return RenderHandle {};
1314     }
1315 
1316     auto const clientLock = std::shared_lock(store.clientMutex);
1317     if (auto const pos = store.nameToClientIndex.find(name); pos != store.nameToClientIndex.end()) {
1318         PLUGIN_ASSERT(pos->second < static_cast<uint32_t>(store.clientHandles.size()));
1319         return store.clientHandles[pos->second].GetHandle();
1320     }
1321     // NOTE: This is used in some places to check if the GPU resource is found
1322     // therefore no error prints here
1323     return RenderHandle {};
1324 }
1325 
1326 RenderHandleReference GpuResourceManager::GetBufferHandle(const string_view name) const
1327 {
1328     return GetHandle(bufferStore_, name);
1329 }
1330 
1331 RenderHandleReference GpuResourceManager::GetImageHandle(const string_view name) const
1332 {
1333     return GetHandle(imageStore_, name);
1334 }
1335 
1336 RenderHandleReference GpuResourceManager::GetSamplerHandle(const string_view name) const
1337 {
1338     return GetHandle(samplerStore_, name);
1339 }
1340 
1341 RenderHandle GpuResourceManager::GetBufferRawHandle(const string_view name) const
1342 {
1343     return GetRawHandle(bufferStore_, name);
1344 }
1345 
1346 RenderHandle GpuResourceManager::GetImageRawHandle(const string_view name) const
1347 {
1348     return GetRawHandle(imageStore_, name);
1349 }
1350 
1351 RenderHandle GpuResourceManager::GetSamplerRawHandle(const string_view name) const
1352 {
1353     return GetRawHandle(samplerStore_, name);
1354 }
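
// Editor's note: usage sketch for the name-based lookups above (not part of the original
// file). The resource name is hypothetical; an empty reference is returned without logging
// when the name is unknown.
//
//     RenderHandleReference target = gpuResourceMgr.GetImageHandle("HypotheticalTargetImage");
//     if (!target) {
//         // not found
//     }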
1355 
1356 GpuBufferDesc GpuResourceManager::GetBufferDescriptor(const RenderHandle& handle) const
1357 {
1358     if (!IsGpuBuffer(handle)) {
1359         return GpuBufferDesc {};
1360     }
1361     {
1362         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1363         const PerManagerStore& store = bufferStore_;
1364         auto const clientLock = std::shared_lock(store.clientMutex);
1365         if (arrayIndex < static_cast<uint32_t>(store.descriptions.size())) {
1366 #if (RENDER_VALIDATION_ENABLED == 1)
1367             if (RenderHandleUtil::GetGenerationIndexPart(store.clientHandles[arrayIndex].GetHandle()) !=
1368                 RenderHandleUtil::GetGenerationIndexPart(handle)) {
1369                 const auto name = GetName(handle);
1370                 PLUGIN_LOG_E(
1371                     "RENDER_VALIDATION: buffer client handle is not current generation (name:%s)", name.c_str());
1372             }
1373 #endif
1374             return store.descriptions[arrayIndex].combinedBufDescriptor.bufferDesc;
1375         }
1376     }
1377     return GpuBufferDesc {};
1378 }
1379 
1380 GpuBufferDesc GpuResourceManager::GetBufferDescriptor(const RenderHandleReference& handle) const
1381 {
1382     return GetBufferDescriptor(handle.GetHandle());
1383 }
1384 
1385 GpuImageDesc GpuResourceManager::GetImageDescriptor(const RenderHandle& handle) const
1386 {
1387     if (!IsGpuImage(handle)) {
1388         return GpuImageDesc {};
1389     }
1390     {
1391         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1392         const PerManagerStore& store = imageStore_;
1393         auto const clientLock = std::shared_lock(store.clientMutex);
1394         if (arrayIndex < static_cast<uint32_t>(store.descriptions.size())) {
1395 #if (RENDER_VALIDATION_ENABLED == 1)
1396             if (RenderHandleUtil::GetGenerationIndexPart(store.clientHandles[arrayIndex].GetHandle()) !=
1397                 RenderHandleUtil::GetGenerationIndexPart(handle)) {
1398                 const auto name = GetName(handle);
1399                 PLUGIN_LOG_E(
1400                     "RENDER_VALIDATION: image client handle is not current generation (name:%s)", name.c_str());
1401             }
1402 #endif
1403             return store.descriptions[arrayIndex].imageDescriptor;
1404         }
1405     }
1406     return GpuImageDesc {};
1407 }
1408 
1409 GpuImageDesc GpuResourceManager::GetImageDescriptor(const RenderHandleReference& handle) const
1410 {
1411     return GetImageDescriptor(handle.GetHandle());
1412 }
1413 
1414 GpuSamplerDesc GpuResourceManager::GetSamplerDescriptor(const RenderHandle& handle) const
1415 {
1416     if (!IsGpuSampler(handle)) {
1417         return GpuSamplerDesc {};
1418     }
1419     {
1420         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1421         const PerManagerStore& store = samplerStore_;
1422         auto const clientLock = std::shared_lock(store.clientMutex);
1423         if (arrayIndex < static_cast<uint32_t>(store.descriptions.size())) {
1424 #if (RENDER_VALIDATION_ENABLED == 1)
1425             if (RenderHandleUtil::GetGenerationIndexPart(store.clientHandles[arrayIndex].GetHandle()) !=
1426                 RenderHandleUtil::GetGenerationIndexPart(handle)) {
1427                 const auto name = GetName(handle);
1428                 PLUGIN_LOG_E(
1429                     "RENDER_VALIDATION: sampler client handle is not current generation (name:%s)", name.c_str());
1430             }
1431 #endif
1432             return store.descriptions[arrayIndex].samplerDescriptor;
1433         }
1434     }
1435     return GpuSamplerDesc {};
1436 }
1437 
1438 GpuSamplerDesc GpuResourceManager::GetSamplerDescriptor(const RenderHandleReference& handle) const
1439 {
1440     return GetSamplerDescriptor(handle.GetHandle());
1441 }
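
// Editor's note: usage sketch for the descriptor getters above (not part of the original
// file). An invalid or wrong-type handle yields a default-constructed descriptor. The
// handle variable is hypothetical.
//
//     const GpuBufferDesc bufDesc = gpuResourceMgr.GetBufferDescriptor(bufferHandleRef);
//     const uint32_t allocatedBytes = bufDesc.byteSize;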
1442 
1443 GpuAccelerationStructureDesc GpuResourceManager::GetAccelerationStructureDescriptor(const RenderHandle& handle) const
1444 {
1445     if (!IsGpuAccelerationStructure(handle)) {
1446         return GpuAccelerationStructureDesc {};
1447     }
1448     {
1449         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1450         const PerManagerStore& store = bufferStore_;
1451         auto const clientLock = std::shared_lock(store.clientMutex);
1452         if (arrayIndex < static_cast<uint32_t>(store.descriptions.size())) {
1453 #if (RENDER_VALIDATION_ENABLED == 1)
1454             if (RenderHandleUtil::GetGenerationIndexPart(store.clientHandles[arrayIndex].GetHandle()) !=
1455                 RenderHandleUtil::GetGenerationIndexPart(handle)) {
1456                 const auto name = GetName(handle);
1457                 PLUGIN_LOG_E(
1458                     "RENDER_VALIDATION: acceleration structure client handle is not current generation (name:%s)",
1459                     name.c_str());
1460             }
1461 #endif
1462             return store.descriptions[arrayIndex].combinedBufDescriptor;
1463         }
1464     }
1465     return GpuAccelerationStructureDesc {};
1466 }
1467 
1468 GpuAccelerationStructureDesc GpuResourceManager::GetAccelerationStructureDescriptor(
1469     const RenderHandleReference& handle) const
1470 {
1471     return GetAccelerationStructureDescriptor(handle.GetHandle());
1472 }
1473 
1474 string GpuResourceManager::GetName(const RenderHandle& handle) const
1475 {
1476     if (RenderHandleUtil::GetHasNamePart(handle) != 0) {
1477         const PerManagerStore* store = nullptr;
1478         const RenderHandleType handleType = RenderHandleUtil::GetHandleType(handle);
1479         if (handleType == RenderHandleType::GPU_BUFFER) {
1480             store = &bufferStore_;
1481         } else if (handleType == RenderHandleType::GPU_IMAGE) {
1482             store = &imageStore_;
1483         } else if (handleType == RenderHandleType::GPU_SAMPLER) {
1484             store = &samplerStore_;
1485         }
1486         if (store) {
1487             const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1488             for (const auto& iter : store->nameToClientIndex) {
1489                 if (arrayIndex == iter.second) {
1490                     return iter.first;
1491                 }
1492             }
1493         }
1494     }
1495     return {};
1496 }
1497 
1498 string GpuResourceManager::GetName(const RenderHandleReference& handle) const
1499 {
1500     return GetName(handle.GetHandle());
1501 }
1502 
1503 vector<RenderHandleReference> GpuResourceManager::GetHandles(const PerManagerStore& store) const
1504 {
1505     const auto lock = std::shared_lock(store.clientMutex);
1506 
1507     vector<RenderHandleReference> res;
1508     res.reserve(store.clientHandles.size());
1509     for (const auto& ref : store.clientHandles) {
1510         if (ref) {
1511             res.push_back(ref);
1512         }
1513     }
1514     return res;
1515 }
1516 
1517 vector<RenderHandleReference> GpuResourceManager::GetGpuBufferHandles() const
1518 {
1519     return GetHandles(bufferStore_);
1520 }
1521 
1522 vector<RenderHandleReference> GpuResourceManager::GetGpuImageHandles() const
1523 {
1524     return GetHandles(imageStore_);
1525 }
1526 
1527 vector<RenderHandleReference> GpuResourceManager::GetGpuSamplerHandles() const
1528 {
1529     return GetHandles(samplerStore_);
1530 }
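
// Editor's note: usage sketch for the handle enumeration above (not part of the original
// file), e.g. for debug listing of all currently valid images.
//
//     for (const RenderHandleReference& ref : gpuResourceMgr.GetGpuImageHandles()) {
//         PLUGIN_LOG_D("image: %s", gpuResourceMgr.GetName(ref).c_str());
//     }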
1531 
1532 void GpuResourceManager::SetDefaultGpuBufferCreationFlags(const BufferUsageFlags usageFlags)
1533 {
1534     defaultBufferUsageFlags_ = usageFlags;
1535 }
1536 
1537 void GpuResourceManager::SetDefaultGpuImageCreationFlags(const ImageUsageFlags usageFlags)
1538 {
1539     defaultImageUsageFlags_ = usageFlags;
1540 }
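
// Editor's note: usage sketch (not part of the original file). The chosen flag value is an
// assumption; after this call every subsequently created image gets the flag ORed into its
// usage flags.
//
//     gpuResourceMgr.SetDefaultGpuImageCreationFlags(CORE_IMAGE_USAGE_SAMPLED_BIT);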
1541 
1542 void GpuResourceManager::CreateGpuResource(const OperationDescription& op, const uint32_t arrayIndex,
1543     const RenderHandleType resourceType, const uintptr_t preCreatedResVec)
1544 {
1545     if (resourceType == RenderHandleType::GPU_BUFFER) {
1546         PLUGIN_ASSERT(preCreatedResVec);
1547         if (RenderHandleUtil::IsGpuAccelerationStructure(op.handle)) {
1548             PLUGIN_ASSERT(op.optionalResourceIndex == ~0u);
1549             gpuBufferMgr_->Create<GpuAccelerationStructureDesc>(arrayIndex,
1550                 op.descriptor.combinedBufDescriptor.bufferDesc, {}, true, op.descriptor.combinedBufDescriptor);
1551         } else {
1552             if (op.optionalResourceIndex != ~0u) {
1553                 BufferVector& res = *(reinterpret_cast<BufferVector*>(reinterpret_cast<void*>(preCreatedResVec)));
1554                 gpuBufferMgr_->Create<GpuAccelerationStructureDesc>(arrayIndex,
1555                     op.descriptor.combinedBufDescriptor.bufferDesc, move(res[op.optionalResourceIndex]), false,
1556                     op.descriptor.combinedBufDescriptor);
1557             } else {
1558                 gpuBufferMgr_->Create<GpuAccelerationStructureDesc>(arrayIndex,
1559                     op.descriptor.combinedBufDescriptor.bufferDesc, {}, false, op.descriptor.combinedBufDescriptor);
1560             }
1561         }
1562     } else if (resourceType == RenderHandleType::GPU_IMAGE) {
1563         PLUGIN_ASSERT(preCreatedResVec);
1564         if (op.optionalResourceIndex != ~0u) {
1565             ImageVector& images = *(reinterpret_cast<ImageVector*>(reinterpret_cast<void*>(preCreatedResVec)));
1566             gpuImageMgr_->Create<uint32_t>(
1567                 arrayIndex, op.descriptor.imageDescriptor, move(images[op.optionalResourceIndex]), false, 0);
1568         } else {
1569             gpuImageMgr_->Create<uint32_t>(arrayIndex, op.descriptor.imageDescriptor, {}, false, 0);
1570         }
1571     } else if (resourceType == RenderHandleType::GPU_SAMPLER) {
1572         PLUGIN_ASSERT(preCreatedResVec == 0);
1573         PLUGIN_ASSERT(op.optionalResourceIndex == ~0u);
1574         gpuSamplerMgr_->Create<uint32_t>(arrayIndex, op.descriptor.samplerDescriptor, {}, false, 0);
1575     }
1576 }
1577 
1578 // needs to be locked when called, and call only for valid gpu handles
1579 void GpuResourceManager::DestroyGpuResource(const OperationDescription& operation, const uint32_t arrayIndex,
1580     const RenderHandleType resourceType, PerManagerStore& store)
1581 {
1582     store.mgr->Destroy(arrayIndex);
1583     PLUGIN_ASSERT(arrayIndex < static_cast<uint32_t>(store.gpuHandles.size()));
1584     store.clientHandles[arrayIndex] = {};
1585     store.additionalData[arrayIndex] = {};
1586     store.gpuHandles[arrayIndex] = InvalidateWithGeneration(store.gpuHandles[arrayIndex]);
1587 }
1588 
1589 void GpuResourceManager::HandlePendingRemappings(
1590     const vector<RemapDescription>& pendingShallowRemappings, vector<EngineResourceHandle>& gpuHandles)
1591 {
1592     for (auto const& shallowRemap : pendingShallowRemappings) {
1593         // find the gpu handle where the client handle wants to point to
1594         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(shallowRemap.resourceClientHandle);
1595         if (arrayIndex < gpuHandles.size()) {
1596             const EngineResourceHandle gpuHandle = gpuHandles[arrayIndex];
1597 
1598             const bool validGpuHandle = RenderHandleUtil::IsValid(gpuHandle);
1599             const uint32_t remapArrayIndex = RenderHandleUtil::GetIndexPart(shallowRemap.shallowClientHandle);
1600             if (validGpuHandle && (remapArrayIndex < static_cast<uint32_t>(gpuHandles.size()))) {
1601                 gpuHandles[remapArrayIndex] = gpuHandle;
1602             } else {
1603                 PLUGIN_LOG_E("GPU image handle remapping failed; client handle not found");
1604             }
1605         }
1606     }
1607 }
1608 
1609 void GpuResourceManager::HandlePendingAllocationsImpl(const bool isFrameEnd)
1610 {
1611     auto handleStorePendingAllocations = [this](PerManagerStore& store, bool isFrameEnd) {
1612         store.clientMutex.lock();
1613         // protect GPU memory allocations
1614         allocationMutex_.lock();
1615         // check for handle destructions
1616         for (const auto& handleRef : store.clientHandles) {
1617             if (handleRef && (handleRef.GetRefCount() <= 1)) {
1618                 Destroy(store, handleRef.GetHandle());
1619             }
1620         }
1621 
1622         const auto pendingAllocations = move(store.pendingData.allocations);
1623         auto pendingBuffers = move(store.pendingData.buffers);
1624         auto pendingImages = move(store.pendingData.images);
1625         auto pendingRemaps = move(store.pendingData.remaps);
1626         uintptr_t pendingRes = 0; // ptr to pending resource vector
1627         if (store.handleType == RenderHandleType::GPU_BUFFER) {
1628             pendingRes = reinterpret_cast<uintptr_t>(static_cast<void*>(&pendingBuffers));
1629         } else if (store.handleType == RenderHandleType::GPU_IMAGE) {
1630             pendingRes = reinterpret_cast<uintptr_t>(static_cast<void*>(&pendingImages));
1631         }
1632 
1633         // increase the gpu handle vector sizes if needed
1634         if (store.clientHandles.size() > store.gpuHandles.size()) {
1635             store.gpuHandles.resize(store.clientHandles.size());
1636             store.mgr->Resize(store.clientHandles.size());
1637         }
1638 
1639         for (auto const& allocation : pendingAllocations) {
1640             const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(allocation.handle);
1641             // NOTE: needs to be cleared here
1642             store.additionalData[arrayIndex].indexToPendingData = ~0u;
1643             // NOTE: replacing an immediately created resource should be prevented (the ptr might still be present)
1644             if (allocation.allocType == AllocType::REMOVED) {
1645                 PLUGIN_ASSERT(arrayIndex < static_cast<uint32_t>(store.clientHandles.size()));
1646                 store.availableHandleIds.push_back(allocation.handle.id);
1647                 store.clientHandles[arrayIndex] = {};
1648                 store.additionalData[arrayIndex] = {};
1649                 continue;
1650             }
1651 
1652             PLUGIN_ASSERT(arrayIndex < static_cast<uint32_t>(store.gpuHandles.size()));
1653             // first allocation, then dealloc, with dealloc we need to check for deferred destruction
1654             if (allocation.allocType == AllocType::ALLOC) {
1655                 // NOTE: it is essential to get this right; the generation feeds render pass etc. hashing
1656                 // if the generation counter is stale there might be Vulkan image layout issues etc.
1657                 const EngineResourceHandle gpuHandle =
1658                     UnpackNewHandle(store.gpuHandles[arrayIndex], store.handleType, arrayIndex);
1659 #if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
1660                 LogGpuResource(allocation.handle, gpuHandle);
1661 #endif
1662                 CreateGpuResource(allocation, arrayIndex, store.handleType, pendingRes);
1663                 store.gpuHandles[arrayIndex] = gpuHandle;
1664 #if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
1665                 store.debugTagAllocations.push_back(allocation.handle);
1666 #endif
1667             } else if (allocation.allocType == AllocType::DEALLOC) {
1668                 if (RenderHandleUtil::IsDeferredDestroy(allocation.handle) && (!isFrameEnd)) {
1669                     // push deferred destroys back to wait for the end of the frame destruction
1670                     store.pendingData.allocations.push_back(allocation);
1671                 } else {
1672                     store.availableHandleIds.push_back(allocation.handle.id);
1673                     DestroyGpuResource(allocation, arrayIndex, store.handleType, store);
1674                 }
1675             }
1676             // there might be undefined type, e.g. for shallow handles
1677 
1678             // render graph frame state reset for trackable and not auto reset frame states
1679             // with alloc there might be a replace which needs this as well as with dealloc
1680             if (RenderHandleUtil::IsDynamicResource(allocation.handle) &&
1681                 (!RenderHandleUtil::IsResetOnFrameBorders(allocation.handle))) {
1682                 PLUGIN_ASSERT((store.handleType == RenderHandleType::GPU_BUFFER) ||
1683                               (store.handleType == RenderHandleType::GPU_IMAGE));
1684                 clientHandleStateDestroy_.resources.push_back(allocation.handle);
1685             }
1686         }
1687 
1688         // inside mutex (calls device)
1689         store.mgr->HandlePendingDeallocations();
1690 
1691         allocationMutex_.unlock();
1692         // although the pendingData was moved and doesn't need locks anymore, other parts of the store are modified
1693         // as well so we need to hold the lock until here.
1694         store.clientMutex.unlock();
1695 
1696         if (store.handleType == RenderHandleType::GPU_IMAGE) {
1697             // no lock needed for gpuHandles access
1698             HandlePendingRemappings(pendingRemaps, store.gpuHandles);
1699         }
1700     };
1701 
1702     handleStorePendingAllocations(bufferStore_, isFrameEnd);
1703     handleStorePendingAllocations(imageStore_, isFrameEnd);
1704     handleStorePendingAllocations(samplerStore_, isFrameEnd);
1705 
1706 #if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
1707     ProcessDebugTags();
1708 #endif
1709 }
1710 
1711 void GpuResourceManager::HandlePendingAllocations()
1712 {
1713     HandlePendingAllocationsImpl(false);
1714 }
1715 
1716 void GpuResourceManager::EndFrame()
1717 {
1718     DestroyFrameStaging();
1719     HandlePendingAllocationsImpl(true);
1720 }
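
// Editor's note: frame-flow sketch (an assumption, not a verbatim engine loop).
// HandlePendingAllocations() is expected to run while a frame is being processed so that
// pending client handles receive GPU resources, and EndFrame() once per frame so that frame
// staging data and deferred destroys are released.
//
//     gpuResourceMgr.HandlePendingAllocations(); // during frame processing
//     // ... render the frame ...
//     gpuResourceMgr.EndFrame();                 // at the frame border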
1721 
1722 void GpuResourceManager::RenderBackendImmediateRemapGpuImageHandle(
1723     const RenderHandle& clientHandle, const RenderHandle& clientHandleGpuResource)
1724 {
1725     const uint32_t clientArrayIndex = RenderHandleUtil::GetIndexPart(clientHandle);
1726     const uint32_t clientResourceArrayIndex = RenderHandleUtil::GetIndexPart(clientHandleGpuResource);
1727     const bool areGpuImages = (RenderHandleUtil::GetHandleType(clientHandle) == RenderHandleType::GPU_IMAGE) &&
1728                               (RenderHandleUtil::GetHandleType(clientHandleGpuResource) == RenderHandleType::GPU_IMAGE);
1729     if (areGpuImages) {
1730         PerManagerStore& store = imageStore_;
1731         auto const clientLock = std::lock_guard(store.clientMutex);
1732 
1733         if ((clientArrayIndex < store.gpuHandles.size()) && (clientResourceArrayIndex < store.gpuHandles.size())) {
1734             // NOTE: old owned gpu resource should be destroyed
1735             PLUGIN_ASSERT(RenderHandleUtil::IsValid(store.gpuHandles[clientResourceArrayIndex]));
1736 
1737             store.gpuHandles[clientArrayIndex] = store.gpuHandles[clientResourceArrayIndex];
1738             store.descriptions[clientArrayIndex] = store.descriptions[clientResourceArrayIndex];
1739         } else {
1740             PLUGIN_LOG_E("invalid backend gpu image remapping indices");
1741         }
1742     } else {
1743         PLUGIN_LOG_E("invalid backend gpu image remapping handles");
1744     }
1745 }
1746 
1747 void GpuResourceManager::LockFrameStagingData()
1748 {
1749     {
1750         std::lock_guard lock(stagingMutex_);
1751         perFrameStagingData_ = move(stagingOperations_);
1752     }
1753 
1754     // create frame staging buffers and set handles for staging
1755     {
1756         std::lock_guard lock(bufferStore_.clientMutex);
1757 
1758         perFrameStagingBuffers_.reserve(
1759             perFrameStagingData_.bufferToBuffer.size() + perFrameStagingData_.bufferToImage.size());
1760         for (auto& ref : perFrameStagingData_.bufferToBuffer) {
1761             if ((!ref.invalidOperation) && (ref.stagingBufferByteSize > 0)) {
1762                 perFrameStagingBuffers_.push_back(CreateStagingBuffer(GetStagingBufferDesc(ref.stagingBufferByteSize)));
1763                 ref.srcHandle = perFrameStagingBuffers_.back();
1764             }
1765         }
1766         for (auto& ref : perFrameStagingData_.bufferToImage) {
1767             if ((!ref.invalidOperation) && (ref.stagingBufferByteSize > 0)) {
1768                 perFrameStagingBuffers_.push_back(CreateStagingBuffer(GetStagingBufferDesc(ref.stagingBufferByteSize)));
1769                 ref.srcHandle = perFrameStagingBuffers_.back();
1770             }
1771         }
1772     }
1773 
1774     // create image scaling targets and set handles
1775     perFrameStagingScalingImages_.resize(perFrameStagingData_.scalingImageData.scalingImages.size());
1776     for (size_t idx = 0; idx < perFrameStagingData_.scalingImageData.scalingImages.size(); ++idx) {
1777         auto& scalingImageRef = perFrameStagingData_.scalingImageData.scalingImages[idx];
1778         perFrameStagingScalingImages_[idx] = Create(
1779             GetStagingScalingImageDesc(scalingImageRef.format, scalingImageRef.maxWidth, scalingImageRef.maxHeight));
1780         scalingImageRef.handle = perFrameStagingScalingImages_[idx];
1781     }
1782 }
1783 
1784 void GpuResourceManager::DestroyFrameStaging()
1785 {
1786     // explicit destruction of staging resources
1787     {
1788         PerManagerStore& store = bufferStore_;
1789         auto const clientLock = std::lock_guard(store.clientMutex);
1790         for (const auto& handleRef : perFrameStagingBuffers_) {
1791             Destroy(store, handleRef.GetHandle());
1792         }
1793         perFrameStagingBuffers_.clear();
1794     }
1795     {
1796         PerManagerStore& store = imageStore_;
1797         auto const clientLock = std::lock_guard(store.clientMutex);
1798         for (const auto& handleRef : perFrameStagingScalingImages_) {
1799             Destroy(store, handleRef.GetHandle());
1800         }
1801         perFrameStagingScalingImages_.clear();
1802     }
1803 }
1804 
1805 bool GpuResourceManager::HasStagingData() const
1806 {
1807     if (perFrameStagingData_.bufferToBuffer.empty() && perFrameStagingData_.bufferToImage.empty() &&
1808         perFrameStagingData_.imageToBuffer.empty() && perFrameStagingData_.imageToImage.empty() &&
1809         perFrameStagingData_.cpuToBuffer.empty() && perFrameStagingData_.bufferCopies.empty() &&
1810         perFrameStagingData_.bufferImageCopies.empty() && perFrameStagingData_.imageCopies.empty()) {
1811         return false;
1812     } else {
1813         return true;
1814     }
1815 }
1816 
1817 StagingConsumeStruct GpuResourceManager::ConsumeStagingData()
1818 {
1819     StagingConsumeStruct staging = move(perFrameStagingData_);
1820     return staging;
1821 }
1822 
1823 GpuResourceManager::StateDestroyConsumeStruct GpuResourceManager::ConsumeStateDestroyData()
1824 {
1825     StateDestroyConsumeStruct srcs = move(clientHandleStateDestroy_);
1826     return srcs;
1827 }
1828 
1829 void* GpuResourceManager::MapBuffer(const RenderHandle& handle) const
1830 {
1831     if (GpuBuffer* buffer = GetBuffer(handle); buffer) {
1832         return buffer->Map();
1833     } else {
1834         return nullptr;
1835     }
1836 }
1837 
1838 void* GpuResourceManager::MapBuffer(const RenderHandleReference& handle) const
1839 {
1840     return MapBuffer(handle.GetHandle());
1841 }
1842 
1843 void* GpuResourceManager::MapBufferMemory(const RenderHandle& handle) const
1844 {
1845     const bool isOutsideRendererMappable = RenderHandleUtil::IsMappableOutsideRenderer(handle);
1846     void* data = nullptr;
1847     if (isOutsideRendererMappable) {
1848         const bool isCreatedImmediate = RenderHandleUtil::IsImmediatelyCreated(handle);
1849         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1850         const auto clientLock = std::lock_guard(bufferStore_.clientMutex);
1851         if (isCreatedImmediate && (arrayIndex < static_cast<uint32_t>(bufferStore_.clientHandles.size()))) {
1852 #if (RENDER_VALIDATION_ENABLED == 1)
1853             if (RenderHandleUtil::GetGenerationIndexPart(bufferStore_.clientHandles[arrayIndex].GetHandle()) !=
1854                 RenderHandleUtil::GetGenerationIndexPart(handle)) {
1855                 const string name = GetName(bufferStore_.clientHandles[arrayIndex].GetHandle());
1856                 PLUGIN_LOG_E("RENDER_VALIDATION: client handle is not current generation (name: %s)", name.c_str());
1857             }
1858             if (!bufferStore_.additionalData[arrayIndex].resourcePtr) {
1859                 PLUGIN_LOG_E("RENDER_VALIDATION: invalid pointer with mappable GPU buffer MapBufferMemory");
1860             }
1861 #endif
1862             if (bufferStore_.additionalData[arrayIndex].resourcePtr) {
1863                 data = (reinterpret_cast<GpuBuffer*>(bufferStore_.additionalData[arrayIndex].resourcePtr))->MapMemory();
1864             }
1865         }
1866     } else if (GpuBuffer* buffer = GetBuffer(handle); buffer) {
1867         data = buffer->MapMemory();
1868     }
1869     return data;
1870 }
1871 
1872 void* GpuResourceManager::MapBufferMemory(const RenderHandleReference& handle) const
1873 {
1874     return MapBufferMemory(handle.GetHandle());
1875 }
1876 
1877 void GpuResourceManager::UnmapBuffer(const RenderHandle& handle) const
1878 {
1879     const bool isOutsideRendererMappable = RenderHandleUtil::IsMappableOutsideRenderer(handle);
1880     if (isOutsideRendererMappable) {
1881         const bool isCreatedImmediate = RenderHandleUtil::IsImmediatelyCreated(handle);
1882         const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
1883         const auto clientLock = std::lock_guard(bufferStore_.clientMutex);
1884         if (isCreatedImmediate && (arrayIndex < static_cast<uint32_t>(bufferStore_.clientHandles.size()))) {
1885 #if (RENDER_VALIDATION_ENABLED == 1)
1886             if (RenderHandleUtil::GetGenerationIndexPart(bufferStore_.clientHandles[arrayIndex].GetHandle()) !=
1887                 RenderHandleUtil::GetGenerationIndexPart(handle)) {
1888                 const string name = GetName(bufferStore_.clientHandles[arrayIndex].GetHandle());
1889                 PLUGIN_LOG_E("RENDER_VALIDATION: client handle is not current generation (name: %s)", name.c_str());
1890             }
1891             if (bufferStore_.additionalData[arrayIndex].resourcePtr) {
1892                 (reinterpret_cast<GpuBuffer*>(bufferStore_.additionalData[arrayIndex].resourcePtr))->Unmap();
1893             } else {
1894                 PLUGIN_LOG_E("RENDER_VALIDATION: invalid pointer with mappable GPU buffer UnmapBuffer");
1895             }
1896 #endif
1897         }
1898     } else if (const GpuBuffer* buffer = GetBuffer(handle); buffer) {
1899         buffer->Unmap();
1900     }
1901 }
1902 
1903 void GpuResourceManager::UnmapBuffer(const RenderHandleReference& handle) const
1904 {
1905     return UnmapBuffer(handle.GetHandle());
1906 }
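
// Editor's note: usage sketch for the map/unmap API above (not part of the original file).
// The buffer handle and source data are hypothetical; the buffer is assumed to be
// host-visible, and Map()/Unmap() calls must be paired.
//
//     if (void* mapped = gpuResourceMgr.MapBuffer(bufferHandleRef)) {
//         std::memcpy(mapped, srcData, srcByteSize); // requires <cstring>
//         gpuResourceMgr.UnmapBuffer(bufferHandleRef);
//     }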
1907 
1908 // must be locked when called
1909 GpuResourceManager::PendingData GpuResourceManager::CommitPendingData(PerManagerStore& store)
1910 {
1911     return { move(store.pendingData.allocations), move(store.pendingData.buffers), move(store.pendingData.images),
1912         move(store.pendingData.remaps) };
1913 }
1914 
1915 void GpuResourceManager::DebugPrintValidCounts()
1916 {
1917 #if (RENDER_VALIDATION_ENABLED == 1)
1918     PLUGIN_LOG_D("GPU buffer count: %u", static_cast<uint32_t>(gpuBufferMgr_->GetValidResourceCount()));
1919     PLUGIN_LOG_D("GPU image count: %u", static_cast<uint32_t>(gpuImageMgr_->GetValidResourceCount()));
1920     PLUGIN_LOG_D("GPU sampler count: %u", static_cast<uint32_t>(gpuSamplerMgr_->GetValidResourceCount()));
1921 #endif
1922 }
1923 
1924 void GpuResourceManager::WaitForIdleAndDestroyGpuResources()
1925 {
1926     PLUGIN_LOG_D("WFIADGR thread id: %" PRIu64, (uint64_t)std::hash<std::thread::id> {}(std::this_thread::get_id()));
1927     device_.Activate();
1928     device_.WaitForIdle();
1929 
1930     // 1. immediately destroy all the handles that are marked for destruction
1931     // 2. throw away everything else that is pending
1932     auto DestroyPendingData = [this](PerManagerStore& store) {
1933         const auto lock = std::lock_guard(store.clientMutex);
1934         const auto allocLock = std::lock_guard(allocationMutex_);
1935 
1936         auto pd = CommitPendingData(store);
1937         if (store.clientHandles.size() > store.gpuHandles.size()) {
1938             store.gpuHandles.resize(store.clientHandles.size());
1939             store.mgr->Resize(store.clientHandles.size());
1940         }
1941 
1942 #if (RENDER_VALIDATION_ENABLED == 1)
1943         uint32_t dc = 0;
1944 #endif
1945         for (const auto& ref : pd.allocations) {
1946             if (ref.allocType == AllocType::DEALLOC) {
1947                 store.availableHandleIds.push_back(ref.handle.id);
1948                 DestroyImmediate(store, ref.handle);
1949                 // render graph frame state reset for trackable and not auto reset frame states
1950                 if (RenderHandleUtil::IsDynamicResource(ref.handle) &&
1951                     (!RenderHandleUtil::IsResetOnFrameBorders(ref.handle))) {
1952                     PLUGIN_ASSERT((store.handleType == RenderHandleType::GPU_BUFFER) ||
1953                                   (store.handleType == RenderHandleType::GPU_IMAGE));
1954                     clientHandleStateDestroy_.resources.push_back(ref.handle);
1955                 }
1956 #if (RENDER_VALIDATION_ENABLED == 1)
1957                 dc++;
1958             } else if (ref.allocType == AllocType::REMOVED) {
1959                 dc++;
1960 #endif
1961             }
1962         }
1963 #if (RENDER_VALIDATION_ENABLED == 1)
1964         PLUGIN_LOG_D("WFIADGR: d: %u r (type:%u)", dc, static_cast<uint32_t>(store.handleType));
1965         PLUGIN_LOG_D("WFIADGR: pa cl: %u (t:%u)", static_cast<uint32_t>(pd.allocations.size()) - dc,
1966             static_cast<uint32_t>(store.handleType));
1967 #endif
1968 
1969         // inside mutex (calls device)
1970         store.mgr->HandlePendingDeallocationsImmediate();
1971     };
1972 
1973     DestroyPendingData(bufferStore_);
1974     DestroyPendingData(imageStore_);
1975     DestroyPendingData(samplerStore_);
1976 
1977     // make sure that all staging resources are forcefully destroyed
1978     LockFrameStagingData();
1979     ConsumeStagingData(); // consume cpu data
1980     DestroyFrameStaging();
1981 
1982     {
1983         // additional possible staging buffer clean-up
1984         auto& store = bufferStore_;
1985         const auto lock = std::lock_guard(store.clientMutex);
1986         const auto allocLock = std::lock_guard(allocationMutex_);
1987         store.mgr->HandlePendingDeallocationsImmediate();
1988     }
1989 
1990     DebugPrintValidCounts();
1991 
1992     device_.Deactivate();
1993 }
1994 
1995 EngineResourceHandle GpuResourceManager::GetGpuHandle(
1996     const PerManagerStore& store, const RenderHandle& clientHandle) const
1997 {
1998     const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(clientHandle);
1999     if (arrayIndex < static_cast<uint32_t>(store.gpuHandles.size())) {
2000 #if (RENDER_VALIDATION_ENABLED == 1)
2001         if (RenderHandleUtil::GetGenerationIndexPart(store.clientHandles[arrayIndex].GetHandle()) !=
2002             RenderHandleUtil::GetGenerationIndexPart(clientHandle)) {
2003             const string name = GetName(store.clientHandles[arrayIndex].GetHandle());
2004             PLUGIN_LOG_E("RENDER_VALIDATION: client handle is not current generation (name: %s)", name.c_str());
2005         }
2006         if (!RenderHandleUtil::IsValid(store.gpuHandles[arrayIndex])) {
2007             PLUGIN_LOG_E("RENDER_VALIDATION : invalid gpu handle %" PRIx64, clientHandle.id);
2008         }
2009 #endif
2010         return store.gpuHandles[arrayIndex];
2011     } else {
2012         PLUGIN_LOG_E("No gpu resource handle for client handle : %" PRIx64, clientHandle.id);
2013         return EngineResourceHandle {};
2014     }
2015 }
2016 
2017 EngineResourceHandle GpuResourceManager::GetGpuHandle(const RenderHandle& clientHandle) const
2018 {
2019     const RenderHandleType handleType = RenderHandleUtil::GetHandleType(clientHandle);
2020     if (handleType == RenderHandleType::GPU_BUFFER) {
2021         return GetGpuHandle(bufferStore_, clientHandle);
2022     } else if (handleType == RenderHandleType::GPU_IMAGE) {
2023         return GetGpuHandle(imageStore_, clientHandle);
2024     } else if (handleType == RenderHandleType::GPU_SAMPLER) {
2025         return GetGpuHandle(samplerStore_, clientHandle);
2026     } else {
2027         PLUGIN_LOG_E("invalid gpu resource handle : %" PRIx64, clientHandle.id);
2028         return {};
2029     }
2030 }
2031 
2032 GpuBuffer* GpuResourceManager::GetBuffer(const RenderHandle& handle) const
2033 {
2034     const EngineResourceHandle resHandle = GetGpuHandle(bufferStore_, handle);
2035     return gpuBufferMgr_->Get(RenderHandleUtil::GetIndexPart(resHandle));
2036 }
2037 
2038 GpuImage* GpuResourceManager::GetImage(const RenderHandle& handle) const
2039 {
2040     const EngineResourceHandle resHandle = GetGpuHandle(imageStore_, handle);
2041     return gpuImageMgr_->Get(RenderHandleUtil::GetIndexPart(resHandle));
2042 }
2043 
2044 GpuSampler* GpuResourceManager::GetSampler(const RenderHandle& handle) const
2045 {
2046     const EngineResourceHandle resHandle = GetGpuHandle(samplerStore_, handle);
2047     return gpuSamplerMgr_->Get(RenderHandleUtil::GetIndexPart(resHandle));
2048 }
2049 
2050 uint32_t GpuResourceManager::GetBufferHandleCount() const
2051 {
2052     return static_cast<uint32_t>(bufferStore_.gpuHandles.size());
2053 }
2054 
2055 uint32_t GpuResourceManager::GetImageHandleCount() const
2056 {
2057     return static_cast<uint32_t>(imageStore_.gpuHandles.size());
2058 }
2059 
2060 RenderHandle GpuResourceManager::CreateClientHandle(const RenderHandleType type,
2061     const ResourceDescriptor& resourceDescriptor, const uint64_t handleId, const uint32_t hasNameId,
2062     const uint32_t additionalInfoFlags)
2063 {
2064     RenderHandle handle;
2065 
2066     const uint32_t index = RenderHandleUtil::GetIndexPart(handleId);
2067     const uint32_t generationIndex = RenderHandleUtil::GetGenerationIndexPart(handleId) + 1; // next gen
2068     if (type == RenderHandleType::GPU_BUFFER) {
2069         // NOTE: additional flags for GPU acceleration structure might be needed
2070         const auto& rd = resourceDescriptor.combinedBufDescriptor.bufferDesc;
2071         RenderHandleInfoFlags infoFlags = (rd.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_DYNAMIC_BARRIERS)
2072                                               ? CORE_RESOURCE_HANDLE_DYNAMIC_TRACK
2073                                               : 0u;
2074         infoFlags |= (rd.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_CREATE_IMMEDIATE)
2075                          ? CORE_RESOURCE_HANDLE_IMMEDIATELY_CREATED
2076                          : 0;
2077         infoFlags |= (rd.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_DEFERRED_DESTROY)
2078                          ? CORE_RESOURCE_HANDLE_DEFERRED_DESTROY
2079                          : 0;
2080         infoFlags |= (rd.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_MAP_OUTSIDE_RENDERER)
2081                          ? CORE_RESOURCE_HANDLE_MAP_OUTSIDE_RENDERER
2082                          : 0;
2083         infoFlags |= additionalInfoFlags;
2084         handle = RenderHandleUtil::CreateGpuResourceHandle(type, infoFlags, index, generationIndex, hasNameId);
2085     } else if (type == RenderHandleType::GPU_IMAGE) {
2086         const auto& rd = resourceDescriptor.imageDescriptor;
2087         RenderHandleInfoFlags infoFlags {};
2088         infoFlags |= (rd.engineCreationFlags & CORE_ENGINE_IMAGE_CREATION_DYNAMIC_BARRIERS)
2089                          ? CORE_RESOURCE_HANDLE_DYNAMIC_TRACK
2090                          : 0u;
2091         infoFlags |= (rd.engineCreationFlags & CORE_ENGINE_IMAGE_CREATION_DEFERRED_DESTROY)
2092                          ? CORE_RESOURCE_HANDLE_DEFERRED_DESTROY
2093                          : 0;
2094         infoFlags |= ((rd.engineCreationFlags & CORE_ENGINE_IMAGE_CREATION_DYNAMIC_BARRIERS) &&
2095                          (resourceDescriptor.imageDescriptor.mipCount > 1U))
2096                          ? CORE_RESOURCE_HANDLE_DYNAMIC_ADDITIONAL_STATE
2097                          : 0u;
2098         // force transient attachments to be state reset on frame borders
2099         infoFlags |= ((rd.engineCreationFlags & CORE_ENGINE_IMAGE_CREATION_RESET_STATE_ON_FRAME_BORDERS) ||
2100                          (rd.usageFlags & CORE_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT))
2101                          ? CORE_RESOURCE_HANDLE_RESET_ON_FRAME_BORDERS
2102                          : 0u;
2103         infoFlags |= GetAdditionalImageFlagsFromFormat(rd.format);
2104         infoFlags |= additionalInfoFlags;
2105         handle = RenderHandleUtil::CreateGpuResourceHandle(type, infoFlags, index, generationIndex, hasNameId);
2106     } else if (type == RenderHandleType::GPU_SAMPLER) {
2107         handle = RenderHandleUtil::CreateGpuResourceHandle(type, 0, index, generationIndex, hasNameId);
2108     }
2109     return handle;
2110 }
2111 
2112 // needs to be locked when called
2113 uint64_t GpuResourceManager::GetNextAvailableHandleId(PerManagerStore& store)
2114 {
2115     uint64_t handleId = INVALID_RESOURCE_HANDLE;
2116     auto& availableHandleIds = store.availableHandleIds;
2117     if (!availableHandleIds.empty()) {
2118         handleId = availableHandleIds.back();
2119         availableHandleIds.pop_back();
2120     } else {
2121         handleId = static_cast<uint32_t>(store.clientHandles.size())
2122                    << RenderHandleUtil::RES_HANDLE_ID_SHIFT; // next idx
2123     }
2124     return handleId;
2125 }
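
// Editor's note: worked example of the id selection above (values illustrative): with an
// empty availableHandleIds list and five existing client handles, the returned id encodes
// array index 5 (5 << RES_HANDLE_ID_SHIFT); when the free-list is non-empty, a previously
// freed id is popped and its array slot is recycled instead.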
2126 
2127 // needs to be locked when called
2128 // staging cannot be locked when called
2129 GpuResourceManager::StoreAllocationData GpuResourceManager::StoreAllocation(
2130     PerManagerStore& store, const StoreAllocationInfo& info)
2131 {
2132     // NOTE: PerManagerStore gpu handles cannot be touched here
2133     StoreAllocationData data;
2134 
2135     // there cannot be both (valid name and a valid replaced handle)
2136     const uint32_t replaceArrayIndex = RenderHandleUtil::GetIndexPart(info.replacedHandle);
2137     bool hasReplaceHandle = (replaceArrayIndex < static_cast<uint32_t>(store.clientHandles.size()));
2138     const uint32_t hasNameId = (!info.name.empty()) ? 1u : 0u;
2139     if (hasReplaceHandle) {
2140         data.handle = store.clientHandles[replaceArrayIndex];
2141         // NOTE: should be documented better, and prevented
2142         PLUGIN_ASSERT_MSG(!RenderHandleUtil::IsDeferredDestroy(data.handle.GetHandle()),
2143             "deferred desctruction resources cannot be replaced");
2144         if (!RenderHandleUtil::IsValid(data.handle.GetHandle())) {
2145 #if (RENDER_VALIDATION_ENABLED == 1)
2146             PLUGIN_LOG_E("RENDER_VALIDATION: invalid replaced handle given to GPU resource manager, creating new");
2147 #endif
2148             const uint64_t handleId = GetNextAvailableHandleId(store);
2149             data.handle = RenderHandleReference(
2150                 CreateClientHandle(info.type, info.descriptor, handleId, hasNameId, info.addHandleFlags),
2151                 IRenderReferenceCounter::Ptr(new RenderReferenceCounter()));
2152             hasReplaceHandle = false;
2153             if (hasNameId) {
2154                 // NOTE: should remove old name if it was in use
2155                 store.nameToClientIndex[info.name] = RenderHandleUtil::GetIndexPart(data.handle.GetHandle());
2156             }
2157         }
2158     } else if (hasNameId != 0u) {
2159         if (auto const iter = store.nameToClientIndex.find(info.name); iter != store.nameToClientIndex.cend()) {
2160             PLUGIN_ASSERT(iter->second < static_cast<uint32_t>(store.clientHandles.size()));
2161             data.handle = store.clientHandles[iter->second];
2162             PLUGIN_ASSERT_MSG(!RenderHandleUtil::IsDeferredDestroy(data.handle.GetHandle()),
2163                 "deferred desctruction resources cannot be replaced");
2164         } else {
2165             const uint64_t handleId = GetNextAvailableHandleId(store);
2166             data.handle = RenderHandleReference(
2167                 CreateClientHandle(info.type, info.descriptor, handleId, hasNameId, info.addHandleFlags),
2168                 IRenderReferenceCounter::Ptr(new RenderReferenceCounter()));
2169             store.nameToClientIndex[info.name] = RenderHandleUtil::GetIndexPart(data.handle.GetHandle());
2170         }
2171     } else {
2172         const uint64_t handleId = GetNextAvailableHandleId(store);
2173         data.handle = RenderHandleReference(
2174             CreateClientHandle(info.type, info.descriptor, handleId, hasNameId, info.addHandleFlags),
2175             IRenderReferenceCounter::Ptr(new RenderReferenceCounter()));
2176     }

    PLUGIN_ASSERT(data.handle);
    const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(data.handle.GetHandle());
    PLUGIN_ASSERT(store.clientHandles.size() == store.descriptions.size());
    PLUGIN_ASSERT(store.clientHandles.size() == store.additionalData.size());
    if (arrayIndex >= static_cast<uint32_t>(store.clientHandles.size())) {
        store.clientHandles.push_back(data.handle);
        store.additionalData.push_back({});
        store.descriptions.push_back(info.descriptor);
    } else {
        store.clientHandles[arrayIndex] = data.handle;
        // store.additionalData[arrayIndex] cannot be cleared here (staging is cleaned based on this)
        store.descriptions[arrayIndex] = info.descriptor;
    }

#if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
    PLUGIN_LOG_E("gpu resource allocation %" PRIx64 " (name: %s)", data.handle.GetHandle().id, info.name.data());
#endif

    // store the allocation for the actual GPU allocation (done later)

    // when replacing (by handle or by name), the existing pending allocation needs to be found and replaced
    if (hasReplaceHandle || (hasNameId != 0)) {
        if (const uint32_t pendingArrIndex = store.additionalData[arrayIndex].indexToPendingData;
            (pendingArrIndex != INVALID_PENDING_INDEX) && (pendingArrIndex < store.pendingData.allocations.size())) {
            data.allocationIndex = pendingArrIndex;
        }
    }
    if (data.allocationIndex == ~0u) {
        data.allocationIndex = store.pendingData.allocations.size();
        store.additionalData[arrayIndex].indexToPendingData = static_cast<uint32_t>(data.allocationIndex);
        store.pendingData.allocations.emplace_back(
            data.handle.GetHandle(), info.descriptor, info.allocType, info.optResourceIndex);
    } else { // quite rare case and slow path
        // replace this frame's allocation
        auto& allocOp = store.pendingData.allocations[data.allocationIndex];
        // only invalidation is needed here; the actual allocations are done later,
        // i.e. staging buffers are created and fetched by index in the render node staging
        RemoveStagingOperations(allocOp);
        store.pendingData.allocations[data.allocationIndex] = { data.handle.GetHandle(), info.descriptor,
            info.allocType, info.optResourceIndex };
    }

    return data;
}

bool GpuResourceManager::IsGpuBuffer(const RenderHandleReference& handle) const
{
    return IsGpuBuffer(handle.GetHandle());
}

bool GpuResourceManager::IsGpuImage(const RenderHandleReference& handle) const
{
    return IsGpuImage(handle.GetHandle());
}

bool GpuResourceManager::IsGpuSampler(const RenderHandleReference& handle) const
{
    return IsGpuSampler(handle.GetHandle());
}

bool GpuResourceManager::IsGpuAccelerationStructure(const RenderHandleReference& handle) const
{
    return IsGpuAccelerationStructure(handle.GetHandle());
}

bool GpuResourceManager::IsSwapchain(const RenderHandleReference& handle) const
{
    return IsSwapchain(handle.GetHandle());
}

bool GpuResourceManager::IsMappableOutsideRender(const RenderHandleReference& handle) const
{
    return RenderHandleUtil::IsMappableOutsideRenderer(handle.GetHandle());
}

bool GpuResourceManager::IsGpuBuffer(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuBuffer(handle);
}

bool GpuResourceManager::IsGpuImage(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuImage(handle);
}

bool GpuResourceManager::IsGpuSampler(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuSampler(handle);
}

bool GpuResourceManager::IsGpuAccelerationStructure(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuAccelerationStructure(handle);
}

bool GpuResourceManager::IsSwapchain(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsSwapchain(handle);
}

bool GpuResourceManager::IsValid(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsValid(handle);
}

FormatProperties GpuResourceManager::GetFormatProperties(const RenderHandle& handle) const
{
    const RenderHandleType type = RenderHandleUtil::GetHandleType(handle);
    Format format = Format::BASE_FORMAT_UNDEFINED;
    if (type == RenderHandleType::GPU_BUFFER) {
        const GpuBufferDesc desc = GetBufferDescriptor(handle);
        format = desc.format;
    } else if (type == RenderHandleType::GPU_IMAGE) {
        const GpuImageDesc desc = GetImageDescriptor(handle);
        format = desc.format;
    }
    return device_.GetFormatProperties(format);
}

FormatProperties GpuResourceManager::GetFormatProperties(const RenderHandleReference& handle) const
{
    return GetFormatProperties(handle.GetHandle());
}

FormatProperties GpuResourceManager::GetFormatProperties(const Format format) const
{
    return device_.GetFormatProperties(format);
}
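
// Illustrative only: checking format capabilities before use. The member and flag names below are
// assumptions (Vulkan-style), not verified against the engine headers:
//   const FormatProperties props = gpuResourceMgr.GetFormatProperties(BASE_FORMAT_R16G16B16A16_SFLOAT);
//   const bool linearFilterable =
//       (props.optimalTilingFeatures & CORE_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0U;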

ImageAspectFlags GpuResourceManager::GetImageAspectFlags(const RenderHandleReference& handle) const
{
    return GetImageAspectFlags(handle.GetHandle());
}

ImageAspectFlags GpuResourceManager::GetImageAspectFlags(const RenderHandle& handle) const
{
    const RenderHandleType type = RenderHandleUtil::GetHandleType(handle);
    Format format = Format::BASE_FORMAT_UNDEFINED;
    if (type == RenderHandleType::GPU_BUFFER) {
        const GpuBufferDesc desc = GetBufferDescriptor(handle);
        format = desc.format;
    } else if (type == RenderHandleType::GPU_IMAGE) {
        const GpuImageDesc desc = GetImageDescriptor(handle);
        format = desc.format;
    }
    return GetImageAspectFlags(format);
}

ImageAspectFlags GpuResourceManager::GetImageAspectFlags(const BASE_NS::Format format) const
{
    ImageAspectFlags flags {};
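    // e.g. BASE_FORMAT_D24_UNORM_S8_UINT -> DEPTH | STENCIL,
    //      BASE_FORMAT_S8_UINT           -> STENCIL,
    //      BASE_FORMAT_R8G8B8A8_UNORM    -> COLOR.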
    const bool isDepthFormat = ((format == BASE_FORMAT_D16_UNORM) || (format == BASE_FORMAT_X8_D24_UNORM_PACK32) ||
                                (format == BASE_FORMAT_D32_SFLOAT) || (format == BASE_FORMAT_D16_UNORM_S8_UINT) ||
                                (format == BASE_FORMAT_D24_UNORM_S8_UINT) || (format == BASE_FORMAT_D32_SFLOAT_S8_UINT));
    if (isDepthFormat) {
        flags |= ImageAspectFlagBits::CORE_IMAGE_ASPECT_DEPTH_BIT;

        const bool isStencilFormat =
            ((format == BASE_FORMAT_S8_UINT) || (format == BASE_FORMAT_D16_UNORM_S8_UINT) ||
                (format == BASE_FORMAT_D24_UNORM_S8_UINT) || (format == BASE_FORMAT_D32_SFLOAT_S8_UINT));
        if (isStencilFormat) {
            flags |= ImageAspectFlagBits::CORE_IMAGE_ASPECT_STENCIL_BIT;
        }

    } else if (format == BASE_FORMAT_S8_UINT) {
        flags |= ImageAspectFlagBits::CORE_IMAGE_ASPECT_STENCIL_BIT;
    } else if (format != BASE_FORMAT_UNDEFINED) {
        flags |= ImageAspectFlagBits::CORE_IMAGE_ASPECT_COLOR_BIT;
    }

    return flags;
}

GpuImageDesc GpuResourceManager::CreateGpuImageDesc(const CORE_NS::IImageContainer::ImageDesc& desc) const
{
    GpuImageDesc gpuImageDesc;
    // default values for loaded images
    gpuImageDesc.imageTiling = CORE_IMAGE_TILING_OPTIMAL;
    gpuImageDesc.usageFlags |= CORE_IMAGE_USAGE_SAMPLED_BIT | CORE_IMAGE_USAGE_TRANSFER_DST_BIT;
    gpuImageDesc.memoryPropertyFlags = CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    if (desc.imageFlags & IImageContainer::ImageFlags::FLAGS_CUBEMAP_BIT) {
        gpuImageDesc.createFlags |= ImageCreateFlagBits::CORE_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
    }
    if ((desc.imageFlags & IImageContainer::ImageFlags::FLAGS_REQUESTING_MIPMAPS_BIT) && (desc.mipCount > 1)) {
        gpuImageDesc.engineCreationFlags |= EngineImageCreationFlagBits::CORE_ENGINE_IMAGE_CREATION_GENERATE_MIPS;
        gpuImageDesc.usageFlags |= CORE_IMAGE_USAGE_TRANSFER_SRC_BIT;
    }
    gpuImageDesc.imageType = static_cast<RENDER_NS::ImageType>(desc.imageType);
    gpuImageDesc.imageViewType = static_cast<RENDER_NS::ImageViewType>(desc.imageViewType);
    gpuImageDesc.format = desc.format;
    gpuImageDesc.width = desc.width;
    gpuImageDesc.height = desc.height;
    gpuImageDesc.depth = desc.depth;
    gpuImageDesc.mipCount = desc.mipCount;
    gpuImageDesc.layerCount = desc.layerCount;
    return gpuImageDesc;
}
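
// Illustrative only: turning a loaded image into a GPU image. imageContainer->GetImageDesc() and
// GetData() are assumed loader-side accessors, not something defined in this file:
//   const GpuImageDesc desc = gpuResourceMgr.CreateGpuImageDesc(imageContainer->GetImageDesc());
//   RenderHandleReference handle = gpuResourceMgr.Create("MyLoadedImage", desc, imageContainer->GetData());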

IGpuResourceCache& GpuResourceManager::GetGpuResourceCache() const
{
    return *gpuResourceCache_;
}

#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
void GpuResourceManager::ProcessDebugTags()
{
    // NOTE: there is a minor possibility that the client data has changed before these are locked,
    // but the GPU resource itself is the correct one.
    // Define CORE_EXTENT_DEBUG_GPU_RESOURCE_MGR_NAMES for more precise debugging.
#if defined(CORE_EXTENT_DEBUG_GPU_RESOURCE_MGR_NAMES)
    const auto frameIdx = to_string(device_.GetFrameCount());
#endif
    auto AddDebugTags = [&](PerManagerStore& store, const RenderHandleType handleType) {
        const auto lock = std::lock_guard(store.clientMutex);
        vector<RenderHandle> allocs = move(store.debugTagAllocations);
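        // moving the list out consumes the allocations gathered since the previous call;
        // anything added after this point is tagged on a later call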
        for (const auto& handle : allocs) {
            if (RenderHandleUtil::GetHasNamePart(handle) == 0) {
                continue;
            }
            const uint32_t arrayIndex = RenderHandleUtil::GetIndexPart(handle);
            string name = GetName(store.clientHandles[arrayIndex].GetHandle());
#if defined(CORE_EXTENT_DEBUG_GPU_RESOURCE_MGR_NAMES)
            const auto extentName =
                "_chandle:" + to_string(handle.id) + "_idx:" + to_string(arrayIndex) + "_fr:" + frameIdx;
            name += extentName;
#endif
            if (handleType == RenderHandleType::GPU_BUFFER) {
                GpuResourceUtil::DebugBufferName(device_, *gpuBufferMgr_->Get(arrayIndex), name);
            } else if (handleType == RenderHandleType::GPU_IMAGE) {
                GpuResourceUtil::DebugImageName(device_, *gpuImageMgr_->Get(arrayIndex), name);
            } else if (handleType == RenderHandleType::GPU_SAMPLER) {
                GpuResourceUtil::DebugSamplerName(device_, *gpuSamplerMgr_->Get(arrayIndex), name);
            }
        }
    };
    AddDebugTags(bufferStore_, RenderHandleType::GPU_BUFFER);
    AddDebugTags(imageStore_, RenderHandleType::GPU_IMAGE);
    AddDebugTags(samplerStore_, RenderHandleType::GPU_SAMPLER);
}
#endif

RenderNodeGpuResourceManager::RenderNodeGpuResourceManager(GpuResourceManager& gpuResourceManager)
    : gpuResourceMgr_(gpuResourceManager)
{}

RenderNodeGpuResourceManager::~RenderNodeGpuResourceManager() {}

RenderHandleReference RenderNodeGpuResourceManager::Get(const RenderHandle& handle) const
{
    return gpuResourceMgr_.Get(handle);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(const GpuBufferDesc& desc)
{
    return gpuResourceMgr_.Create(desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(const string_view name, const GpuBufferDesc& desc)
{
    return gpuResourceMgr_.Create(name, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(
    const RenderHandleReference& handle, const GpuBufferDesc& desc)
{
    return gpuResourceMgr_.Create(handle, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(
    const string_view name, const GpuBufferDesc& desc, const array_view<const uint8_t> data)
{
    return gpuResourceMgr_.Create(name, desc, data);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(const GpuImageDesc& desc)
{
    return gpuResourceMgr_.Create(desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(const string_view name, const GpuImageDesc& desc)
{
    return gpuResourceMgr_.Create(name, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(
    const RenderHandleReference& handle, const GpuImageDesc& desc)
{
    return gpuResourceMgr_.Create(handle, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(
    const string_view name, const GpuImageDesc& desc, const array_view<const uint8_t> data)
{
    return gpuResourceMgr_.Create(name, desc, data);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(const string_view name, const GpuSamplerDesc& desc)
{
    return gpuResourceMgr_.Create(name, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(
    const RenderHandleReference& handle, const GpuSamplerDesc& desc)
{
    return gpuResourceMgr_.Create(handle, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(const GpuSamplerDesc& desc)
{
    return gpuResourceMgr_.Create(desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(
    const string_view name, const GpuAccelerationStructureDesc& desc)
{
    return gpuResourceMgr_.Create(name, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(
    const RenderHandleReference& handle, const GpuAccelerationStructureDesc& desc)
{
    return gpuResourceMgr_.Create(handle, desc);
}

RenderHandleReference RenderNodeGpuResourceManager::Create(const GpuAccelerationStructureDesc& desc)
{
    return gpuResourceMgr_.Create(desc);
}
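
// Illustrative only: creating a small named uniform buffer from a render node. The descriptor
// values below are example choices, not engine defaults:
//   GpuBufferDesc desc;
//   desc.usageFlags = CORE_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
//   desc.memoryPropertyFlags = CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT | CORE_MEMORY_PROPERTY_HOST_COHERENT_BIT;
//   desc.byteSize = 256U;
//   RenderHandleReference handle = renderNodeGpuResourceMgr.Create("MyNodeUniformBuffer", desc);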

RenderHandle RenderNodeGpuResourceManager::GetBufferHandle(const string_view name) const
{
    return gpuResourceMgr_.GetBufferRawHandle(name);
}

RenderHandle RenderNodeGpuResourceManager::GetImageHandle(const string_view name) const
{
    return gpuResourceMgr_.GetImageRawHandle(name);
}

RenderHandle RenderNodeGpuResourceManager::GetSamplerHandle(const string_view name) const
{
    return gpuResourceMgr_.GetSamplerRawHandle(name);
}

GpuBufferDesc RenderNodeGpuResourceManager::GetBufferDescriptor(const RenderHandle& handle) const
{
    return gpuResourceMgr_.GetBufferDescriptor(handle);
}

GpuImageDesc RenderNodeGpuResourceManager::GetImageDescriptor(const RenderHandle& handle) const
{
    return gpuResourceMgr_.GetImageDescriptor(handle);
}

GpuSamplerDesc RenderNodeGpuResourceManager::GetSamplerDescriptor(const RenderHandle& handle) const
{
    return gpuResourceMgr_.GetSamplerDescriptor(handle);
}

GpuAccelerationStructureDesc RenderNodeGpuResourceManager::GetAccelerationStructureDescriptor(
    const RenderHandle& handle) const
{
    return gpuResourceMgr_.GetAccelerationStructureDescriptor(handle);
}

bool RenderNodeGpuResourceManager::HasStagingData() const
{
    return gpuResourceMgr_.HasStagingData();
}

StagingConsumeStruct RenderNodeGpuResourceManager::ConsumeStagingData()
{
    return gpuResourceMgr_.ConsumeStagingData();
}

void* RenderNodeGpuResourceManager::MapBuffer(const RenderHandle& handle) const
{
    return gpuResourceMgr_.MapBuffer(handle);
}

void* RenderNodeGpuResourceManager::MapBufferMemory(const RenderHandle& handle) const
{
    return gpuResourceMgr_.MapBufferMemory(handle);
}

void RenderNodeGpuResourceManager::UnmapBuffer(const RenderHandle& handle) const
{
    gpuResourceMgr_.UnmapBuffer(handle);
}
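
// Illustrative only: writing CPU data into a mappable buffer (srcData/dataSize are placeholders):
//   if (void* mapped = renderNodeGpuResourceMgr.MapBuffer(handle); mapped) {
//       // copy dataSize bytes from srcData into mapped
//       renderNodeGpuResourceMgr.UnmapBuffer(handle);
//   }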

GpuResourceManager& RenderNodeGpuResourceManager::GetGpuResourceManager()
{
    return gpuResourceMgr_;
}

bool RenderNodeGpuResourceManager::IsValid(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsValid(handle);
}

bool RenderNodeGpuResourceManager::IsGpuBuffer(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuBuffer(handle);
}

bool RenderNodeGpuResourceManager::IsGpuImage(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuImage(handle);
}

bool RenderNodeGpuResourceManager::IsGpuSampler(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuSampler(handle);
}

bool RenderNodeGpuResourceManager::IsGpuAccelerationStructure(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsGpuAccelerationStructure(handle);
}

bool RenderNodeGpuResourceManager::IsSwapchain(const RenderHandle& handle) const
{
    return RenderHandleUtil::IsSwapchain(handle);
}

FormatProperties RenderNodeGpuResourceManager::GetFormatProperties(const RenderHandle& handle) const
{
    return gpuResourceMgr_.GetFormatProperties(handle);
}

FormatProperties RenderNodeGpuResourceManager::GetFormatProperties(const Format format) const
{
    return gpuResourceMgr_.GetFormatProperties(format);
}

ImageAspectFlags RenderNodeGpuResourceManager::GetImageAspectFlags(const RenderHandle& handle) const
{
    return gpuResourceMgr_.GetImageAspectFlags(handle);
}

ImageAspectFlags RenderNodeGpuResourceManager::GetImageAspectFlags(const Format format) const
{
    return gpuResourceMgr_.GetImageAspectFlags(format);
}

string RenderNodeGpuResourceManager::GetName(const RenderHandle& handle) const
{
    return gpuResourceMgr_.GetName(handle);
}

IGpuResourceCache& RenderNodeGpuResourceManager::GetGpuResourceCache() const
{
    return gpuResourceMgr_.GetGpuResourceCache();
}

RENDER_END_NAMESPACE()