/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "render_node_default_acceleration_structure_staging.h"

#include <cinttypes>

#include <render/datastore/intf_render_data_store_manager.h>
#include <render/device/intf_gpu_resource_manager.h>
#include <render/namespace.h>
#include <render/nodecontext/intf_node_context_pso_manager.h>
#include <render/nodecontext/intf_render_command_list.h>
#include <render/nodecontext/intf_render_node_context_manager.h>

#include "datastore/render_data_store_default_acceleration_structure_staging.h"

#if RENDER_HAS_VULKAN_BACKEND
#include <vulkan/vulkan_core.h>

#include "device/gpu_resource_manager.h"
#include "vulkan/gpu_buffer_vk.h"
#endif

#include "util/log.h"
using namespace BASE_NS;

RENDER_BEGIN_NAMESPACE()
namespace {} // namespace

void RenderNodeDefaultAccelerationStructureStaging::InitNode(IRenderNodeContextManager& renderNodeContextMgr)
{
    renderNodeContextMgr_ = &renderNodeContextMgr;

    const auto& renderNodeGraphData = renderNodeContextMgr_->GetRenderNodeGraphData();
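    // the staging data store name is prefixed with the render node graph name, so each graph instance gets its own store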
    dsName_ = renderNodeGraphData.renderNodeGraphName + "RenderDataStoreDefaultAccelerationStructureStaging";
}

void RenderNodeDefaultAccelerationStructureStaging::PreExecuteFrame()
{
    // re-create needed gpu resources
}

void RenderNodeDefaultAccelerationStructureStaging::ExecuteFrame(IRenderCommandList& cmdList)
{
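    // acceleration structure staging is only supported on the Vulkan backend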
    if (renderNodeContextMgr_->GetRenderNodeGraphData().renderingConfiguration.renderBackend !=
        DeviceBackendType::VULKAN) {
        return;
    }
#if (RENDER_VULKAN_RT_ENABLED == 1)
    const IRenderNodeRenderDataStoreManager& rdsMgr = renderNodeContextMgr_->GetRenderDataStoreManager();
    if (RenderDataStoreDefaultAccelerationStructureStaging* ds =
            static_cast<RenderDataStoreDefaultAccelerationStructureStaging*>(rdsMgr.GetRenderDataStore(dsName_));
        ds) {
        if (ds->HasStagingData()) {
            // order does not matter at the moment (builds are deferred to the device, instance data is copied coherently)
            AccelerationStructureBuildConsumeStruct stagingBuildData = ds->ConsumeStagingBuildData();
            const auto& triangles = stagingBuildData.triangles;
            const auto& aabbs = stagingBuildData.aabbs;
            const auto& instances = stagingBuildData.instances;
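            // each geometry reference selects one typed geometry range (triangles, AABBs, or instances)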
            for (const auto geomRef : stagingBuildData.geometry) {
                const uint32_t startIndex = geomRef.startIndex;
                const uint32_t count = geomRef.count;
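                // only a single geometry per build is currently supported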
                PLUGIN_ASSERT(count <= 1);
                AccelerationStructureBuildGeometryData geometry { { geomRef.data.info },
                    geomRef.data.srcAccelerationStructure.GetHandle(),
                    geomRef.data.dstAccelerationStructure.GetHandle(),
                    { geomRef.data.scratchBuffer.handle.GetHandle(), geomRef.data.scratchBuffer.offset } };
                if ((geomRef.geometryType == GeometryType::CORE_GEOMETRY_TYPE_TRIANGLES) &&
                    (startIndex + count <= static_cast<uint32_t>(triangles.size()))) {
                    const auto& triRef = triangles[startIndex];
                    AccelerationStructureGeometryTrianglesData triData { triRef.info,
                        { triRef.vertexData.handle.GetHandle(), triRef.vertexData.offset },
                        { triRef.indexData.handle.GetHandle(), triRef.indexData.offset },
                        { triRef.transformData.handle.GetHandle(), triRef.transformData.offset } };
                    cmdList.BuildAccelerationStructures(move(geometry), { &triData, 1u }, {}, {});
                } else if ((geomRef.geometryType == GeometryType::CORE_GEOMETRY_TYPE_AABBS) &&
                    (startIndex + count <= static_cast<uint32_t>(aabbs.size()))) {
                    const auto& aabbRef = aabbs[startIndex];
                    AccelerationStructureGeometryAabbsData aabbData { aabbRef.info,
                        { aabbRef.data.handle.GetHandle(), aabbRef.data.offset } };
                    cmdList.BuildAccelerationStructures(move(geometry), {}, { &aabbData, count }, {});
                } else if ((geomRef.geometryType == GeometryType::CORE_GEOMETRY_TYPE_INSTANCES) &&
                    (startIndex + count <= static_cast<uint32_t>(instances.size()))) {
                    const auto& instanceRef = instances[startIndex];
                    AccelerationStructureGeometryInstancesData instanceData { instanceRef.info,
                        { instanceRef.data.handle.GetHandle(), instanceRef.data.offset } };
                    cmdList.BuildAccelerationStructures(move(geometry), {}, {}, { &instanceData, count });
                }
            }
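            // host-visible instance data copies are processed separately below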
            ExecuteFrameProcessInstanceData(ds);
        }
    }
#endif
}

void RenderNodeDefaultAccelerationStructureStaging::ExecuteFrameProcessInstanceData(
    RenderDataStoreDefaultAccelerationStructureStaging* dataStore)
{
#if (RENDER_VULKAN_RT_ENABLED == 1)
#if (RENDER_HAS_VULKAN_BACKEND == 1)
    if (dataStore) {
        AccelerationStructureInstanceConsumeStruct stagingInstanceData = dataStore->ConsumeStagingInstanceData();
        auto& gpuResourceMgr =
            static_cast<RenderNodeGpuResourceManager&>(renderNodeContextMgr_->GetGpuResourceManager());
        const auto& gpuResourceMgrImpl = static_cast<const GpuResourceManager&>(gpuResourceMgr.GetGpuResourceManager());
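        // write each staged instance into its destination buffer as a VkAccelerationStructureInstanceKHR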
        for (const auto dataRef : stagingInstanceData.copyInfo) {
            if (dataRef.bufferOffset.handle && (dataRef.count > 0)) {
                const RenderHandle dstHandle = dataRef.bufferOffset.handle.GetHandle();
                const GpuBufferDesc dstBufferDesc = gpuResourceMgr.GetBufferDescriptor(dstHandle);
                if (uint8_t* dstDataBegin = static_cast<uint8_t*>(gpuResourceMgr.MapBuffer(dstHandle)); dstDataBegin) {
                    const uint8_t* dstDataEnd = dstDataBegin + dstBufferDesc.byteSize;
                    // loop and copy all instances
                    for (uint32_t idx = 0; idx < dataRef.count; ++idx) {
                        const auto& instanceRef = stagingInstanceData.instances[dataRef.startIndex + idx];
                        uint64_t accelerationStructureReference = 0;
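                        // resolve the device address of the referenced acceleration structure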
                        if (const GpuBufferVk* accelPtr = gpuResourceMgrImpl.GetBuffer<GpuBufferVk>(
                                instanceRef.accelerationStructureReference.GetHandle());
                            accelPtr) {
                            accelerationStructureReference =
                                accelPtr->GetPlatformDataAccelerationStructure().deviceAddress;
                        }
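                        // identity transform (VkTransformMatrixKHR is a row-major 3x4 matrix)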
                        VkTransformMatrixKHR transformMatrix = {
                            1.0f, 0.0f, 0.0f, 0.0f, // row 0
                            0.0f, 1.0f, 0.0f, 0.0f, // row 1
                            0.0f, 0.0f, 1.0f, 0.0f, // row 2
                        };

                        VkAccelerationStructureInstanceKHR instance {
                            transformMatrix,                               // transform
                            instanceRef.instanceCustomIndex,               // instanceCustomIndex : 24
                            instanceRef.mask,                              // mask : 8
                            0,                                             // instanceShaderBindingTableRecordOffset : 24
                            VkGeometryInstanceFlagsKHR(instanceRef.flags), // flags : 8
                            accelerationStructureReference,                // accelerationStructureReference
                        };
                        const size_t byteSize = sizeof(VkAccelerationStructureInstanceKHR);
                        uint8_t* dstData = dstDataBegin + byteSize * idx;
                        CloneData(dstData, size_t(dstDataEnd - dstData), &instance, byteSize);
                    }

                    gpuResourceMgr.UnmapBuffer(dstHandle);
                } else {
                    PLUGIN_LOG_E("accel staging: failed to map destination buffer (handle: %" PRIu64 ")", dstHandle.id);
                }
            }
        }
    }
#endif
#endif
}

// for plugin / factory interface
IRenderNode* RenderNodeDefaultAccelerationStructureStaging::Create()
{
    return new RenderNodeDefaultAccelerationStructureStaging();
}

void RenderNodeDefaultAccelerationStructureStaging::Destroy(IRenderNode* instance)
{
    delete static_cast<RenderNodeDefaultAccelerationStructureStaging*>(instance);
}
RENDER_END_NAMESPACE()