GitHub Repository: bevyengine/bevy
Path: blob/main/crates/bevy_pbr/src/render/gpu_preprocess.rs

//! GPU mesh preprocessing.
//!
//! This is an optional pass that uses a compute shader to reduce the amount of
//! data that has to be transferred from the CPU to the GPU. When enabled,
//! instead of transferring [`MeshUniform`]s to the GPU, we transfer the smaller
//! [`MeshInputUniform`]s instead and use the GPU to calculate the remaining
//! derived fields in [`MeshUniform`].
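//!
//! As a minimal sketch of how this is enabled (assuming the standard
//! `PbrPlugin` entry point, which lives outside this file):
//!
//! ```ignore
//! App::new().add_plugins(DefaultPlugins.set(PbrPlugin {
//!     // When true, `MeshUniform`s are built on the GPU by the compute pass
//!     // in this module; the plugin falls back to the CPU path otherwise.
//!     use_gpu_instance_buffer_builder: true,
//!     ..Default::default()
//! }));
//! ```
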
use core::num::{NonZero, NonZeroU64};

use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, load_embedded_asset, Handle};
use bevy_core_pipeline::{
    deferred::node::late_deferred_prepass,
    mip_generation::experimental::depth::{early_downsample_depth, ViewDepthPyramid},
    prepass::{
        node::{early_prepass, late_prepass},
        DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms,
    },
    schedule::{Core3d, Core3dSystems},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    component::Component,
    entity::Entity,
    prelude::resource_exists,
    query::{Has, Or, With, Without},
    resource::Resource,
    schedule::{common_conditions::any_match_filter, IntoScheduleConfigs as _},
    system::{Commands, Query, Res, ResMut},
    world::{FromWorld, World},
};
use bevy_log::warn_once;
use bevy_render::{
    batching::gpu_preprocessing::{
        BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingMode,
        GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers,
        IndirectParametersCpuMetadata, IndirectParametersGpuMetadata, IndirectParametersIndexed,
        IndirectParametersNonIndexed, LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem,
        PreprocessWorkItemBuffers, UntypedPhaseBatchedInstanceBuffers,
        UntypedPhaseIndirectParametersBuffers,
    },
    diagnostic::RecordDiagnostics as _,
    occlusion_culling::OcclusionCulling,
    render_resource::{
        binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer},
        BindGroup, BindGroupEntries, BindGroupLayoutDescriptor, BindingResource, Buffer,
        BufferBinding, CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor,
        DynamicBindGroupLayoutEntries, PipelineCache, RawBufferVec, ShaderStages, ShaderType,
        SpecializedComputePipeline, SpecializedComputePipelines, TextureSampleType,
        UninitBufferVec,
    },
    renderer::{RenderContext, RenderDevice, RenderQueue, ViewQuery},
    settings::WgpuFeatures,
    view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms},
    Render, RenderApp, RenderSystems,
};
use bevy_shader::Shader;
use bevy_utils::{default, TypeIdMap};
use bitflags::bitflags;
use smallvec::{smallvec, SmallVec};
use tracing::warn;

use crate::{MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform};

use super::{ShadowView, ViewLightEntities};

/// The GPU workgroup size.
const WORKGROUP_SIZE: usize = 64;
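
// Each dispatch below covers `ceil(work_items / WORKGROUP_SIZE)` workgroups.
// As a worked example (not code from this file): 1000 work items yield
// `1000usize.div_ceil(WORKGROUP_SIZE) == 16` workgroups, i.e. 1024 invocations,
// of which the trailing 24 excess invocations must be rejected by a bounds
// check in the shader (an assumption about `mesh_preprocess.wgsl`, not shown
// here).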

/// A plugin that builds mesh uniforms on GPU.
///
/// This will only be added if the platform supports compute shaders (e.g. not
/// on WebGL 2).
pub struct GpuMeshPreprocessPlugin {
    /// Whether we're building [`MeshUniform`]s on GPU.
    ///
    /// This requires compute shader support and so will be forcibly disabled
    /// if the platform doesn't support them.
    pub use_gpu_instance_buffer_builder: bool,
}

/// The compute shader pipelines for the GPU mesh preprocessing and indirect
/// parameter building passes.
#[derive(Resource)]
pub struct PreprocessPipelines {
    /// The pipeline used for CPU culling. This pipeline doesn't populate
    /// indirect parameter metadata.
    pub direct_preprocess: PreprocessPipeline,
    /// The pipeline used for mesh preprocessing when GPU frustum culling is in
    /// use, but occlusion culling isn't.
    ///
    /// This pipeline populates indirect parameter metadata.
    pub gpu_frustum_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the first phase of occlusion culling.
    ///
    /// This pipeline culls, transforms meshes, and populates indirect parameter
    /// metadata.
    pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the second phase of occlusion culling.
    ///
    /// This pipeline culls, transforms meshes, and populates indirect parameter
    /// metadata.
    pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline that builds indirect draw parameters for indexed meshes,
    /// when frustum culling is enabled but occlusion culling *isn't* enabled.
    pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline that builds indirect draw parameters for non-indexed
    /// meshes, when frustum culling is enabled but occlusion culling *isn't*
    /// enabled.
    pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Compute shader pipelines for the early prepass phase that draws meshes
    /// visible in the previous frame.
    pub early_phase: PreprocessPhasePipelines,
    /// Compute shader pipelines for the late prepass phase that draws meshes
    /// that weren't visible in the previous frame, but became visible this
    /// frame.
    pub late_phase: PreprocessPhasePipelines,
    /// Compute shader pipelines for the main color phase.
    pub main_phase: PreprocessPhasePipelines,
}

/// Compute shader pipelines for a specific phase: early, late, or main.
///
/// The distinction between these phases is relevant for occlusion culling.
#[derive(Clone)]
pub struct PreprocessPhasePipelines {
    /// The pipeline that resets the indirect draw counts used in
    /// `multi_draw_indirect_count` to 0 in preparation for a new pass.
    pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline,
    /// The pipeline used for indexed indirect parameter building.
    ///
    /// This pipeline converts indirect parameter metadata into indexed indirect
    /// parameters.
    pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline used for non-indexed indirect parameter building.
    ///
    /// This pipeline converts indirect parameter metadata into non-indexed
    /// indirect parameters.
    pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
}

/// The pipeline for the GPU mesh preprocessing shader.
pub struct PreprocessPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The shader asset handle.
    pub shader: Handle<Shader>,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`.
    pub pipeline_id: Option<CachedComputePipelineId>,
}

/// The pipeline for the batch set count reset shader.
///
/// This shader resets the indirect batch set count to 0 for each view. It runs
/// in between every phase (early, late, and main).
#[derive(Clone)]
pub struct ResetIndirectBatchSetsPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The shader asset handle.
    pub shader: Handle<Shader>,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`.
    pub pipeline_id: Option<CachedComputePipelineId>,
}

/// The pipeline for the indirect parameter building shader.
#[derive(Clone)]
pub struct BuildIndirectParametersPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The shader asset handle.
    pub shader: Handle<Shader>,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`.
    pub pipeline_id: Option<CachedComputePipelineId>,
}

bitflags! {
    /// Specifies variants of the mesh preprocessing shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct PreprocessPipelineKey: u8 {
        /// Whether GPU frustum culling is in use.
        ///
        /// This defines `FRUSTUM_CULLING` in the shader.
        const FRUSTUM_CULLING = 1;
        /// Whether GPU two-phase occlusion culling is in use.
        ///
        /// This defines `OCCLUSION_CULLING` in the shader.
        const OCCLUSION_CULLING = 2;
        /// Whether this is the early phase of GPU two-phase occlusion culling.
        ///
        /// This defines `EARLY_PHASE` in the shader.
        const EARLY_PHASE = 4;
    }

    /// Specifies variants of the indirect parameter building shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BuildIndirectParametersPipelineKey: u8 {
        /// Whether the indirect parameter building shader is processing indexed
        /// meshes (those that have index buffers).
        ///
        /// This defines `INDEXED` in the shader.
        const INDEXED = 1;
        /// Whether the GPU and driver support `multi_draw_indirect_count`.
        ///
        /// This defines `MULTI_DRAW_INDIRECT_COUNT_SUPPORTED` in the shader.
        const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2;
        /// Whether GPU two-phase occlusion culling is in use.
        ///
        /// This defines `OCCLUSION_CULLING` in the shader.
        const OCCLUSION_CULLING = 4;
        /// Whether this is the early phase of GPU two-phase occlusion culling.
        ///
        /// This defines `EARLY_PHASE` in the shader.
        const EARLY_PHASE = 8;
        /// Whether this is the late phase of GPU two-phase occlusion culling.
        ///
        /// This defines `LATE_PHASE` in the shader.
        const LATE_PHASE = 16;
        /// Whether this is the phase that runs after the early and late phases,
        /// and right before the main drawing logic, when GPU two-phase
        /// occlusion culling is in use.
        ///
        /// This defines `MAIN_PHASE` in the shader.
        const MAIN_PHASE = 32;
    }
}
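
// As a usage sketch, `prepare_preprocess_pipelines` below specializes the
// early occlusion-culling preprocessing variant with the union of these flags:
//
//     let key = PreprocessPipelineKey::FRUSTUM_CULLING
//         | PreprocessPipelineKey::OCCLUSION_CULLING
//         | PreprocessPipelineKey::EARLY_PHASE;
//     assert!(key.contains(PreprocessPipelineKey::OCCLUSION_CULLING));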

/// The compute shader bind group for the mesh preprocessing pass for each
/// render phase.
///
/// This goes on the view. It maps the [`core::any::TypeId`] of a render phase
/// (e.g. [`bevy_core_pipeline::core_3d::Opaque3d`]) to the
/// [`PhasePreprocessBindGroups`] for that phase.
#[derive(Component, Clone, Deref, DerefMut)]
pub struct PreprocessBindGroups(pub TypeIdMap<PhasePreprocessBindGroups>);

/// The compute shader bind group for the mesh preprocessing step for a single
/// render phase on a single view.
#[derive(Clone)]
pub enum PhasePreprocessBindGroups {
    /// The bind group used for the single invocation of the compute shader when
    /// indirect drawing is *not* being used.
    ///
    /// Because direct drawing doesn't require splitting the meshes into indexed
    /// and non-indexed meshes, there's only one bind group in this case.
    Direct(BindGroup),

    /// The bind groups used for the compute shader when indirect drawing is
    /// being used, but occlusion culling isn't being used.
    ///
    /// Because indirect drawing requires splitting the meshes into indexed and
    /// non-indexed meshes, there are two bind groups here.
    IndirectFrustumCulling {
        /// The bind group for indexed meshes.
        indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes.
        non_indexed: Option<BindGroup>,
    },

    /// The bind groups used for the compute shader when indirect drawing is
    /// being used and occlusion culling is also in use.
    ///
    /// Because indirect drawing requires splitting the meshes into indexed and
    /// non-indexed meshes, and because occlusion culling requires splitting
    /// this phase into early and late versions, there are four bind groups
    /// here.
    IndirectOcclusionCulling {
        /// The bind group for indexed meshes during the early mesh
        /// preprocessing phase.
        early_indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes during the early mesh
        /// preprocessing phase.
        early_non_indexed: Option<BindGroup>,
        /// The bind group for indexed meshes during the late mesh preprocessing
        /// phase.
        late_indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes during the late mesh
        /// preprocessing phase.
        late_non_indexed: Option<BindGroup>,
    },
}

/// The bind groups for the compute shaders that reset indirect draw counts and
/// build indirect parameters.
///
/// There's one set of bind groups for each phase. Phases are keyed off their
/// [`core::any::TypeId`].
#[derive(Resource, Default, Deref, DerefMut)]
pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);

impl BuildIndirectParametersBindGroups {
    /// Creates a new, empty [`BuildIndirectParametersBindGroups`] table.
    pub fn new() -> BuildIndirectParametersBindGroups {
        Self::default()
    }
}

/// The per-phase set of bind groups for the compute shaders that reset indirect
/// draw counts and build indirect parameters.
pub struct PhaseBuildIndirectParametersBindGroups {
    /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for
    /// indexed meshes.
    reset_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for
    /// non-indexed meshes.
    reset_non_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group for the `build_indirect_params.wgsl` shader, for indexed
    /// meshes.
    build_indexed_indirect: Option<BindGroup>,
    /// The bind group for the `build_indirect_params.wgsl` shader, for
    /// non-indexed meshes.
    build_non_indexed_indirect: Option<BindGroup>,
}

/// Stops the `GpuPreprocessNode` from attempting to generate the buffer for
/// this view.
///
/// This is useful to avoid duplicating effort if the bind group is shared
/// between views.
#[derive(Component, Default)]
pub struct SkipGpuPreprocess;
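
// A minimal usage sketch (the spawn site is hypothetical; any render-world
// view entity works):
//
//     commands.entity(view_entity).insert(SkipGpuPreprocess);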

impl Plugin for GpuMeshPreprocessPlugin {
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "mesh_preprocess.wgsl");
        embedded_asset!(app, "reset_indirect_batch_sets.wgsl");
        embedded_asset!(app, "build_indirect_params.wgsl");
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // This plugin does nothing if GPU instance buffer building isn't in
        // use.
        let gpu_preprocessing_support = render_app.world().resource::<GpuPreprocessingSupport>();
        if !self.use_gpu_instance_buffer_builder || !gpu_preprocessing_support.is_available() {
            return;
        }

        render_app
            .init_resource::<PreprocessPipelines>()
            .init_resource::<SpecializedComputePipelines<PreprocessPipeline>>()
            .init_resource::<SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>>()
            .init_resource::<SpecializedComputePipelines<BuildIndirectParametersPipeline>>()
            .add_systems(
                Render,
                (
                    prepare_preprocess_pipelines.in_set(RenderSystems::Prepare),
                    prepare_preprocess_bind_groups
                        .run_if(resource_exists::<BatchedInstanceBuffers<
                            MeshUniform,
                            MeshInputUniform
                        >>)
                        .in_set(RenderSystems::PrepareBindGroups),
                    write_mesh_culling_data_buffer.in_set(RenderSystems::PrepareResourcesFlush),
                ),
            )
            .add_systems(
                Core3d,
                (
                    (
                        clear_indirect_parameters_metadata,
                        early_gpu_preprocess,
                        early_prepass_build_indirect_parameters.run_if(any_match_filter::<(
                            With<PreprocessBindGroups>,
                            Without<SkipGpuPreprocess>,
                            Without<NoIndirectDrawing>,
                            Or<(With<DepthPrepass>, With<ShadowView>)>,
                        )>),
                    )
                        .chain()
                        .before(early_prepass),
                    (
                        late_gpu_preprocess,
                        late_prepass_build_indirect_parameters.run_if(any_match_filter::<(
                            With<PreprocessBindGroups>,
                            Without<SkipGpuPreprocess>,
                            Without<NoIndirectDrawing>,
                            Or<(With<DepthPrepass>, With<ShadowView>)>,
                            With<OcclusionCulling>,
                        )>),
                    )
                        .chain()
                        .after(early_downsample_depth)
                        .before(late_prepass),
                    main_build_indirect_parameters
                        .run_if(any_match_filter::<(
                            With<PreprocessBindGroups>,
                            Without<SkipGpuPreprocess>,
                            Without<NoIndirectDrawing>,
                        )>)
                        .after(late_prepass_build_indirect_parameters)
                        .after(late_deferred_prepass)
                        .before(Core3dSystems::MainPass),
                ),
            );
    }
}

pub fn clear_indirect_parameters_metadata(
    indirect_parameters_buffers: Option<Res<IndirectParametersBuffers>>,
    mut ctx: RenderContext,
) {
    let Some(indirect_parameters_buffers) = indirect_parameters_buffers else {
        return;
    };

    // Clear out each indexed and non-indexed GPU-side buffer.
    for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() {
        if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
            .indexed
            .gpu_metadata_buffer()
        {
            ctx.command_encoder().clear_buffer(
                indexed_gpu_metadata_buffer,
                0,
                Some(
                    phase_indirect_parameters_buffers.indexed.batch_count() as u64
                        * size_of::<IndirectParametersGpuMetadata>() as u64,
                ),
            );
        }

        if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
            .non_indexed
            .gpu_metadata_buffer()
        {
            ctx.command_encoder().clear_buffer(
                non_indexed_gpu_metadata_buffer,
                0,
                Some(
                    phase_indirect_parameters_buffers.non_indexed.batch_count() as u64
                        * size_of::<IndirectParametersGpuMetadata>() as u64,
                ),
            );
        }
    }
}
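
// A worked example of the clear size above: with a batch count of 3 and a
// hypothetical 32-byte `IndirectParametersGpuMetadata`, bytes `0..96` of the
// GPU metadata buffer are zeroed. (The struct's actual size is defined in
// `bevy_render` and may differ.)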

pub fn early_gpu_preprocess(
    current_view: ViewQuery<Option<&ViewLightEntities>, Without<SkipGpuPreprocess>>,
    view_query: Query<
        (
            &ExtractedView,
            Option<&PreprocessBindGroups>,
            Option<&ViewUniformOffset>,
            Has<NoIndirectDrawing>,
            Has<OcclusionCulling>,
        ),
        Without<SkipGpuPreprocess>,
    >,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    pipeline_cache: Res<PipelineCache>,
    preprocess_pipelines: Res<PreprocessPipelines>,
    mut ctx: RenderContext,
) {
    let diagnostics = ctx.diagnostic_recorder();
    let diagnostics = diagnostics.as_deref();

    let command_encoder = ctx.command_encoder();

    let mut compute_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor {
        label: Some("early_mesh_preprocessing"),
        timestamp_writes: None,
    });

    let pass_span = diagnostics.pass_span(&mut compute_pass, "early_mesh_preprocessing");

    let view_entity = current_view.entity();
    let shadow_cascade_views = current_view.into_inner();
    let mut all_views: SmallVec<[_; 8]> = SmallVec::new();
    all_views.push(view_entity);
    if let Some(shadow_cascade_views) = shadow_cascade_views {
        all_views.extend(shadow_cascade_views.lights.iter().copied());
    }

    // Run the compute passes.
    for view_entity in all_views {
        let Ok((view, bind_groups, view_uniform_offset, no_indirect_drawing, occlusion_culling)) =
            view_query.get(view_entity)
        else {
            continue;
        };

        let Some(bind_groups) = bind_groups else {
            continue;
        };
        let Some(view_uniform_offset) = view_uniform_offset else {
            continue;
        };

        // Select the right pipeline, depending on whether GPU culling is in
        // use.
        let maybe_pipeline_id = if no_indirect_drawing {
            preprocess_pipelines.direct_preprocess.pipeline_id
        } else if occlusion_culling {
            preprocess_pipelines
                .early_gpu_occlusion_culling_preprocess
                .pipeline_id
        } else {
            preprocess_pipelines
                .gpu_frustum_culling_preprocess
                .pipeline_id
        };

        // Fetch the pipeline.
        let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
            warn!("The build mesh uniforms pipeline wasn't ready");
            continue;
        };

        let Some(preprocess_pipeline) = pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
        else {
            // This will happen while the pipeline is being compiled and is fine.
            continue;
        };

        compute_pass.set_pipeline(preprocess_pipeline);

        // Loop over each render phase.
        for (phase_type_id, batched_phase_instance_buffers) in
            &batched_instance_buffers.phase_instance_buffers
        {
            // Grab the work item buffers for this view.
            let Some(work_item_buffers) = batched_phase_instance_buffers
                .work_item_buffers
                .get(&view.retained_view_entity)
            else {
                continue;
            };

            // Fetch the bind group for the render phase.
            let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else {
                continue;
            };

            // Make sure the mesh preprocessing shader has access to the
            // view info it needs to do culling and motion vector
            // computation.
            let dynamic_offsets = [view_uniform_offset.offset];

            // Are we drawing directly or indirectly?
            match *phase_bind_groups {
                PhasePreprocessBindGroups::Direct(ref bind_group) => {
                    // Invoke the mesh preprocessing shader to transform
                    // meshes only, but not cull.
                    let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers
                    else {
                        continue;
                    };
                    compute_pass.set_bind_group(0, bind_group, &dynamic_offsets);
                    let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE);
                    if workgroup_count > 0 {
                        compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                    }
                }

                PhasePreprocessBindGroups::IndirectFrustumCulling {
                    indexed: ref maybe_indexed_bind_group,
                    non_indexed: ref maybe_non_indexed_bind_group,
                }
                | PhasePreprocessBindGroups::IndirectOcclusionCulling {
                    early_indexed: ref maybe_indexed_bind_group,
                    early_non_indexed: ref maybe_non_indexed_bind_group,
                    ..
                } => {
                    // Invoke the mesh preprocessing shader to transform and
                    // cull the meshes.
                    let PreprocessWorkItemBuffers::Indirect {
                        indexed: indexed_buffer,
                        non_indexed: non_indexed_buffer,
                        ..
                    } = work_item_buffers
                    else {
                        continue;
                    };

                    // Transform and cull indexed meshes if there are any.
                    if let Some(indexed_bind_group) = maybe_indexed_bind_group {
                        if let PreprocessWorkItemBuffers::Indirect {
                            gpu_occlusion_culling:
                                Some(GpuOcclusionCullingWorkItemBuffers {
                                    late_indirect_parameters_indexed_offset,
                                    ..
                                }),
                            ..
                        } = *work_item_buffers
                        {
                            compute_pass.set_immediates(
                                0,
                                bytemuck::bytes_of(&late_indirect_parameters_indexed_offset),
                            );
                        }

                        compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets);
                        let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                        if workgroup_count > 0 {
                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                        }
                    }

                    // Transform and cull non-indexed meshes if there are any.
                    if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group {
                        if let PreprocessWorkItemBuffers::Indirect {
                            gpu_occlusion_culling:
                                Some(GpuOcclusionCullingWorkItemBuffers {
                                    late_indirect_parameters_non_indexed_offset,
                                    ..
                                }),
                            ..
                        } = *work_item_buffers
                        {
                            compute_pass.set_immediates(
                                0,
                                bytemuck::bytes_of(&late_indirect_parameters_non_indexed_offset),
                            );
                        }

                        compute_pass.set_bind_group(0, non_indexed_bind_group, &dynamic_offsets);
                        let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                        if workgroup_count > 0 {
                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                        }
                    }
                }
            }
        }
    }

    pass_span.end(&mut compute_pass);
}
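
// Note on the immediates written above: each is the `u32` offset of this
// phase's slot in the late indirect-parameters buffer, so 4 bytes, matching
// the `immediate_size: 4` that `PreprocessPipeline::specialize` sets below for
// occlusion-culling variants. (That the offsets are `u32` is inferred from
// that size, not spelled out in this file.)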

pub fn late_gpu_preprocess(
    current_view: ViewQuery<
        (&ExtractedView, &PreprocessBindGroups, &ViewUniformOffset),
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            With<OcclusionCulling>,
            With<DepthPrepass>,
        ),
    >,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    pipeline_cache: Res<PipelineCache>,
    preprocess_pipelines: Res<PreprocessPipelines>,
    mut ctx: RenderContext,
) {
    let (view, bind_groups, view_uniform_offset) = current_view.into_inner();

    // Fetch the pipeline *before* starting the diagnostic span, to avoid
    // panicking on an early return.
    let maybe_pipeline_id = preprocess_pipelines
        .late_gpu_occlusion_culling_preprocess
        .pipeline_id;

    let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
        warn_once!("The build mesh uniforms pipeline wasn't ready");
        return;
    };

    let Some(preprocess_pipeline) = pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
    else {
        // This will happen while the pipeline is being compiled and is fine.
        return;
    };

    let diagnostics = ctx.diagnostic_recorder();
    let diagnostics = diagnostics.as_deref();

    let command_encoder = ctx.command_encoder();

    let mut compute_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor {
        label: Some("late_mesh_preprocessing"),
        timestamp_writes: None,
    });

    let pass_span = diagnostics.pass_span(&mut compute_pass, "late_mesh_preprocessing");

    compute_pass.set_pipeline(preprocess_pipeline);

    // Loop over each phase. Because we built the phases in parallel,
    // each phase has a separate set of instance buffers.
    for (phase_type_id, batched_phase_instance_buffers) in
        &batched_instance_buffers.phase_instance_buffers
    {
        let UntypedPhaseBatchedInstanceBuffers {
            ref work_item_buffers,
            ref late_indexed_indirect_parameters_buffer,
            ref late_non_indexed_indirect_parameters_buffer,
            ..
        } = *batched_phase_instance_buffers;

        // Grab the work item buffers for this view.
        let Some(phase_work_item_buffers) = work_item_buffers.get(&view.retained_view_entity)
        else {
            continue;
        };

        let (
            PreprocessWorkItemBuffers::Indirect {
                gpu_occlusion_culling:
                    Some(GpuOcclusionCullingWorkItemBuffers {
                        late_indirect_parameters_indexed_offset,
                        late_indirect_parameters_non_indexed_offset,
                        ..
                    }),
                ..
            },
            Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
                late_indexed: maybe_late_indexed_bind_group,
                late_non_indexed: maybe_late_non_indexed_bind_group,
                ..
            }),
            Some(late_indexed_indirect_parameters_buffer),
            Some(late_non_indexed_indirect_parameters_buffer),
        ) = (
            phase_work_item_buffers,
            bind_groups.get(phase_type_id),
            late_indexed_indirect_parameters_buffer.buffer(),
            late_non_indexed_indirect_parameters_buffer.buffer(),
        )
        else {
            continue;
        };

        let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![];
        dynamic_offsets.push(view_uniform_offset.offset);

        // If there's no space reserved for work items, then don't
        // bother doing the dispatch, as there can't possibly be any
        // meshes of the given class (indexed or non-indexed) in this
        // phase.

        // Transform and cull indexed meshes if there are any.
        if let Some(late_indexed_bind_group) = maybe_late_indexed_bind_group {
            compute_pass.set_immediates(
                0,
                bytemuck::bytes_of(late_indirect_parameters_indexed_offset),
            );

            compute_pass.set_bind_group(0, late_indexed_bind_group, &dynamic_offsets);
            compute_pass.dispatch_workgroups_indirect(
                late_indexed_indirect_parameters_buffer,
                (*late_indirect_parameters_indexed_offset as u64)
                    * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
            );
        }

        // Transform and cull non-indexed meshes if there are any.
        if let Some(late_non_indexed_bind_group) = maybe_late_non_indexed_bind_group {
            compute_pass.set_immediates(
                0,
                bytemuck::bytes_of(late_indirect_parameters_non_indexed_offset),
            );

            compute_pass.set_bind_group(0, late_non_indexed_bind_group, &dynamic_offsets);
            compute_pass.dispatch_workgroups_indirect(
                late_non_indexed_indirect_parameters_buffer,
                (*late_indirect_parameters_non_indexed_offset as u64)
                    * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
            );
        }
    }

    pass_span.end(&mut compute_pass);
}
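
// A worked example of the indirect dispatch offset above: with a slot index of
// 2 and a hypothetical 16-byte `LatePreprocessWorkItemIndirectParameters`, the
// GPU reads this dispatch's workgroup counts at byte offset `2 * 16 == 32`.
// (The struct's actual size is defined in `bevy_render` and may differ.)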

pub fn early_prepass_build_indirect_parameters(
    preprocess_pipelines: Res<PreprocessPipelines>,
    build_indirect_params_bind_groups: Option<Res<BuildIndirectParametersBindGroups>>,
    pipeline_cache: Res<PipelineCache>,
    indirect_parameters_buffers: Option<Res<IndirectParametersBuffers>>,
    mut ctx: RenderContext,
) {
    run_build_indirect_parameters(
        &mut ctx,
        build_indirect_params_bind_groups.as_deref(),
        &pipeline_cache,
        indirect_parameters_buffers.as_deref(),
        &preprocess_pipelines.early_phase,
        "early_prepass_indirect_parameters_building",
    );
}

pub fn late_prepass_build_indirect_parameters(
    preprocess_pipelines: Res<PreprocessPipelines>,
    build_indirect_params_bind_groups: Option<Res<BuildIndirectParametersBindGroups>>,
    pipeline_cache: Res<PipelineCache>,
    indirect_parameters_buffers: Option<Res<IndirectParametersBuffers>>,
    mut ctx: RenderContext,
) {
    run_build_indirect_parameters(
        &mut ctx,
        build_indirect_params_bind_groups.as_deref(),
        &pipeline_cache,
        indirect_parameters_buffers.as_deref(),
        &preprocess_pipelines.late_phase,
        "late_prepass_indirect_parameters_building",
    );
}

pub fn main_build_indirect_parameters(
    preprocess_pipelines: Res<PreprocessPipelines>,
    build_indirect_params_bind_groups: Option<Res<BuildIndirectParametersBindGroups>>,
    pipeline_cache: Res<PipelineCache>,
    indirect_parameters_buffers: Option<Res<IndirectParametersBuffers>>,
    mut ctx: RenderContext,
) {
    run_build_indirect_parameters(
        &mut ctx,
        build_indirect_params_bind_groups.as_deref(),
        &pipeline_cache,
        indirect_parameters_buffers.as_deref(),
        &preprocess_pipelines.main_phase,
        "main_indirect_parameters_building",
    );
}

fn run_build_indirect_parameters(
    ctx: &mut RenderContext,
    build_indirect_params_bind_groups: Option<&BuildIndirectParametersBindGroups>,
    pipeline_cache: &PipelineCache,
    indirect_parameters_buffers: Option<&IndirectParametersBuffers>,
    preprocess_phase_pipelines: &PreprocessPhasePipelines,
    label: &'static str,
) {
    let Some(build_indirect_params_bind_groups) = build_indirect_params_bind_groups else {
        return;
    };

    let Some(indirect_parameters_buffers) = indirect_parameters_buffers else {
        return;
    };

    let command_encoder = ctx.command_encoder();

    let mut compute_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor {
        label: Some(label),
        timestamp_writes: None,
    });

    // Fetch the pipelines.
    let (
        Some(reset_indirect_batch_sets_pipeline_id),
        Some(build_indexed_indirect_params_pipeline_id),
        Some(build_non_indexed_indirect_params_pipeline_id),
    ) = (
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .pipeline_id,
    )
    else {
        warn!("The build indirect parameters pipelines weren't ready");
        return;
    };

    let (
        Some(reset_indirect_batch_sets_pipeline),
        Some(build_indexed_indirect_params_pipeline),
        Some(build_non_indexed_indirect_params_pipeline),
    ) = (
        pipeline_cache.get_compute_pipeline(reset_indirect_batch_sets_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_indexed_indirect_params_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_non_indexed_indirect_params_pipeline_id),
    )
    else {
        // This will happen while the pipeline is being compiled and is fine.
        return;
    };

    // Loop over each phase. As each has a separate set of buffers, we need to
    // build indirect parameters individually for each phase.
    for (phase_type_id, phase_build_indirect_params_bind_groups) in
        build_indirect_params_bind_groups.iter()
    {
        let Some(phase_indirect_parameters_buffers) =
            indirect_parameters_buffers.get(phase_type_id)
        else {
            continue;
        };

        // Build indexed indirect parameters.
        if let (
            Some(reset_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(true)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }

        // Build non-indexed indirect parameters.
        if let (
            Some(reset_non_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_non_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_non_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(false)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .non_indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }
    }
}

impl PreprocessPipelines {
    /// Returns true if the preprocessing and indirect parameters pipelines have
    /// been loaded or false otherwise.
    pub(crate) fn pipelines_are_loaded(
        &self,
        pipeline_cache: &PipelineCache,
        preprocessing_support: &GpuPreprocessingSupport,
    ) -> bool {
        match preprocessing_support.max_supported_mode {
            GpuPreprocessingMode::None => false,
            GpuPreprocessingMode::PreprocessingOnly => {
                self.direct_preprocess.is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_preprocess
                        .is_loaded(pipeline_cache)
            }
            GpuPreprocessingMode::Culling => {
                self.direct_preprocess.is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .early_gpu_occlusion_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .late_gpu_occlusion_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_build_indexed_indirect_params
                        .is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_build_non_indexed_indirect_params
                        .is_loaded(pipeline_cache)
                    && self.early_phase.is_loaded(pipeline_cache)
                    && self.late_phase.is_loaded(pipeline_cache)
                    && self.main_phase.is_loaded(pipeline_cache)
            }
        }
    }
}

impl PreprocessPhasePipelines {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.reset_indirect_batch_sets.is_loaded(pipeline_cache)
            && self
                .gpu_occlusion_culling_build_indexed_indirect_params
                .is_loaded(pipeline_cache)
            && self
                .gpu_occlusion_culling_build_non_indexed_indirect_params
                .is_loaded(pipeline_cache)
    }
}

impl PreprocessPipeline {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl ResetIndirectBatchSetsPipeline {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl BuildIndirectParametersPipeline {
    /// Returns true if this pipeline has been loaded into the pipeline cache or
    /// false otherwise.
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl SpecializedComputePipeline for PreprocessPipeline {
    type Key = PreprocessPipelineKey;

    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let mut shader_defs = vec!["WRITE_INDIRECT_PARAMETERS_METADATA".into()];
        if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
            shader_defs.push("INDIRECT".into());
            shader_defs.push("FRUSTUM_CULLING".into());
        }
        if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
            shader_defs.push("OCCLUSION_CULLING".into());
            if key.contains(PreprocessPipelineKey::EARLY_PHASE) {
                shader_defs.push("EARLY_PHASE".into());
            } else {
                shader_defs.push("LATE_PHASE".into());
            }
        }

        ComputePipelineDescriptor {
            label: Some(
                format!(
                    "mesh preprocessing ({})",
                    if key.contains(
                        PreprocessPipelineKey::OCCLUSION_CULLING
                            | PreprocessPipelineKey::EARLY_PHASE
                    ) {
                        "early GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                        "late GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
                        "GPU frustum culling"
                    } else {
                        "direct"
                    }
                )
                .into(),
            ),
            layout: vec![self.bind_group_layout.clone()],
            immediate_size: if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                4
            } else {
                0
            },
            shader: self.shader.clone(),
            shader_defs,
            ..default()
        }
    }
}
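
// For instance, a key of `FRUSTUM_CULLING | OCCLUSION_CULLING` without
// `EARLY_PHASE` produces the defs `WRITE_INDIRECT_PARAMETERS_METADATA`,
// `INDIRECT`, `FRUSTUM_CULLING`, `OCCLUSION_CULLING`, and `LATE_PHASE`, and
// the label "mesh preprocessing (late GPU occlusion culling)".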

impl FromWorld for PreprocessPipelines {
    fn from_world(world: &mut World) -> Self {
        // GPU culling bind group parameters are a superset of those in the CPU
        // culling (direct) shader.
        let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries();
        let gpu_frustum_culling_bind_group_layout_entries = gpu_culling_bind_group_layout_entries();
        let gpu_early_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries().extend_with_indices((
                (
                    11,
                    storage_buffer::<PreprocessWorkItem>(/*has_dynamic_offset=*/ false),
                ),
                (
                    12,
                    storage_buffer::<LatePreprocessWorkItemIndirectParameters>(
                        /*has_dynamic_offset=*/ false,
                    ),
                ),
            ));
        let gpu_late_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries().extend_with_indices(((
                12,
                storage_buffer_read_only::<LatePreprocessWorkItemIndirectParameters>(
                    /*has_dynamic_offset=*/ false,
                ),
            ),));

        let reset_indirect_batch_sets_bind_group_layout_entries =
            DynamicBindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (storage_buffer::<IndirectBatchSet>(false),),
            );

        // Indexed and non-indexed bind group parameters share all the bind
        // group layout entries except the final one.
        let build_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersIndexed>(false),));
        let build_non_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersNonIndexed>(false),));

        // Create the bind group layouts.
        let direct_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms direct bind group layout",
            &direct_bind_group_layout_entries,
        );
        let gpu_frustum_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU frustum culling bind group layout",
            &gpu_frustum_culling_bind_group_layout_entries,
        );
        let gpu_early_occlusion_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU early occlusion culling bind group layout",
            &gpu_early_occlusion_culling_bind_group_layout_entries,
        );
        let gpu_late_occlusion_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU late occlusion culling bind group layout",
            &gpu_late_occlusion_culling_bind_group_layout_entries,
        );
        let reset_indirect_batch_sets_bind_group_layout = BindGroupLayoutDescriptor::new(
            "reset indirect batch sets bind group layout",
            &reset_indirect_batch_sets_bind_group_layout_entries,
        );
        let build_indexed_indirect_params_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build indexed indirect parameters bind group layout",
            &build_indexed_indirect_params_bind_group_layout_entries,
        );
        let build_non_indexed_indirect_params_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build non-indexed indirect parameters bind group layout",
            &build_non_indexed_indirect_params_bind_group_layout_entries,
        );

        let preprocess_shader = load_embedded_asset!(world, "mesh_preprocess.wgsl");
        let reset_indirect_batch_sets_shader =
            load_embedded_asset!(world, "reset_indirect_batch_sets.wgsl");
        let build_indirect_params_shader =
            load_embedded_asset!(world, "build_indirect_params.wgsl");

        let preprocess_phase_pipelines = PreprocessPhasePipelines {
            reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline {
                bind_group_layout: reset_indirect_batch_sets_bind_group_layout.clone(),
                shader: reset_indirect_batch_sets_shader,
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader.clone(),
                    pipeline_id: None,
                },
        };

        PreprocessPipelines {
            direct_preprocess: PreprocessPipeline {
                bind_group_layout: direct_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_frustum_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            early_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_early_occlusion_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            late_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_late_occlusion_culling_bind_group_layout,
                shader: preprocess_shader,
                pipeline_id: None,
            },
            gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader,
                    pipeline_id: None,
                },
            early_phase: preprocess_phase_pipelines.clone(),
            late_phase: preprocess_phase_pipelines.clone(),
            main_phase: preprocess_phase_pipelines.clone(),
        }
    }
}

fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            // `view`
            (
                0,
                uniform_buffer::<ViewUniform>(/* has_dynamic_offset= */ true),
            ),
            // `current_input`
            (3, storage_buffer_read_only::<MeshInputUniform>(false)),
            // `previous_input`
            (4, storage_buffer_read_only::<MeshInputUniform>(false)),
            // `indices`
            (5, storage_buffer_read_only::<PreprocessWorkItem>(false)),
            // `output`
            (6, storage_buffer::<MeshUniform>(false)),
        ),
    )
}
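
// Note the gaps in the indices above (1-2 and 7 upward): they're left
// unassigned so that the culling variants below can extend this layout. Index
// 2, for example, becomes the previous view uniform in the occlusion-culling
// layout.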

// Returns the first 4 bind group layout entries shared between all invocations
// of the indirect parameters building shader.
fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            (0, storage_buffer_read_only::<MeshInputUniform>(false)),
            (
                1,
                storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
            ),
            (
                2,
                storage_buffer_read_only::<IndirectParametersGpuMetadata>(false),
            ),
            (3, storage_buffer::<IndirectBatchSet>(false)),
        ),
    )
}

// Returns the bind group layout entries for the GPU culling variants of the
// mesh preprocessing shader.
fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    // GPU culling bind group parameters are a superset of those in the CPU
    // culling (direct) shader.
    preprocess_direct_bind_group_layout_entries().extend_with_indices((
        // `indirect_parameters_cpu_metadata`
        (
            7,
            storage_buffer_read_only::<IndirectParametersCpuMetadata>(
                /* has_dynamic_offset= */ false,
            ),
        ),
        // `indirect_parameters_gpu_metadata`
        (
            8,
            storage_buffer::<IndirectParametersGpuMetadata>(/* has_dynamic_offset= */ false),
        ),
        // `mesh_culling_data`
        (
            9,
            storage_buffer_read_only::<MeshCullingData>(/* has_dynamic_offset= */ false),
        ),
    ))
}

fn gpu_occlusion_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    gpu_culling_bind_group_layout_entries().extend_with_indices((
        (
            2,
            uniform_buffer::<PreviousViewData>(/*has_dynamic_offset=*/ false),
        ),
        (
            10,
            texture_2d(TextureSampleType::Float { filterable: true }),
        ),
    ))
}
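
// Putting the builders together, the occlusion-culling layout binds: 0 = view,
// 2 = previous view, 3 = current input, 4 = previous input, 5 = work item
// indices, 6 = output, 7 = indirect parameters CPU metadata, 8 = indirect
// parameters GPU metadata, 9 = mesh culling data, and 10 = the depth pyramid
// texture. `PreprocessPipelines::from_world` above further adds 11-12 for the
// early variant and rebinds 12 read-only for the late variant.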

/// A system that specializes the `mesh_preprocess.wgsl`,
/// `reset_indirect_batch_sets.wgsl`, and `build_indirect_params.wgsl`
/// pipelines if necessary.
pub fn prepare_preprocess_pipelines(
    pipeline_cache: Res<PipelineCache>,
    render_device: Res<RenderDevice>,
    mut specialized_preprocess_pipelines: ResMut<SpecializedComputePipelines<PreprocessPipeline>>,
    mut specialized_reset_indirect_batch_sets_pipelines: ResMut<
        SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    >,
    mut specialized_build_indirect_parameters_pipelines: ResMut<
        SpecializedComputePipelines<BuildIndirectParametersPipeline>,
    >,
    preprocess_pipelines: ResMut<PreprocessPipelines>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    let preprocess_pipelines = preprocess_pipelines.into_inner();

    preprocess_pipelines.direct_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::empty(),
    );
    preprocess_pipelines.gpu_frustum_culling_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::FRUSTUM_CULLING,
    );

    if gpu_preprocessing_support.is_culling_supported() {
        preprocess_pipelines
            .early_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING
                    | PreprocessPipelineKey::OCCLUSION_CULLING
                    | PreprocessPipelineKey::EARLY_PHASE,
            );
        preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING,
            );
    }

    let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty();

    // If the GPU and driver support `multi_draw_indirect_count`, tell the
    // shader that.
    if render_device
        .wgpu_device()
        .features()
        .contains(WgpuFeatures::MULTI_DRAW_INDIRECT_COUNT)
    {
        build_indirect_parameters_pipeline_key
            .insert(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED);
    }

    preprocess_pipelines
        .gpu_frustum_culling_build_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key | BuildIndirectParametersPipelineKey::INDEXED,
        );
    preprocess_pipelines
        .gpu_frustum_culling_build_non_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key,
        );

    if !gpu_preprocessing_support.is_culling_supported() {
        return;
    }

    for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [
        (
            &mut preprocess_pipelines.early_phase,
            BuildIndirectParametersPipelineKey::EARLY_PHASE,
        ),
        (
            &mut preprocess_pipelines.late_phase,
            BuildIndirectParametersPipelineKey::LATE_PHASE,
        ),
        (
            &mut preprocess_pipelines.main_phase,
            BuildIndirectParametersPipelineKey::MAIN_PHASE,
        ),
    ] {
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .prepare(
                &pipeline_cache,
                &mut specialized_reset_indirect_batch_sets_pipelines,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::INDEXED
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
    }
}

impl PreprocessPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<PreprocessPipeline>,
        key: PreprocessPipelineKey,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
        self.pipeline_id = Some(preprocess_pipeline_id);
    }
}

impl SpecializedComputePipeline for ResetIndirectBatchSetsPipeline {
    type Key = ();

    fn specialize(&self, _: Self::Key) -> ComputePipelineDescriptor {
        ComputePipelineDescriptor {
            label: Some("reset indirect batch sets".into()),
            layout: vec![self.bind_group_layout.clone()],
            shader: self.shader.clone(),
            ..default()
        }
    }
}

impl SpecializedComputePipeline for BuildIndirectParametersPipeline {
    type Key = BuildIndirectParametersPipelineKey;

    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let mut shader_defs = vec![];
        if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
            shader_defs.push("INDEXED".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED) {
            shader_defs.push("MULTI_DRAW_INDIRECT_COUNT_SUPPORTED".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
            shader_defs.push("OCCLUSION_CULLING".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
            shader_defs.push("EARLY_PHASE".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
            shader_defs.push("LATE_PHASE".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::MAIN_PHASE) {
            shader_defs.push("MAIN_PHASE".into());
        }

        let label = format!(
            "{} build {}indexed indirect parameters",
            if !key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
                "frustum culling"
            } else if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
                "early occlusion culling"
            } else if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
                "late occlusion culling"
            } else {
                "main occlusion culling"
            },
            if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
                ""
            } else {
                "non-"
            }
        );

        ComputePipelineDescriptor {
            label: Some(label.into()),
            layout: vec![self.bind_group_layout.clone()],
            shader: self.shader.clone(),
            shader_defs,
            ..default()
        }
    }
}

impl ResetIndirectBatchSetsPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let reset_indirect_batch_sets_pipeline_id = pipelines.specialize(pipeline_cache, self, ());
        self.pipeline_id = Some(reset_indirect_batch_sets_pipeline_id);
    }
}

impl BuildIndirectParametersPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<BuildIndirectParametersPipeline>,
        key: BuildIndirectParametersPipelineKey,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let build_indirect_parameters_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
        self.pipeline_id = Some(build_indirect_parameters_pipeline_id);
    }
}
1524
/// A system that attaches the mesh uniform buffers to the bind groups for the
/// variants of the mesh preprocessing compute shader.
#[expect(
    clippy::too_many_arguments,
    reason = "it's a system that needs a lot of arguments"
)]
pub fn prepare_preprocess_bind_groups(
    mut commands: Commands,
    views: Query<(Entity, &ExtractedView)>,
    view_depth_pyramids: Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    indirect_parameters_buffers: Res<IndirectParametersBuffers>,
    mesh_culling_data_buffer: Res<MeshCullingDataBuffer>,
    view_uniforms: Res<ViewUniforms>,
    previous_view_uniforms: Res<PreviousViewUniforms>,
    pipelines: Res<PreprocessPipelines>,
) {
    // Grab the `BatchedInstanceBuffers`.
    let BatchedInstanceBuffers {
        current_input_buffer: current_input_buffer_vec,
        previous_input_buffer: previous_input_buffer_vec,
        phase_instance_buffers,
    } = batched_instance_buffers.into_inner();

    let (Some(current_input_buffer), Some(previous_input_buffer)) = (
        current_input_buffer_vec.buffer().buffer(),
        previous_input_buffer_vec.buffer().buffer(),
    ) else {
        return;
    };

    // Record whether we have any meshes that are to be drawn indirectly. If we
    // don't, then we can skip building indirect parameters.
    let mut any_indirect = false;

    // Loop over each view.
    for (view_entity, view) in &views {
        let mut bind_groups = TypeIdMap::default();

        // Loop over each phase.
        for (phase_type_id, phase_instance_buffers) in phase_instance_buffers {
            let UntypedPhaseBatchedInstanceBuffers {
                data_buffer: ref data_buffer_vec,
                ref work_item_buffers,
                ref late_indexed_indirect_parameters_buffer,
                ref late_non_indexed_indirect_parameters_buffer,
            } = *phase_instance_buffers;

            let Some(data_buffer) = data_buffer_vec.buffer() else {
                continue;
            };

            // Grab the indirect parameters buffers for this phase.
            let Some(phase_indirect_parameters_buffers) =
                indirect_parameters_buffers.get(phase_type_id)
            else {
                continue;
            };

            let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else {
                continue;
            };

            // Create the `PreprocessBindGroupBuilder`.
            let preprocess_bind_group_builder = PreprocessBindGroupBuilder {
                view: view_entity,
                late_indexed_indirect_parameters_buffer,
                late_non_indexed_indirect_parameters_buffer,
                render_device: &render_device,
                pipeline_cache: &pipeline_cache,
                phase_indirect_parameters_buffers,
                mesh_culling_data_buffer: &mesh_culling_data_buffer,
                view_uniforms: &view_uniforms,
                previous_view_uniforms: &previous_view_uniforms,
                pipelines: &pipelines,
                current_input_buffer,
                previous_input_buffer,
                data_buffer,
            };

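            // `Direct` work items don't build indirect draw parameters, so
            // they need only a single bind group. `Indirect` work items do:
            // with GPU occlusion culling buffers present, we create the
            // two-phase (early/late) bind groups, and without them the
            // frustum-culling-only ones.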
            // Depending on the type of work items we have, construct the
            // appropriate bind groups.
            let (was_indirect, bind_group) = match *work_item_buffers {
                PreprocessWorkItemBuffers::Direct(ref work_item_buffer) => (
                    false,
                    preprocess_bind_group_builder
                        .create_direct_preprocess_bind_groups(work_item_buffer),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: Some(ref gpu_occlusion_culling_work_item_buffers),
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_occlusion_culling_preprocess_bind_groups(
                            &view_depth_pyramids,
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                            gpu_occlusion_culling_work_item_buffers,
                        ),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: None,
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_frustum_culling_preprocess_bind_groups(
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                        ),
                ),
            };

            // Write that bind group in.
            if let Some(bind_group) = bind_group {
                any_indirect = any_indirect || was_indirect;
                bind_groups.insert(*phase_type_id, bind_group);
            }
        }

        // Save the bind groups.
        commands
            .entity(view_entity)
            .insert(PreprocessBindGroups(bind_groups));
    }

    // Now, if there were any indirect draw commands, create the bind groups for
    // the indirect parameters building shader.
    if any_indirect {
        create_build_indirect_parameters_bind_groups(
            &mut commands,
            &render_device,
            &pipeline_cache,
            &pipelines,
            current_input_buffer,
            &indirect_parameters_buffers,
        );
    }
}

/// A temporary structure that stores all the information needed to construct
/// bind groups for the mesh preprocessing shader.
struct PreprocessBindGroupBuilder<'a> {
    /// The render-world entity corresponding to the current view.
    view: Entity,
    /// The indirect compute dispatch parameters buffer for indexed meshes in
    /// the late prepass.
    late_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The indirect compute dispatch parameters buffer for non-indexed meshes
    /// in the late prepass.
    late_non_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The device.
    render_device: &'a RenderDevice,
    /// The pipeline cache.
    pipeline_cache: &'a PipelineCache,
    /// The buffers that store indirect draw parameters.
    phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers,
    /// The GPU buffer that stores the information needed to cull each mesh.
    mesh_culling_data_buffer: &'a MeshCullingDataBuffer,
    /// The GPU buffer that stores information about the view.
    view_uniforms: &'a ViewUniforms,
    /// The GPU buffer that stores information about the view from last frame.
    previous_view_uniforms: &'a PreviousViewUniforms,
    /// The pipelines for the mesh preprocessing shader.
    pipelines: &'a PreprocessPipelines,
    /// The GPU buffer containing the list of [`MeshInputUniform`]s for the
    /// current frame.
    current_input_buffer: &'a Buffer,
    /// The GPU buffer containing the list of [`MeshInputUniform`]s for the
    /// previous frame.
    previous_input_buffer: &'a Buffer,
    /// The GPU buffer containing the list of [`MeshUniform`]s for the current
    /// frame.
    ///
    /// This is the buffer containing the mesh's final transforms that the
    /// shaders will write to.
    data_buffer: &'a Buffer,
}

impl<'a> PreprocessBindGroupBuilder<'a> {
    /// Creates the bind groups for mesh preprocessing when GPU frustum culling
    /// and GPU occlusion culling are both disabled.
    fn create_direct_preprocess_bind_groups(
        &self,
        work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        // Don't use `as_entire_binding()` here; the shader reads the array
        // length and the underlying buffer may be longer than the actual size
        // of the vector.
        let work_item_buffer_size = NonZero::<u64>::try_from(
            work_item_buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()),
        )
        .ok();

        Some(PhasePreprocessBindGroups::Direct(
            self.render_device.create_bind_group(
                "preprocess_direct_bind_group",
                &self
                    .pipeline_cache
                    .get_bind_group_layout(&self.pipelines.direct_preprocess.bind_group_layout),
                &BindGroupEntries::with_indices((
                    (0, self.view_uniforms.uniforms.binding()?),
                    (3, self.current_input_buffer.as_entire_binding()),
                    (4, self.previous_input_buffer.as_entire_binding()),
                    (
                        5,
                        BindingResource::Buffer(BufferBinding {
                            buffer: work_item_buffer.buffer()?,
                            offset: 0,
                            size: work_item_buffer_size,
                        }),
                    ),
                    (6, self.data_buffer.as_entire_binding()),
                )),
            ),
        ))
    }

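    // A note on the bind group builders below: each one manually sizes its
    // work item buffer bindings as `len() * PreprocessWorkItem::min_size()`
    // rather than binding the whole buffer, because the underlying GPU buffers
    // are overallocated and the shader reads the bound array's length to know
    // how many work items are valid.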
    /// Creates the bind groups for mesh preprocessing when GPU occlusion
    /// culling is enabled.
    fn create_indirect_occlusion_culling_preprocess_bind_groups(
        &self,
        view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers,
    ) -> Option<PhasePreprocessBindGroups> {
        let GpuOcclusionCullingWorkItemBuffers {
            late_indexed: ref late_indexed_work_item_buffer,
            late_non_indexed: ref late_non_indexed_work_item_buffer,
            ..
        } = *gpu_occlusion_culling_work_item_buffers;

        let (view_depth_pyramid, previous_view_uniform_offset) =
            view_depth_pyramids.get(self.view).ok()?;

        Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
            early_indexed: self.create_indirect_occlusion_culling_early_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                indexed_work_item_buffer,
                late_indexed_work_item_buffer,
            ),

            early_non_indexed: self.create_indirect_occlusion_culling_early_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                non_indexed_work_item_buffer,
                late_non_indexed_work_item_buffer,
            ),

            late_indexed: self.create_indirect_occlusion_culling_late_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_indexed_work_item_buffer,
            ),

            late_non_indexed: self.create_indirect_occlusion_culling_late_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_non_indexed_work_item_buffer,
            ),
        })
    }

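    // Broadly, two-phase occlusion culling works as follows: the early phase
    // tests meshes against the previous frame's depth pyramid and appends
    // those it can't yet prove visible to the late work item buffers (bound
    // at indices 11 and 12 below). Once the depth pyramid has been rebuilt,
    // the late phase retests just those meshes, which is why its dispatch is
    // itself indirect, driven by `LatePreprocessWorkItemIndirectParameters`.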
    /// Creates the bind group for the first phase of mesh preprocessing of
    /// indexed meshes when GPU occlusion culling is enabled.
    fn create_indirect_occlusion_culling_early_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // Don't use `as_entire_binding()` here; the shader reads the array
                // length and the underlying buffer may be longer than the actual size
                // of the vector.
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .early_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

    /// Creates the bind group for the first phase of mesh preprocessing of
    /// non-indexed meshes when GPU occlusion culling is enabled.
    fn create_indirect_occlusion_culling_early_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_work_item_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                // Don't use `as_entire_binding()` here; the shader reads the array
                // length and the underlying buffer may be longer than the actual size
                // of the vector.
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_non_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .early_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                BufferBinding {
                                    buffer: late_non_indexed_work_item_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

    /// Creates the bind group for the second phase of mesh preprocessing of
    /// indexed meshes when GPU occlusion culling is enabled.
    fn create_indirect_occlusion_culling_late_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // Don't use `as_entire_binding()` here; the shader reads the array
                // length and the underlying buffer may be longer than the actual size
                // of the vector.
                let late_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .late_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: late_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

    /// Creates the bind group for the second phase of mesh preprocessing of
    /// non-indexed meshes when GPU occlusion culling is enabled.
    fn create_indirect_occlusion_culling_late_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                // Don't use `as_entire_binding()` here; the shader reads the array
                // length and the underlying buffer may be longer than the actual size
                // of the vector.
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_non_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .late_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

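    // The frustum-culling-only builders below bind the same buffers as the
    // occlusion culling ones, minus the depth pyramid, the previous view
    // uniforms, and the late work item buffers (bindings 2, 10, 11, and 12),
    // since there's no second phase.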
    /// Creates the bind groups for mesh preprocessing when GPU frustum culling
    /// is enabled, but GPU occlusion culling is disabled.
    fn create_indirect_frustum_culling_preprocess_bind_groups(
        &self,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        Some(PhasePreprocessBindGroups::IndirectFrustumCulling {
            indexed: self
                .create_indirect_frustum_culling_indexed_bind_group(indexed_work_item_buffer),
            non_indexed: self.create_indirect_frustum_culling_non_indexed_bind_group(
                non_indexed_work_item_buffer,
            ),
        })
    }

    /// Creates the bind group for mesh preprocessing of indexed meshes when GPU
    /// frustum culling is enabled, but GPU occlusion culling is disabled.
    fn create_indirect_frustum_culling_indexed_bind_group(
        &self,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
            ) => {
                // Don't use `as_entire_binding()` here; the shader reads the array
                // length and the underlying buffer may be longer than the actual size
                // of the vector.
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_gpu_indexed_frustum_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .gpu_frustum_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

    /// Creates the bind group for mesh preprocessing of non-indexed meshes when
    /// GPU frustum culling is enabled, but GPU occlusion culling is disabled.
    fn create_indirect_frustum_culling_non_indexed_bind_group(
        &self,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
            ) => {
                // Don't use `as_entire_binding()` here; the shader reads the array
                // length and the underlying buffer may be longer than the actual size
                // of the vector.
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_gpu_non_indexed_frustum_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .gpu_frustum_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
}

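// The bind groups built below serve two compute shaders: one that resets the
// counters in each `IndirectBatchSet` so that the build shader can start from
// a clean slate, and one that expands the CPU- and GPU-side metadata into the
// final `IndirectParametersIndexed`/`IndirectParametersNonIndexed` structures
// that the indirect draws consume.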
/// Creates bind groups from the indirect parameters metadata and data buffers
/// for the indirect batch set reset shader and the indirect parameters
/// building shader.
fn create_build_indirect_parameters_bind_groups(
    commands: &mut Commands,
    render_device: &RenderDevice,
    pipeline_cache: &PipelineCache,
    pipelines: &PreprocessPipelines,
    current_input_buffer: &Buffer,
    indirect_parameters_buffers: &IndirectParametersBuffers,
) {
    let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new();

    for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() {
        build_indirect_parameters_bind_groups.insert(
            *phase_type_id,
            PhaseBuildIndirectParametersBindGroups {
                reset_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
                    .indexed
                    .batch_sets_buffer(),)
                {
                    (Some(indexed_batch_sets_buffer),) => Some(
                        render_device.create_bind_group(
                            "reset_indexed_indirect_batch_sets_bind_group",
                            // The early bind group is good for the main phase and late
                            // phase too. They bind the same buffers.
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .early_phase
                                    .reset_indirect_batch_sets
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                reset_non_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
                    .non_indexed
                    .batch_sets_buffer(),)
                {
                    (Some(non_indexed_batch_sets_buffer),) => Some(
                        render_device.create_bind_group(
                            "reset_non_indexed_indirect_batch_sets_bind_group",
                            // The early bind group is good for the main phase and late
                            // phase too. They bind the same buffers.
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .early_phase
                                    .reset_indirect_batch_sets
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                build_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.indexed.data_buffer(),
                    phase_indirect_parameters_buffer.indexed.batch_sets_buffer(),
                ) {
                    (
                        Some(indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(indexed_indirect_parameters_data_buffer),
                        Some(indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_indexed_indirect_parameters_bind_group",
                            // The frustum culling bind group is good for occlusion culling
                            // too. They bind the same buffers.
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .gpu_frustum_culling_build_indexed_indirect_params
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                // Don't use `as_entire_binding` here; the shader reads
                                // the length and `RawBufferVec` overallocates.
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                indexed_batch_sets_buffer.as_entire_binding(),
                                indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                build_non_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.non_indexed.data_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .batch_sets_buffer(),
                ) {
                    (
                        Some(non_indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_data_buffer),
                        Some(non_indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_non_indexed_indirect_parameters_bind_group",
                            // The frustum culling bind group is good for occlusion culling
                            // too. They bind the same buffers.
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .gpu_frustum_culling_build_non_indexed_indirect_params
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                // Don't use `as_entire_binding` here; the shader reads
                                // the length and `RawBufferVec` overallocates.
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                                non_indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },
            },
        );
    }

    commands.insert_resource(build_indirect_parameters_bind_groups);
}

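// `MeshCullingData` is the per-mesh bounding information that the culling
// shaders read through binding 9 of the bind groups above; the system below
// just uploads the CPU-side vector to the GPU.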
/// Writes the information needed to do GPU mesh culling to the GPU.
pub fn write_mesh_culling_data_buffer(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
) {
    mesh_culling_data_buffer.write_buffer(&render_device, &render_queue);
}
