Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bevyengine
GitHub Repository: bevyengine/bevy
Path: blob/main/crates/bevy_pbr/src/render/mesh.rs
9421 views
1
use crate::contact_shadows::ViewContactShadowsUniformOffset;
2
use crate::{
3
material_bind_groups::{MaterialBindGroupIndex, MaterialBindGroupSlot},
4
resources::write_atmosphere_buffer,
5
skin::skin_uniforms_from_world,
6
};
7
use bevy_asset::{embedded_asset, load_embedded_asset, AssetId};
8
use bevy_camera::{
9
primitives::Aabb,
10
visibility::{NoFrustumCulling, RenderLayers, ViewVisibility, VisibilityRange},
11
Camera, Projection,
12
};
13
use bevy_core_pipeline::{
14
core_3d::{AlphaMask3d, Opaque3d, Transparent3d, CORE_3D_DEPTH_FORMAT},
15
deferred::{AlphaMask3dDeferred, Opaque3dDeferred},
16
oit::{prepare_oit_buffers, OrderIndependentTransparencySettingsOffset},
17
prepass::MotionVectorPrepass,
18
};
19
use bevy_derive::{Deref, DerefMut};
20
use bevy_diagnostic::FrameCount;
21
use bevy_ecs::{
22
entity::EntityHashSet,
23
prelude::*,
24
query::{QueryData, ROQueryItem},
25
relationship::RelationshipSourceCollection,
26
system::{lifetimeless::*, SystemParamItem, SystemState},
27
};
28
use bevy_image::{BevyDefault, ImageSampler, TextureFormatPixelInfo};
29
use bevy_light::{
30
EnvironmentMapLight, IrradianceVolume, NotShadowCaster, NotShadowReceiver,
31
ShadowFilteringMethod, TransmittedShadowReceiver,
32
};
33
use bevy_math::{Affine3, Affine3Ext, Rect, UVec2, Vec3, Vec4};
34
use bevy_mesh::{
35
skinning::SkinnedMesh, BaseMeshPipelineKey, Mesh, Mesh3d, MeshTag, MeshVertexBufferLayoutRef,
36
VertexAttributeDescriptor,
37
};
38
use bevy_platform::collections::{hash_map::Entry, HashMap};
39
use bevy_render::{
40
batching::{
41
gpu_preprocessing::{
42
self, GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers,
43
IndirectParametersCpuMetadata, IndirectParametersIndexed, IndirectParametersNonIndexed,
44
InstanceInputUniformBuffer, UntypedPhaseIndirectParametersBuffers,
45
},
46
no_gpu_preprocessing, GetBatchData, GetFullBatchData, NoAutomaticBatching,
47
},
48
mesh::{allocator::MeshAllocator, RenderMesh, RenderMeshBufferInfo},
49
render_asset::RenderAssets,
50
render_phase::{
51
BinnedRenderPhasePlugin, InputUniformIndex, PhaseItem, PhaseItemExtraIndex, RenderCommand,
52
RenderCommandResult, SortedRenderPhasePlugin, TrackedRenderPass,
53
},
54
render_resource::*,
55
renderer::{RenderAdapter, RenderDevice, RenderQueue},
56
sync_world::MainEntityHashSet,
57
texture::{DefaultImageSampler, GpuImage},
58
view::{
59
self, NoIndirectDrawing, RenderVisibilityRanges, RetainedViewEntity, ViewTarget,
60
ViewUniformOffset,
61
},
62
Extract,
63
};
64
use bevy_shader::{load_shader_library, Shader, ShaderDefVal, ShaderSettings};
65
use bevy_transform::components::GlobalTransform;
66
use bevy_utils::{default, BufferedChannel, Parallel, TypeIdMap};
67
use core::any::TypeId;
68
use core::mem::size_of;
69
use material_bind_groups::MaterialBindingId;
70
use tracing::{error, info_span, warn, Instrument};
71
72
use self::irradiance_volume::IRRADIANCE_VOLUMES_ARE_USABLE;
73
use crate::{
74
render::{
75
morph::{
76
extract_morphs, no_automatic_morph_batching, prepare_morphs, MorphIndices,
77
MorphUniforms,
78
},
79
skin::no_automatic_skin_batching,
80
},
81
*,
82
};
83
use bevy_core_pipeline::oit::OrderIndependentTransparencySettings;
84
use bevy_core_pipeline::prepass::{DeferredPrepass, DepthPrepass, NormalPrepass};
85
use bevy_core_pipeline::tonemapping::{DebandDither, Tonemapping};
86
use bevy_ecs::change_detection::Tick;
87
use bevy_ecs::system::SystemChangeTick;
88
use bevy_render::camera::TemporalJitter;
89
use bevy_render::prelude::Msaa;
90
use bevy_render::sync_world::{MainEntity, MainEntityHashMap};
91
use bevy_render::view::ExtractedView;
92
use bevy_render::RenderSystems::PrepareAssets;
93
use bevy_tasks::ComputeTaskPool;
94
95
use bytemuck::{Pod, Zeroable};
96
use nonmax::{NonMaxU16, NonMaxU32};
97
use smallvec::{smallvec, SmallVec};
98
use static_assertions::const_assert_eq;
99
100
/// Provides support for rendering 3D meshes.
pub struct MeshRenderPlugin {
    /// Whether we're building [`MeshUniform`]s on GPU.
    ///
    /// This requires compute shader support and so will be forcibly disabled if
    /// the platform doesn't support those.
    pub use_gpu_instance_buffer_builder: bool,
    /// Debugging flags that can optionally be set when constructing the renderer.
    pub debug_flags: RenderDebugFlags,
}
110
111
impl MeshRenderPlugin {
112
/// Creates a new [`MeshRenderPlugin`] with the given debug flags.
113
pub fn new(debug_flags: RenderDebugFlags) -> MeshRenderPlugin {
114
MeshRenderPlugin {
115
use_gpu_instance_buffer_builder: false,
116
debug_flags,
117
}
118
}
119
}
120
121
/// How many textures are allowed in the view bind group layout (`@group(0)`) before
/// broader compatibility with WebGL and WebGPU is at risk, due to the minimum guaranteed
/// values for `MAX_TEXTURE_IMAGE_UNITS` (in WebGL) and `maxSampledTexturesPerShaderStage` (in WebGPU),
/// currently both at 16.
///
/// We use 10 here because it still leaves us, in a worst case scenario, with 6 textures for the other bind groups.
///
/// See: <https://gpuweb.github.io/gpuweb/#limits>
// Only compiled in debug builds: this constant backs a debug-only sanity check.
#[cfg(debug_assertions)]
pub const MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES: usize = 10;
131
132
impl Plugin for MeshRenderPlugin {
    fn build(&self, app: &mut App) {
        // Register the WGSL libraries that the mesh shaders `#import`.
        load_shader_library!(app, "forward_io.wgsl");
        // `mesh_view_types.wgsl` is specialized with the compile-time light
        // limits so the WGSL array sizes agree with the Rust-side constants.
        load_shader_library!(app, "mesh_view_types.wgsl", |settings| *settings =
            ShaderSettings {
                shader_defs: vec![
                    ShaderDefVal::UInt(
                        "MAX_DIRECTIONAL_LIGHTS".into(),
                        MAX_DIRECTIONAL_LIGHTS as u32
                    ),
                    ShaderDefVal::UInt(
                        "MAX_CASCADES_PER_LIGHT".into(),
                        MAX_CASCADES_PER_LIGHT as u32,
                    )
                ]
            });
        load_shader_library!(app, "mesh_view_bindings.wgsl");
        load_shader_library!(app, "mesh_types.wgsl");
        load_shader_library!(app, "mesh_functions.wgsl");
        load_shader_library!(app, "skinning.wgsl");
        load_shader_library!(app, "morph.wgsl");
        load_shader_library!(app, "occlusion_culling.wgsl");

        embedded_asset!(app, "mesh.wgsl");

        // Everything below only applies when a render sub-app exists.
        if app.get_sub_app(RenderApp).is_none() {
            return;
        }

        app.add_systems(
            PostUpdate,
            (no_automatic_skin_batching, no_automatic_morph_batching),
        )
        // Register one phase plugin per render phase that meshes participate in.
        .add_plugins((
            BinnedRenderPhasePlugin::<Opaque3d, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<AlphaMask3d, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<Shadow, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<Opaque3dDeferred, MeshPipeline>::new(self.debug_flags),
            BinnedRenderPhasePlugin::<AlphaMask3dDeferred, MeshPipeline>::new(self.debug_flags),
            SortedRenderPhasePlugin::<Transmissive3d, MeshPipeline>::new(self.debug_flags),
            SortedRenderPhasePlugin::<Transparent3d, MeshPipeline>::new(self.debug_flags),
        ));

        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<MorphUniforms>()
                .init_resource::<MorphIndices>()
                .init_resource::<MeshCullingDataBuffer>()
                .init_resource::<RenderMaterialInstances>()
                .configure_sets(
                    ExtractSchedule,
                    // Mesh extraction requires visibility ranges and the
                    // material-instance sweep to have run first.
                    MeshExtractionSystems
                        .after(view::extract_visibility_ranges)
                        .after(late_sweep_material_instances),
                )
                .add_systems(
                    ExtractSchedule,
                    (
                        extract_skins,
                        extract_morphs,
                        // Clear last frame's batched instance buffers before
                        // extraction repopulates them.
                        gpu_preprocessing::clear_batched_gpu_instance_buffers::<MeshPipeline>
                            .before(MeshExtractionSystems),
                    ),
                )
                .add_systems(
                    Render,
                    (
                        set_mesh_motion_vector_flags.in_set(RenderSystems::PrepareMeshes),
                        prepare_skins.in_set(RenderSystems::PrepareResources),
                        prepare_morphs.in_set(RenderSystems::PrepareResources),
                        prepare_mesh_bind_groups.in_set(RenderSystems::PrepareBindGroups),
                        // View bind groups depend on the OIT and atmosphere
                        // buffers already being written.
                        prepare_mesh_view_bind_groups
                            .in_set(RenderSystems::PrepareBindGroups)
                            .after(prepare_oit_buffers)
                            .after(write_atmosphere_buffer),
                        no_gpu_preprocessing::clear_batched_cpu_instance_buffers::<MeshPipeline>
                            .in_set(RenderSystems::Cleanup)
                            .after(RenderSystems::Render),
                    ),
                );
        }
    }

    fn finish(&self, app: &mut App) {
        let mut mesh_bindings_shader_defs = Vec::with_capacity(1);

        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
            render_app
                .init_resource::<ViewKeyCache>()
                .init_resource::<ViewSpecializationTicks>()
                .init_resource::<GpuPreprocessingSupport>()
                .add_systems(RenderStartup, skin_uniforms_from_world)
                .add_systems(
                    Render,
                    check_views_need_specialization.in_set(PrepareAssets),
                );

            // GPU uniform building is opt-in *and* gated on hardware support.
            let gpu_preprocessing_support =
                render_app.world().resource::<GpuPreprocessingSupport>();
            let use_gpu_instance_buffer_builder =
                self.use_gpu_instance_buffer_builder && gpu_preprocessing_support.is_available();

            let render_mesh_instances = RenderMeshInstances::new(use_gpu_instance_buffer_builder);
            render_app.insert_resource(render_mesh_instances);

            if use_gpu_instance_buffer_builder {
                // GPU path: extract into per-thread queues and build uniforms
                // on GPU from `MeshInputUniform`s.
                render_app
                    .init_resource::<gpu_preprocessing::BatchedInstanceBuffers<
                        MeshUniform,
                        MeshInputUniform
                    >>()
                    .init_resource::<RenderMeshInstanceGpuQueues>()
                    .init_resource::<MeshesToReextractNextFrame>()
                    .add_systems(
                        ExtractSchedule,
                        extract_meshes_for_gpu_building.in_set(MeshExtractionSystems),
                    )
                    .add_systems(
                        Render,
                        (
                            gpu_preprocessing::write_batched_instance_buffers::<MeshPipeline>
                                .in_set(RenderSystems::PrepareResourcesFlush),
                            gpu_preprocessing::delete_old_work_item_buffers::<MeshPipeline>
                                .in_set(RenderSystems::PrepareResources),
                            collect_meshes_for_gpu_building
                                .in_set(RenderSystems::PrepareMeshes)
                                // This must be before
                                // `set_mesh_motion_vector_flags` so it doesn't
                                // overwrite those flags.
                                .before(set_mesh_motion_vector_flags),
                        ),
                    );
            } else {
                // CPU path: build `MeshUniform`s directly into a CPU-side
                // batched instance buffer.
                let render_device = render_app.world().resource::<RenderDevice>();
                let cpu_batched_instance_buffer = no_gpu_preprocessing::BatchedInstanceBuffer::<
                    MeshUniform,
                >::new(&render_device.limits());
                render_app
                    .insert_resource(cpu_batched_instance_buffer)
                    .add_systems(
                        ExtractSchedule,
                        extract_meshes_for_cpu_building.in_set(MeshExtractionSystems),
                    )
                    .add_systems(
                        Render,
                        no_gpu_preprocessing::write_batched_instance_buffer::<MeshPipeline>
                            .in_set(RenderSystems::PrepareResourcesFlush),
                    );
            };

            // When uniform (rather than storage) buffers are used for mesh
            // data, the shader needs to know the per-object batch size.
            let render_device = render_app.world().resource::<RenderDevice>();
            if let Some(per_object_buffer_batch_size) =
                GpuArrayBuffer::<MeshUniform>::batch_size(&render_device.limits())
            {
                mesh_bindings_shader_defs.push(ShaderDefVal::UInt(
                    "PER_OBJECT_BUFFER_BATCH_SIZE".into(),
                    per_object_buffer_batch_size,
                ));
            }

            render_app
                .init_resource::<MeshPipelineViewLayouts>()
                .init_resource::<MeshPipeline>();
        }

        // Load the mesh_bindings shader module here as it depends on runtime information about
        // whether storage buffers are supported, or the maximum uniform buffer binding size.
        load_shader_library!(app, "mesh_bindings.wgsl", move |settings| *settings =
            ShaderSettings {
                shader_defs: mesh_bindings_shader_defs.clone(),
            });
    }
}
305
306
/// This resource caches [`MeshPipelineKey`]s for each view with pre-enabled features needed to properly
/// setup the [`MeshViewBindGroup`] layout in specialized [`MeshPipeline`]s.
///
/// Keyed by [`RetainedViewEntity`]; refreshed by [`check_views_need_specialization`].
#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)]
pub struct ViewKeyCache(HashMap<RetainedViewEntity, MeshPipelineKey>);
310
311
/// For each view, the change tick at which its cached [`MeshPipelineKey`]
/// last changed, used to decide whether pipelines need respecialization.
#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)]
pub struct ViewSpecializationTicks(HashMap<RetainedViewEntity, Tick>);
313
314
/// Rebuilds each view's [`MeshPipelineKey`] from its current components and,
/// if it differs from the cached key, updates [`ViewKeyCache`] and records the
/// current tick in [`ViewSpecializationTicks`] so dependent pipelines get
/// respecialized.
pub fn check_views_need_specialization(
    mut view_key_cache: ResMut<ViewKeyCache>,
    mut view_specialization_ticks: ResMut<ViewSpecializationTicks>,
    mut views: Query<(
        &ExtractedView,
        &Msaa,
        Option<&Tonemapping>,
        Option<&DebandDither>,
        Option<&ShadowFilteringMethod>,
        Has<ScreenSpaceAmbientOcclusion>,
        (
            Has<NormalPrepass>,
            Has<DepthPrepass>,
            Has<MotionVectorPrepass>,
            Has<DeferredPrepass>,
        ),
        Option<&ScreenSpaceTransmission>,
        Has<TemporalJitter>,
        Option<&Projection>,
        Has<DistanceFog>,
        (
            Has<RenderViewLightProbes<EnvironmentMapLight>>,
            Has<RenderViewLightProbes<IrradianceVolume>>,
        ),
        Has<OrderIndependentTransparencySettings>,
        Has<ExtractedAtmosphere>,
        Has<ScreenSpaceReflectionsUniform>,
    )>,
    ticks: SystemChangeTick,
) {
    for (
        view,
        msaa,
        tonemapping,
        dither,
        shadow_filter_method,
        ssao,
        (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass),
        transmission,
        temporal_jitter,
        projection,
        distance_fog,
        (has_environment_maps, has_irradiance_volumes),
        has_oit,
        has_atmosphere,
        has_ssr,
    ) in views.iter_mut()
    {
        // Start from the MSAA/HDR base key and OR in one flag per feature.
        let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples())
            | MeshPipelineKey::from_hdr(view.hdr);

        if normal_prepass {
            view_key |= MeshPipelineKey::NORMAL_PREPASS;
        }

        if depth_prepass {
            view_key |= MeshPipelineKey::DEPTH_PREPASS;
        }

        if motion_vector_prepass {
            view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS;
        }

        if deferred_prepass {
            view_key |= MeshPipelineKey::DEFERRED_PREPASS;
        }

        if temporal_jitter {
            view_key |= MeshPipelineKey::TEMPORAL_JITTER;
        }

        if has_environment_maps {
            view_key |= MeshPipelineKey::ENVIRONMENT_MAP;
        }

        if has_irradiance_volumes {
            view_key |= MeshPipelineKey::IRRADIANCE_VOLUME;
        }

        if has_ssr {
            view_key |= MeshPipelineKey::SCREEN_SPACE_REFLECTIONS;
        }

        if has_oit {
            view_key |= MeshPipelineKey::OIT_ENABLED;
        }

        if has_atmosphere {
            view_key |= MeshPipelineKey::ATMOSPHERE;
        }

        if view.invert_culling {
            view_key |= MeshPipelineKey::INVERT_CULLING;
        }

        if let Some(projection) = projection {
            view_key |= match projection {
                Projection::Perspective(_) => MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE,
                Projection::Orthographic(_) => MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC,
                Projection::Custom(_) => MeshPipelineKey::VIEW_PROJECTION_NONSTANDARD,
            };
        }

        // Views without an explicit filtering method use the default.
        match shadow_filter_method.unwrap_or(&ShadowFilteringMethod::default()) {
            ShadowFilteringMethod::Hardware2x2 => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2;
            }
            ShadowFilteringMethod::Gaussian => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN;
            }
            ShadowFilteringMethod::Temporal => {
                view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL;
            }
        }

        // Tonemapping/dithering are done in-shader only for non-HDR targets.
        if !view.hdr {
            if let Some(tonemapping) = tonemapping {
                view_key |= MeshPipelineKey::TONEMAP_IN_SHADER;
                view_key |= tonemapping_pipeline_key(*tonemapping);
            }
            if let Some(DebandDither::Enabled) = dither {
                view_key |= MeshPipelineKey::DEBAND_DITHER;
            }
        }
        if ssao {
            view_key |= MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION;
        }
        if distance_fog {
            view_key |= MeshPipelineKey::DISTANCE_FOG;
        }
        if let Some(transmission) = transmission {
            view_key |= transmission.quality.pipeline_key();
        }
        // Only touch the cache (and the tick) if the key actually changed, so
        // unchanged views don't trigger respecialization.
        if !view_key_cache
            .get_mut(&view.retained_view_entity)
            .is_some_and(|current_key| *current_key == view_key)
        {
            view_key_cache.insert(view.retained_view_entity, view_key);
            view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run());
        }
    }
}
456
457
/// The current and previous-frame model transforms of a mesh, plus its
/// [`MeshFlags`], as kept on the render-world entity.
#[derive(Component)]
pub struct MeshTransforms {
    /// The model-to-world transform for this frame.
    pub world_from_local: Affine3,
    /// The model-to-world transform from the previous frame (used for motion
    /// vectors).
    pub previous_world_from_local: Affine3,
    /// Packed [`MeshFlags`] bits.
    pub flags: u32,
}
463
464
/// The per-mesh data uploaded to the GPU, matching the `Mesh` struct in
/// `mesh_types.wgsl`. Field order and packing are part of the shader ABI.
#[derive(ShaderType, Clone)]
pub struct MeshUniform {
    // Affine 4x3 matrices transposed to 3x4
    pub world_from_local: [Vec4; 3],
    pub previous_world_from_local: [Vec4; 3],
    // 3x3 matrix packed in mat2x4 and f32 as:
    //   [0].xyz, [1].x,
    //   [1].yz, [2].xy
    //   [2].z
    pub local_from_world_transpose_a: [Vec4; 2],
    pub local_from_world_transpose_b: f32,
    pub flags: u32,
    // Four 16-bit unsigned normalized UV values packed into a `UVec2`:
    //
    //                         <--- MSB                   LSB --->
    //                         +---- min v ----+ +---- min u ----+
    //     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
    //                         +---- max v ----+ +---- max u ----+
    //     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
    //
    // (MSB: most significant bit; LSB: least significant bit.)
    pub lightmap_uv_rect: UVec2,
    /// The index of this mesh's first vertex in the vertex buffer.
    ///
    /// Multiple meshes can be packed into a single vertex buffer (see
    /// [`MeshAllocator`]). This value stores the offset of the first vertex in
    /// this mesh in that buffer.
    pub first_vertex_index: u32,
    /// The current skin index, or `u32::MAX` if there's no skin.
    pub current_skin_index: u32,
    /// The material and lightmap indices, packed into 32 bits.
    ///
    /// Low 16 bits: index of the material inside the bind group data.
    /// High 16 bits: index of the lightmap in the binding array.
    pub material_and_lightmap_bind_group_slot: u32,
    /// User supplied tag to identify this mesh instance.
    pub tag: u32,
    /// Padding.
    pub pad: u32,
}
504
505
/// Information that has to be transferred from CPU to GPU in order to produce
/// the full [`MeshUniform`].
///
/// This is essentially a subset of the fields in [`MeshUniform`] above.
#[derive(ShaderType, Pod, Zeroable, Clone, Copy, Default, Debug)]
#[repr(C)]
pub struct MeshInputUniform {
    /// Affine 4x3 matrix transposed to 3x4.
    pub world_from_local: [Vec4; 3],
    /// Four 16-bit unsigned normalized UV values packed into a `UVec2`:
    ///
    /// ```text
    ///                         <--- MSB                   LSB --->
    ///                         +---- min v ----+ +---- min u ----+
    ///     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
    ///                         +---- max v ----+ +---- max u ----+
    ///     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
    ///
    /// (MSB: most significant bit; LSB: least significant bit.)
    /// ```
    pub lightmap_uv_rect: UVec2,
    /// Various [`MeshFlags`].
    pub flags: u32,
    /// The index of this mesh's [`MeshInputUniform`] in the previous frame's
    /// buffer, if applicable.
    ///
    /// This is used for TAA. If not present, this will be `u32::MAX`.
    pub previous_input_index: u32,
    /// The index of this mesh's first vertex in the vertex buffer.
    ///
    /// Multiple meshes can be packed into a single vertex buffer (see
    /// [`MeshAllocator`]). This value stores the offset of the first vertex in
    /// this mesh in that buffer.
    pub first_vertex_index: u32,
    /// The index of this mesh's first index in the index buffer, if any.
    ///
    /// Multiple meshes can be packed into a single index buffer (see
    /// [`MeshAllocator`]). This value stores the offset of the first index in
    /// this mesh in that buffer.
    ///
    /// If this mesh isn't indexed, this value is ignored.
    pub first_index_index: u32,
    /// For an indexed mesh, the number of indices that make it up; for a
    /// non-indexed mesh, the number of vertices in it.
    pub index_count: u32,
    /// The current skin index, or `u32::MAX` if there's no skin.
    pub current_skin_index: u32,
    /// The material and lightmap indices, packed into 32 bits.
    ///
    /// Low 16 bits: index of the material inside the bind group data.
    /// High 16 bits: index of the lightmap in the binding array.
    pub material_and_lightmap_bind_group_slot: u32,
    /// The number of the frame on which this [`MeshInputUniform`] was built.
    ///
    /// This is used to validate the previous transform and skin. If this
    /// [`MeshInputUniform`] wasn't updated on this frame, then we know that
    /// neither this mesh's transform nor that of its joints have been updated
    /// on this frame, and therefore the transforms of both this mesh and its
    /// joints must be identical to those for the previous frame.
    pub timestamp: u32,
    /// User supplied tag to identify this mesh instance.
    pub tag: u32,
    /// Padding.
    pub pad: u32,
}
570
571
/// Information about each mesh instance needed to cull it on GPU.
///
/// This consists of its axis-aligned bounding box (AABB).
#[derive(ShaderType, Pod, Zeroable, Clone, Copy, Default)]
#[repr(C)]
pub struct MeshCullingData {
    /// The 3D center of the AABB in model space, padded with an extra unused
    /// float value.
    pub aabb_center: Vec4,
    /// The 3D extents of the AABB in model space, divided by two, padded with
    /// an extra unused float value.
    pub aabb_half_extents: Vec4,
}
584
585
/// A GPU buffer that holds the information needed to cull meshes on GPU.
///
/// At the moment, this simply holds each mesh's AABB.
///
/// To avoid wasting CPU time in the CPU culling case, this buffer will be empty
/// if GPU culling isn't in use.
#[derive(Resource, Deref, DerefMut)]
pub struct MeshCullingDataBuffer(RawBufferVec<MeshCullingData>);
593
594
impl MeshUniform {
595
pub fn new(
596
mesh_transforms: &MeshTransforms,
597
first_vertex_index: u32,
598
material_bind_group_slot: MaterialBindGroupSlot,
599
maybe_lightmap: Option<(LightmapSlotIndex, Rect)>,
600
current_skin_index: Option<u32>,
601
tag: Option<u32>,
602
) -> Self {
603
let (local_from_world_transpose_a, local_from_world_transpose_b) =
604
mesh_transforms.world_from_local.inverse_transpose_3x3();
605
let lightmap_bind_group_slot = match maybe_lightmap {
606
None => u16::MAX,
607
Some((slot_index, _)) => slot_index.into(),
608
};
609
610
Self {
611
world_from_local: mesh_transforms.world_from_local.to_transpose(),
612
previous_world_from_local: mesh_transforms.previous_world_from_local.to_transpose(),
613
lightmap_uv_rect: pack_lightmap_uv_rect(maybe_lightmap.map(|(_, uv_rect)| uv_rect)),
614
local_from_world_transpose_a,
615
local_from_world_transpose_b,
616
flags: mesh_transforms.flags,
617
first_vertex_index,
618
current_skin_index: current_skin_index.unwrap_or(u32::MAX),
619
material_and_lightmap_bind_group_slot: u32::from(material_bind_group_slot)
620
| ((lightmap_bind_group_slot as u32) << 16),
621
tag: tag.unwrap_or(0),
622
pad: 0,
623
}
624
}
625
}
626
627
// NOTE: These must match the bit flags in bevy_pbr/src/render/mesh_types.wgsl!
bitflags::bitflags! {
    /// Various flags and tightly-packed values on a mesh.
    ///
    /// Flags grow from the top bit down; other values grow from the bottom bit
    /// up.
    #[repr(transparent)]
    pub struct MeshFlags: u32 {
        /// Bitmask for the 16-bit index into the LOD array.
        ///
        /// This will be `u16::MAX` if this mesh has no LOD.
        const LOD_INDEX_MASK              = (1 << 16) - 1;
        /// Disables frustum culling for this mesh.
        ///
        /// This corresponds to the
        /// [`bevy_render::view::visibility::NoFrustumCulling`] component.
        const NO_FRUSTUM_CULLING          = 1 << 28;
        /// The mesh receives shadows.
        const SHADOW_RECEIVER             = 1 << 29;
        /// The mesh receives transmitted (translucent) shadows.
        const TRANSMITTED_SHADOW_RECEIVER = 1 << 30;
        /// Indicates the sign of the determinant of the 3x3 model matrix. If the sign is positive,
        /// then the flag should be set, else it should not be set.
        const SIGN_DETERMINANT_MODEL_3X3  = 1 << 31;
        /// No flags set.
        const NONE                        = 0;
        /// Sentinel value for a not-yet-initialized flags word.
        const UNINITIALIZED               = 0xFFFFFFFF;
    }
}
653
654
impl MeshFlags {
655
fn from_components(
656
transform: &GlobalTransform,
657
lod_index: Option<NonMaxU16>,
658
no_frustum_culling: bool,
659
not_shadow_receiver: bool,
660
transmitted_receiver: bool,
661
) -> MeshFlags {
662
let mut mesh_flags = if not_shadow_receiver {
663
MeshFlags::empty()
664
} else {
665
MeshFlags::SHADOW_RECEIVER
666
};
667
if no_frustum_culling {
668
mesh_flags |= MeshFlags::NO_FRUSTUM_CULLING;
669
}
670
if transmitted_receiver {
671
mesh_flags |= MeshFlags::TRANSMITTED_SHADOW_RECEIVER;
672
}
673
if transform.affine().matrix3.determinant().is_sign_positive() {
674
mesh_flags |= MeshFlags::SIGN_DETERMINANT_MODEL_3X3;
675
}
676
677
let lod_index_bits = match lod_index {
678
None => u16::MAX,
679
Some(lod_index) => u16::from(lod_index),
680
};
681
mesh_flags |=
682
MeshFlags::from_bits_retain((lod_index_bits as u32) << MeshFlags::LOD_INDEX_SHIFT);
683
684
mesh_flags
685
}
686
687
/// The first bit of the LOD index.
688
pub const LOD_INDEX_SHIFT: u32 = 0;
689
}
690
691
bitflags::bitflags! {
    /// Various useful flags for [`RenderMeshInstance`]s.
    #[derive(Clone, Copy)]
    pub struct RenderMeshInstanceFlags: u8 {
        /// The mesh casts shadows.
        const SHADOW_CASTER           = 1 << 0;
        /// The mesh can participate in automatic batching.
        const AUTOMATIC_BATCHING      = 1 << 1;
        /// The mesh had a transform last frame and so is eligible for motion
        /// vector computation.
        const HAS_PREVIOUS_TRANSFORM  = 1 << 2;
        /// The mesh had a skin last frame and so that skin should be taken into
        /// account for motion vector computation.
        const HAS_PREVIOUS_SKIN       = 1 << 3;
        /// The mesh had morph targets last frame and so they should be taken
        /// into account for motion vector computation.
        const HAS_PREVIOUS_MORPH      = 1 << 4;
    }
}
710
711
/// CPU data that the render world keeps for each entity, when *not* using GPU
/// mesh uniform building.
#[derive(Deref, DerefMut)]
pub struct RenderMeshInstanceCpu {
    /// Data shared between both the CPU mesh uniform building and the GPU mesh
    /// uniform building paths.
    #[deref]
    pub shared: RenderMeshInstanceShared,
    /// The transform of the mesh.
    ///
    /// This will be written into the [`MeshUniform`] at the appropriate time.
    pub transforms: MeshTransforms,
}
724
725
/// CPU data that the render world needs to keep for each entity that contains a
/// mesh when using GPU mesh uniform building.
#[derive(Deref, DerefMut)]
pub struct RenderMeshInstanceGpu {
    /// Data shared between both the CPU mesh uniform building and the GPU mesh
    /// uniform building paths.
    #[deref]
    pub shared: RenderMeshInstanceShared,
    /// The representative position of the mesh instance in world-space.
    ///
    /// This world-space center is used as a spatial proxy for view-dependent
    /// operations such as distance computation and render-order sorting.
    pub center: Vec3,
    /// The index of the [`MeshInputUniform`] in the buffer.
    pub current_uniform_index: NonMaxU32,
}
741
742
/// CPU data that the render world needs to keep about each entity that contains
/// a mesh.
pub struct RenderMeshInstanceShared {
    /// The [`AssetId`] of the mesh.
    pub mesh_asset_id: AssetId<Mesh>,
    /// A slot for the material bind group index.
    pub material_bindings_index: MaterialBindingId,
    /// Various flags.
    pub flags: RenderMeshInstanceFlags,
    /// Index of the slab that the lightmap resides in, if a lightmap is
    /// present.
    pub lightmap_slab_index: Option<LightmapSlabIndex>,
    /// User supplied tag to identify this mesh instance.
    pub tag: u32,
    /// Render layers that this mesh instance belongs to.
    pub render_layers: Option<RenderLayers>,
    /// A representative position of the mesh instance in local space,
    /// derived from its axis-aligned bounding box.
    ///
    /// This value is typically used as a spatial proxy for operations such as
    /// view-dependent sorting (e.g., transparent object ordering).
    pub center: Vec3,
}
765
766
/// Information that is gathered during the parallel portion of mesh extraction
/// when GPU mesh uniform building is enabled.
///
/// From this, the [`MeshInputUniform`] and [`RenderMeshInstanceGpu`] are
/// prepared.
pub struct RenderMeshInstanceGpuBuilder {
    /// Data that will be placed on the [`RenderMeshInstanceGpu`].
    pub shared: RenderMeshInstanceShared,
    /// The current transform.
    pub world_from_local: Affine3,
    /// Four 16-bit unsigned normalized UV values packed into a [`UVec2`]:
    ///
    /// ```text
    ///                         <--- MSB                   LSB --->
    ///                         +---- min v ----+ +---- min u ----+
    ///     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
    ///                         +---- max v ----+ +---- max u ----+
    ///     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
    ///
    /// (MSB: most significant bit; LSB: least significant bit.)
    /// ```
    pub lightmap_uv_rect: UVec2,
    /// The index of the previous mesh input.
    pub previous_input_index: Option<NonMaxU32>,
    /// Various flags.
    pub mesh_flags: MeshFlags,
}
793
794
/// The per-thread queues used during [`extract_meshes_for_gpu_building`].
///
/// There are two varieties of these: one for when culling happens on CPU and
/// one for when culling happens on GPU. Having the two varieties avoids wasting
/// space if GPU culling is disabled.
#[derive(Default)]
pub enum RenderMeshInstanceGpuQueue {
    /// The default value.
    ///
    /// This becomes [`RenderMeshInstanceGpuQueue::CpuCulling`] or
    /// [`RenderMeshInstanceGpuQueue::GpuCulling`] once extraction starts.
    #[default]
    None,
    /// The version of [`RenderMeshInstanceGpuQueue`] that omits the
    /// [`MeshCullingData`], so that we don't waste space when GPU
    /// culling is disabled.
    CpuCulling {
        /// Stores GPU data for each entity that became visible or changed in
        /// such a way that necessitates updating the [`MeshInputUniform`] (e.g.
        /// changed transform).
        changed: Vec<(MainEntity, RenderMeshInstanceGpuBuilder)>,
        /// Stores the IDs of entities that became invisible this frame.
        removed: Vec<MainEntity>,
    },
    /// The version of [`RenderMeshInstanceGpuQueue`] that contains the
    /// [`MeshCullingData`], used when any view has GPU culling
    /// enabled.
    GpuCulling {
        /// Stores GPU data for each entity that became visible or changed in
        /// such a way that necessitates updating the [`MeshInputUniform`] (e.g.
        /// changed transform).
        changed: Vec<(MainEntity, RenderMeshInstanceGpuBuilder, MeshCullingData)>,
        /// Stores the IDs of entities that became invisible this frame.
        removed: Vec<MainEntity>,
    },
}
830
831
/// The per-thread queues containing mesh instances, populated during the
/// extract phase.
///
/// These are filled in [`extract_meshes_for_gpu_building`] and consumed in
/// [`collect_meshes_for_gpu_building`].
#[derive(Resource, Default, Deref, DerefMut)]
pub struct RenderMeshInstanceGpuQueues(Parallel<RenderMeshInstanceGpuQueue>);
838
839
/// Holds a list of meshes that couldn't be extracted this frame because their
/// materials weren't prepared yet.
///
/// On subsequent frames, we try to reextract those meshes.
#[derive(Resource, Default, Deref, DerefMut)]
pub struct MeshesToReextractNextFrame(MainEntityHashSet);
845
846
impl RenderMeshInstanceShared {
847
/// A gpu builder will provide the mesh instance id
848
/// during [`RenderMeshInstanceGpuPrepared::update`].
849
fn for_gpu_building(
850
previous_transform: Option<&PreviousGlobalTransform>,
851
mesh: &Mesh3d,
852
tag: Option<&MeshTag>,
853
not_shadow_caster: bool,
854
no_automatic_batching: bool,
855
render_layers: Option<&RenderLayers>,
856
aabb: Option<&Aabb>,
857
) -> Self {
858
Self::for_cpu_building(
859
previous_transform,
860
mesh,
861
tag,
862
default(),
863
not_shadow_caster,
864
no_automatic_batching,
865
render_layers,
866
aabb,
867
)
868
}
869
870
/// The cpu builder does not have an equivalent [`RenderMeshInstanceGpuBuilder`].
871
fn for_cpu_building(
872
previous_transform: Option<&PreviousGlobalTransform>,
873
mesh: &Mesh3d,
874
tag: Option<&MeshTag>,
875
material_bindings_index: MaterialBindingId,
876
not_shadow_caster: bool,
877
no_automatic_batching: bool,
878
render_layers: Option<&RenderLayers>,
879
aabb: Option<&Aabb>,
880
) -> Self {
881
let mut mesh_instance_flags = RenderMeshInstanceFlags::empty();
882
mesh_instance_flags.set(RenderMeshInstanceFlags::SHADOW_CASTER, !not_shadow_caster);
883
mesh_instance_flags.set(
884
RenderMeshInstanceFlags::AUTOMATIC_BATCHING,
885
!no_automatic_batching,
886
);
887
mesh_instance_flags.set(
888
RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM,
889
previous_transform.is_some(),
890
);
891
892
RenderMeshInstanceShared {
893
mesh_asset_id: mesh.id(),
894
flags: mesh_instance_flags,
895
material_bindings_index,
896
lightmap_slab_index: None,
897
tag: tag.map_or(0, |i| **i),
898
render_layers: render_layers.cloned(),
899
center: aabb.map_or(Vec3::ZERO, |aabb| aabb.center.into()),
900
}
901
}
902
903
/// Returns true if this entity is eligible to participate in automatic
904
/// batching.
905
#[inline]
906
pub fn should_batch(&self) -> bool {
907
self.flags
908
.contains(RenderMeshInstanceFlags::AUTOMATIC_BATCHING)
909
}
910
}
911
912
/// Information that the render world keeps about each entity that contains a
/// mesh.
///
/// The set of information needed is different depending on whether CPU or GPU
/// [`MeshUniform`] building is in use.
///
/// The active variant is selected by [`RenderMeshInstances::new`] based on
/// whether the GPU instance buffer builder is in use.
#[derive(Resource)]
pub enum RenderMeshInstances {
    /// Information needed when using CPU mesh instance data building.
    CpuBuilding(RenderMeshInstancesCpu),
    /// Information needed when using GPU mesh instance data building.
    GpuBuilding(RenderMeshInstancesGpu),
}
924
925
/// Information that the render world keeps about each entity that contains a
/// mesh, when using CPU mesh instance data building.
///
/// Maps each main-world entity to its CPU-built instance data.
#[derive(Default, Deref, DerefMut)]
pub struct RenderMeshInstancesCpu(MainEntityHashMap<RenderMeshInstanceCpu>);
929
930
/// Information that the render world keeps about each entity that contains a
/// mesh, when using GPU mesh instance data building.
///
/// Maps each main-world entity to its GPU-built instance data.
#[derive(Default, Deref, DerefMut)]
pub struct RenderMeshInstancesGpu(MainEntityHashMap<RenderMeshInstanceGpu>);
934
935
impl RenderMeshInstances {
936
/// Creates a new [`RenderMeshInstances`] instance.
937
fn new(use_gpu_instance_buffer_builder: bool) -> RenderMeshInstances {
938
if use_gpu_instance_buffer_builder {
939
RenderMeshInstances::GpuBuilding(RenderMeshInstancesGpu::default())
940
} else {
941
RenderMeshInstances::CpuBuilding(RenderMeshInstancesCpu::default())
942
}
943
}
944
945
/// Returns the ID of the mesh asset attached to the given entity, if any.
946
pub fn mesh_asset_id(&self, entity: MainEntity) -> Option<AssetId<Mesh>> {
947
match *self {
948
RenderMeshInstances::CpuBuilding(ref instances) => instances.mesh_asset_id(entity),
949
RenderMeshInstances::GpuBuilding(ref instances) => instances.mesh_asset_id(entity),
950
}
951
}
952
953
/// Constructs [`RenderMeshQueueData`] for the given entity, if it has a
954
/// mesh attached.
955
pub fn render_mesh_queue_data(&self, entity: MainEntity) -> Option<RenderMeshQueueData<'_>> {
956
match *self {
957
RenderMeshInstances::CpuBuilding(ref instances) => {
958
instances.render_mesh_queue_data(entity)
959
}
960
RenderMeshInstances::GpuBuilding(ref instances) => {
961
instances.render_mesh_queue_data(entity)
962
}
963
}
964
}
965
966
/// Inserts the given flags into the CPU or GPU render mesh instance data
967
/// for the given mesh as appropriate.
968
fn insert_mesh_instance_flags(&mut self, entity: MainEntity, flags: RenderMeshInstanceFlags) {
969
match *self {
970
RenderMeshInstances::CpuBuilding(ref mut instances) => {
971
instances.insert_mesh_instance_flags(entity, flags);
972
}
973
RenderMeshInstances::GpuBuilding(ref mut instances) => {
974
instances.insert_mesh_instance_flags(entity, flags);
975
}
976
}
977
}
978
}
979
980
impl RenderMeshInstancesCpu {
981
fn mesh_asset_id(&self, entity: MainEntity) -> Option<AssetId<Mesh>> {
982
self.get(&entity)
983
.map(|render_mesh_instance| render_mesh_instance.mesh_asset_id)
984
}
985
986
fn render_mesh_queue_data(&self, entity: MainEntity) -> Option<RenderMeshQueueData<'_>> {
987
self.get(&entity).map(|render_mesh_instance| {
988
let world_from_local = &render_mesh_instance.transforms.world_from_local;
989
let center = world_from_local
990
.matrix3
991
.mul_vec3(render_mesh_instance.shared.center)
992
+ world_from_local.translation;
993
994
RenderMeshQueueData {
995
shared: &render_mesh_instance.shared,
996
center,
997
current_uniform_index: InputUniformIndex::default(),
998
}
999
})
1000
}
1001
1002
/// Inserts the given flags into the render mesh instance data for the given
1003
/// mesh.
1004
fn insert_mesh_instance_flags(&mut self, entity: MainEntity, flags: RenderMeshInstanceFlags) {
1005
if let Some(instance) = self.get_mut(&entity) {
1006
instance.flags.insert(flags);
1007
}
1008
}
1009
}
1010
1011
impl RenderMeshInstancesGpu {
1012
fn mesh_asset_id(&self, entity: MainEntity) -> Option<AssetId<Mesh>> {
1013
self.get(&entity)
1014
.map(|render_mesh_instance| render_mesh_instance.mesh_asset_id)
1015
}
1016
1017
fn render_mesh_queue_data(&self, entity: MainEntity) -> Option<RenderMeshQueueData<'_>> {
1018
self.get(&entity)
1019
.map(|render_mesh_instance| RenderMeshQueueData {
1020
shared: &render_mesh_instance.shared,
1021
center: render_mesh_instance.center,
1022
current_uniform_index: InputUniformIndex(
1023
render_mesh_instance.current_uniform_index.into(),
1024
),
1025
})
1026
}
1027
1028
/// Inserts the given flags into the render mesh instance data for the given
1029
/// mesh.
1030
fn insert_mesh_instance_flags(&mut self, entity: MainEntity, flags: RenderMeshInstanceFlags) {
1031
if let Some(instance) = self.get_mut(&entity) {
1032
instance.flags.insert(flags);
1033
}
1034
}
1035
}
1036
1037
impl RenderMeshInstanceGpuQueue {
    /// Clears out a [`RenderMeshInstanceGpuQueue`], creating or recreating it
    /// as necessary.
    ///
    /// `any_gpu_culling` should be set to true if any view has GPU culling
    /// enabled.
    fn init(&mut self, any_gpu_culling: bool) {
        match (any_gpu_culling, &mut *self) {
            // Already the matching variant: just clear the lists, keeping
            // their allocations for reuse next frame.
            (true, RenderMeshInstanceGpuQueue::GpuCulling { changed, removed }) => {
                changed.clear();
                removed.clear();
            }
            // Wrong variant (or `None`): replace with a fresh GPU-culling queue.
            (true, _) => {
                *self = RenderMeshInstanceGpuQueue::GpuCulling {
                    changed: vec![],
                    removed: vec![],
                }
            }
            (false, RenderMeshInstanceGpuQueue::CpuCulling { changed, removed }) => {
                changed.clear();
                removed.clear();
            }
            (false, _) => {
                *self = RenderMeshInstanceGpuQueue::CpuCulling {
                    changed: vec![],
                    removed: vec![],
                }
            }
        }
    }

    /// Adds a new mesh to this queue.
    ///
    /// The presence of `culling_data_builder` indicates whether this mesh is
    /// destined for the GPU-culling variant of the queue.
    fn push(
        &mut self,
        entity: MainEntity,
        instance_builder: RenderMeshInstanceGpuBuilder,
        culling_data_builder: Option<MeshCullingData>,
    ) {
        match (&mut *self, culling_data_builder) {
            (
                &mut RenderMeshInstanceGpuQueue::CpuCulling {
                    changed: ref mut queue,
                    ..
                },
                None,
            ) => {
                queue.push((entity, instance_builder));
            }
            (
                &mut RenderMeshInstanceGpuQueue::GpuCulling {
                    changed: ref mut queue,
                    ..
                },
                Some(culling_data_builder),
            ) => {
                queue.push((entity, instance_builder, culling_data_builder));
            }
            // The queue isn't in the matching variant: start a fresh one
            // seeded with this mesh. NOTE(review): this discards anything
            // queued under the other variant — presumably `init` has already
            // selected the correct variant for the frame, so in practice this
            // only upgrades from `None`; confirm.
            (_, None) => {
                *self = RenderMeshInstanceGpuQueue::CpuCulling {
                    changed: vec![(entity, instance_builder)],
                    removed: vec![],
                };
            }
            (_, Some(culling_data_builder)) => {
                *self = RenderMeshInstanceGpuQueue::GpuCulling {
                    changed: vec![(entity, instance_builder, culling_data_builder)],
                    removed: vec![],
                };
            }
        }
    }

    /// Adds the given entity to the `removed` list, queuing it for removal.
    ///
    /// The `gpu_culling` parameter specifies whether GPU culling is enabled.
    fn remove(&mut self, entity: MainEntity, gpu_culling: bool) {
        match (&mut *self, gpu_culling) {
            // Uninitialized queue: create the appropriate variant with only
            // this entity in its removal list.
            (RenderMeshInstanceGpuQueue::None, false) => {
                *self = RenderMeshInstanceGpuQueue::CpuCulling {
                    changed: vec![],
                    removed: vec![entity],
                }
            }
            (RenderMeshInstanceGpuQueue::None, true) => {
                *self = RenderMeshInstanceGpuQueue::GpuCulling {
                    changed: vec![],
                    removed: vec![entity],
                }
            }
            // Already initialized: append regardless of variant.
            (RenderMeshInstanceGpuQueue::CpuCulling { removed, .. }, _)
            | (RenderMeshInstanceGpuQueue::GpuCulling { removed, .. }, _) => {
                removed.push(entity);
            }
        }
    }
}
1133
1134
impl RenderMeshInstanceGpuBuilder {
    /// Prepares the data needed to update the mesh instance.
    ///
    /// This is the thread-safe part of the update; the result is later flushed
    /// to the GPU tables by [`RenderMeshInstanceGpuPrepared::update`].
    fn prepare(
        mut self,
        entity: MainEntity,
        mesh_allocator: &MeshAllocator,
        mesh_material_ids: &RenderMaterialInstances,
        render_material_bindings: &RenderMaterialBindings,
        render_lightmaps: &RenderLightmaps,
        skin_uniforms: &SkinUniforms,
        timestamp: FrameCount,
    ) -> Option<RenderMeshInstanceGpuPrepared> {
        // Look up the material index. If we couldn't fetch the material index,
        // then the material hasn't been prepared yet, perhaps because it hasn't
        // yet loaded. In that case, we return None so that
        // `collect_meshes_for_gpu_building` will add the mesh to
        // `meshes_to_reextract_next_frame` and bail.
        let mesh_material = mesh_material_ids.mesh_material(entity);
        let mesh_material_binding_id = if mesh_material != DUMMY_MESH_MATERIAL.untyped() {
            render_material_bindings.get(&mesh_material).copied()?
        } else {
            // Use a dummy material binding ID.
            MaterialBindingId::default()
        };
        self.shared.material_bindings_index = mesh_material_binding_id;

        // Locate this mesh's vertices (and indices, if present) in the shared
        // mesh buffers; (0, 0) if the mesh hasn't been allocated yet.
        let (first_vertex_index, vertex_count) =
            match mesh_allocator.mesh_vertex_slice(&self.shared.mesh_asset_id) {
                Some(mesh_vertex_slice) => (
                    mesh_vertex_slice.range.start,
                    mesh_vertex_slice.range.end - mesh_vertex_slice.range.start,
                ),
                None => (0, 0),
            };
        let (mesh_is_indexed, first_index_index, index_count) =
            match mesh_allocator.mesh_index_slice(&self.shared.mesh_asset_id) {
                Some(mesh_index_slice) => (
                    true,
                    mesh_index_slice.range.start,
                    mesh_index_slice.range.end - mesh_index_slice.range.start,
                ),
                None => (false, 0, 0),
            };
        // `u32::MAX` is the sentinel for "no skin".
        let current_skin_index = match skin_uniforms.skin_byte_offset(entity) {
            Some(skin_index) => skin_index.index(),
            None => u32::MAX,
        };

        // `u16::MAX` is the sentinel for "no lightmap".
        let lightmap_slot = match render_lightmaps.render_lightmaps.get(&entity) {
            Some(render_lightmap) => u16::from(*render_lightmap.slot_index),
            None => u16::MAX,
        };
        let lightmap_slab_index = render_lightmaps
            .render_lightmaps
            .get(&entity)
            .map(|lightmap| lightmap.slab_index);
        self.shared.lightmap_slab_index = lightmap_slab_index;

        // Create the mesh input uniform.
        let mesh_input_uniform = MeshInputUniform {
            world_from_local: self.world_from_local.to_transpose(),
            lightmap_uv_rect: self.lightmap_uv_rect,
            flags: self.mesh_flags.bits(),
            // Patched later by [`RenderMeshInstanceGpuPrepared::update`] if
            // this entity existed last frame.
            previous_input_index: u32::MAX,
            timestamp: timestamp.0,
            first_vertex_index,
            first_index_index,
            index_count: if mesh_is_indexed {
                index_count
            } else {
                vertex_count
            },
            current_skin_index,
            // Material bind group slot in the low 16 bits, lightmap slot in
            // the high 16.
            material_and_lightmap_bind_group_slot: u32::from(
                self.shared.material_bindings_index.slot,
            ) | ((lightmap_slot as u32) << 16),
            tag: self.shared.tag,
            pad: 0,
        };

        // Transform the local-space center into world space for culling and
        // sorting.
        let world_from_local = &self.world_from_local;
        let center =
            world_from_local.matrix3.mul_vec3(self.shared.center) + world_from_local.translation;

        Some(RenderMeshInstanceGpuPrepared {
            shared: self.shared,
            mesh_input_uniform,
            center,
        })
    }
}
1227
1228
/// The output of [`RenderMeshInstanceGpuBuilder::prepare`]: a fully-resolved
/// mesh instance, ready to be flushed to the GPU tables by
/// [`RenderMeshInstanceGpuPrepared::update`].
pub struct RenderMeshInstanceGpuPrepared {
    /// Data shared between the CPU and GPU versions of this mesh instance.
    shared: RenderMeshInstanceShared,
    /// The data that will be uploaded to the GPU as a [`MeshInputUniform`].
    mesh_input_uniform: MeshInputUniform,
    /// The world-space center of the mesh instance, used for culling and sorting.
    center: Vec3,
}
1236
1237
impl RenderMeshInstanceGpuPrepared {
    /// Flushes this mesh instance to the [`RenderMeshInstanceGpu`] and
    /// [`MeshInputUniform`] tables, replacing the existing entry if applicable.
    ///
    /// Returns the index of the instance's slot in the current input buffer.
    /// (As written this is always `Some`.)
    fn update(
        mut self,
        entity: MainEntity,
        render_mesh_instances: &mut MainEntityHashMap<RenderMeshInstanceGpu>,
        current_input_buffer: &mut InstanceInputUniformBuffer<MeshInputUniform>,
        previous_input_buffer: &mut InstanceInputUniformBuffer<MeshInputUniform>,
    ) -> Option<u32> {
        // Did the last frame contain this entity as well?
        let current_uniform_index;
        match render_mesh_instances.entry(entity) {
            Entry::Occupied(mut occupied_entry) => {
                // Yes, it did. Replace its entry with the new one.

                // Reserve a slot.
                current_uniform_index = u32::from(occupied_entry.get_mut().current_uniform_index);

                // Save the old mesh input uniform. The mesh preprocessing
                // shader will need it to compute motion vectors.
                let previous_mesh_input_uniform =
                    current_input_buffer.get_unchecked(current_uniform_index);
                let previous_input_index = previous_input_buffer.add(previous_mesh_input_uniform);
                self.mesh_input_uniform.previous_input_index = previous_input_index;

                // Write in the new mesh input uniform.
                current_input_buffer.set(current_uniform_index, self.mesh_input_uniform);

                // NOTE(review): `NonMaxU32::new` fails only for `u32::MAX`, in
                // which case the index silently falls back to the default —
                // presumably unreachable for a valid slot; confirm.
                occupied_entry.replace_entry_with(|_, _| {
                    Some(RenderMeshInstanceGpu {
                        shared: self.shared,
                        center: self.center,
                        current_uniform_index: NonMaxU32::new(current_uniform_index)
                            .unwrap_or_default(),
                    })
                });
            }

            Entry::Vacant(vacant_entry) => {
                // No, this is a new entity. Push its data on to the buffer.
                current_uniform_index = current_input_buffer.add(self.mesh_input_uniform);

                vacant_entry.insert(RenderMeshInstanceGpu {
                    shared: self.shared,
                    center: self.center,
                    current_uniform_index: NonMaxU32::new(current_uniform_index)
                        .unwrap_or_default(),
                });
            }
        }

        Some(current_uniform_index)
    }
}
1292
1293
/// Removes a [`MeshInputUniform`] corresponding to an entity that became
1294
/// invisible from the buffer.
1295
fn remove_mesh_input_uniform(
1296
entity: MainEntity,
1297
render_mesh_instances: &mut MainEntityHashMap<RenderMeshInstanceGpu>,
1298
current_input_buffer: &mut InstanceInputUniformBuffer<MeshInputUniform>,
1299
) -> Option<u32> {
1300
// Remove the uniform data.
1301
let removed_render_mesh_instance = render_mesh_instances.remove(&entity)?;
1302
1303
let removed_uniform_index = removed_render_mesh_instance.current_uniform_index.get();
1304
current_input_buffer.remove(removed_uniform_index);
1305
Some(removed_uniform_index)
1306
}
1307
1308
impl MeshCullingData {
1309
/// Returns a new [`MeshCullingData`] initialized with the given AABB.
1310
///
1311
/// If no AABB is provided, an infinitely-large one is conservatively
1312
/// chosen.
1313
fn new(aabb: Option<&Aabb>) -> Self {
1314
match aabb {
1315
Some(aabb) => MeshCullingData {
1316
aabb_center: aabb.center.extend(0.0),
1317
aabb_half_extents: aabb.half_extents.extend(0.0),
1318
},
1319
None => MeshCullingData {
1320
aabb_center: Vec3::ZERO.extend(0.0),
1321
aabb_half_extents: Vec3::INFINITY.extend(0.0),
1322
},
1323
}
1324
}
1325
1326
/// Flushes this mesh instance culling data to the
1327
/// [`MeshCullingDataBuffer`], replacing the existing entry if applicable.
1328
fn update(
1329
&self,
1330
mesh_culling_data_buffer: &mut MeshCullingDataBuffer,
1331
instance_data_index: usize,
1332
) {
1333
while mesh_culling_data_buffer.len() < instance_data_index + 1 {
1334
mesh_culling_data_buffer.push(MeshCullingData::default());
1335
}
1336
mesh_culling_data_buffer.values_mut()[instance_data_index] = *self;
1337
}
1338
}
1339
1340
impl Default for MeshCullingDataBuffer {
    #[inline]
    fn default() -> Self {
        // The culling data lives in a GPU storage buffer.
        Self(RawBufferVec::new(BufferUsages::STORAGE))
    }
}
1346
1347
/// Data that [`crate::material::queue_material_meshes`] and similar systems
/// need in order to place entities that contain meshes in the right batch.
///
/// Dereferences to the shared instance data via the `#[deref]` field.
#[derive(Deref)]
pub struct RenderMeshQueueData<'a> {
    /// General information about the mesh instance.
    #[deref]
    pub shared: &'a RenderMeshInstanceShared,
    /// The representative position of the mesh instance in world-space.
    ///
    /// This world-space center is used as a spatial proxy for view-dependent
    /// operations such as distance computation and render-order sorting.
    pub center: Vec3,
    /// The index of the [`MeshInputUniform`] in the GPU buffer for this mesh
    /// instance.
    pub current_uniform_index: InputUniformIndex,
}
1363
1364
/// A [`SystemSet`] that encompasses both [`extract_meshes_for_cpu_building`]
/// and [`extract_meshes_for_gpu_building`].
///
/// Systems that need to order themselves relative to mesh extraction can
/// target this set instead of the individual systems.
#[derive(SystemSet, Clone, PartialEq, Eq, Debug, Hash)]
pub struct MeshExtractionSystems;
1368
1369
/// Extracts meshes from the main world into the render world, populating the
/// [`RenderMeshInstances`].
///
/// This is the variant of the system that runs when we're *not* using GPU
/// [`MeshUniform`] building.
pub fn extract_meshes_for_cpu_building(
    mut render_mesh_instances: ResMut<RenderMeshInstances>,
    mesh_material_ids: Res<RenderMaterialInstances>,
    render_material_bindings: Res<RenderMaterialBindings>,
    render_visibility_ranges: Res<RenderVisibilityRanges>,
    mut render_mesh_instance_queues: Local<Parallel<Vec<(Entity, RenderMeshInstanceCpu)>>>,
    meshes_query: Extract<
        Query<(
            Entity,
            &ViewVisibility,
            &GlobalTransform,
            Option<&PreviousGlobalTransform>,
            &Mesh3d,
            Option<&MeshTag>,
            Has<NoFrustumCulling>,
            Has<NotShadowReceiver>,
            Has<TransmittedShadowReceiver>,
            Has<NotShadowCaster>,
            Has<NoAutomaticBatching>,
            Has<VisibilityRange>,
            Option<&RenderLayers>,
            Option<&Aabb>,
        )>,
    >,
) {
    // Extract in parallel; each thread pushes into its own local queue.
    meshes_query.par_iter().for_each_init(
        || render_mesh_instance_queues.borrow_local_mut(),
        |queue,
         (
            entity,
            view_visibility,
            transform,
            previous_transform,
            mesh,
            tag,
            no_frustum_culling,
            not_shadow_receiver,
            transmitted_receiver,
            not_shadow_caster,
            no_automatic_batching,
            visibility_range,
            render_layers,
            aabb,
        )| {
            // Invisible meshes aren't extracted at all.
            if !view_visibility.get() {
                return;
            }

            // Look up the LOD index only for entities using visibility ranges.
            let mut lod_index = None;
            if visibility_range {
                lod_index = render_visibility_ranges.lod_index_for_entity(entity.into());
            }

            let mesh_flags = MeshFlags::from_components(
                transform,
                lod_index,
                no_frustum_culling,
                not_shadow_receiver,
                transmitted_receiver,
            );

            let mesh_material = mesh_material_ids.mesh_material(MainEntity::from(entity));

            // On the CPU path an unprepared material just falls back to the
            // default binding (there's no reextraction mechanism here).
            let material_bindings_index = render_material_bindings
                .get(&mesh_material)
                .copied()
                .unwrap_or_default();

            let shared = RenderMeshInstanceShared::for_cpu_building(
                previous_transform,
                mesh,
                tag,
                material_bindings_index,
                not_shadow_caster,
                no_automatic_batching,
                render_layers,
                aabb,
            );

            let world_from_local = transform.affine();
            queue.push((
                entity,
                RenderMeshInstanceCpu {
                    transforms: MeshTransforms {
                        world_from_local: world_from_local.into(),
                        // Fall back to the current transform when no previous
                        // one was recorded (e.g. a newly-spawned entity).
                        previous_world_from_local: (previous_transform
                            .map(|t| t.0)
                            .unwrap_or(world_from_local))
                        .into(),
                        flags: mesh_flags.bits(),
                    },
                    shared,
                },
            ));
        },
    );

    // Collect the render mesh instances.
    let RenderMeshInstances::CpuBuilding(ref mut render_mesh_instances) = *render_mesh_instances
    else {
        panic!(
            "`extract_meshes_for_cpu_building` should only be called if we're using CPU \
            `MeshUniform` building"
        );
    };

    // Drain the per-thread queues into the central map, rebuilding it from
    // scratch each frame.
    render_mesh_instances.clear();
    for queue in render_mesh_instance_queues.iter_mut() {
        for (entity, render_mesh_instance) in queue.drain(..) {
            render_mesh_instances.insert(entity.into(), render_mesh_instance);
        }
    }
}
1487
1488
/// All the data that we need from a mesh in the main world.
///
/// The tuple order here must match the destructuring pattern in
/// [`extract_mesh_for_gpu_building`].
type GpuMeshExtractionQuery = (
    Entity,
    Read<ViewVisibility>,
    Read<GlobalTransform>,
    Option<Read<PreviousGlobalTransform>>,
    Option<Read<Lightmap>>,
    Option<Read<Aabb>>,
    Read<Mesh3d>,
    Option<Read<MeshTag>>,
    Has<NoFrustumCulling>,
    Has<NotShadowReceiver>,
    Has<TransmittedShadowReceiver>,
    Has<NotShadowCaster>,
    Has<NoAutomaticBatching>,
    Has<VisibilityRange>,
    Option<Read<RenderLayers>>,
);
1506
1507
/// Extracts meshes from the main world into the render world and queues
/// [`MeshInputUniform`]s to be uploaded to the GPU.
///
/// This is optimized to only look at entities that have changed since the last
/// frame.
///
/// This is the variant of the system that runs when we're using GPU
/// [`MeshUniform`] building.
pub fn extract_meshes_for_gpu_building(
    mut render_mesh_instances: ResMut<RenderMeshInstances>,
    render_visibility_ranges: Res<RenderVisibilityRanges>,
    mut render_mesh_instance_queues: ResMut<RenderMeshInstanceGpuQueues>,
    changed_meshes_query: Extract<
        Query<
            GpuMeshExtractionQuery,
            // Any change to a component that feeds the `MeshInputUniform`
            // requires reextraction.
            Or<(
                Changed<ViewVisibility>,
                Changed<GlobalTransform>,
                Changed<PreviousGlobalTransform>,
                Changed<Lightmap>,
                Changed<Aabb>,
                Changed<Mesh3d>,
                Changed<MeshTag>,
                Changed<NoFrustumCulling>,
                Changed<NotShadowReceiver>,
                Changed<TransmittedShadowReceiver>,
                Changed<NotShadowCaster>,
                Changed<NoAutomaticBatching>,
                Changed<VisibilityRange>,
                Changed<SkinnedMesh>,
            )>,
        >,
    >,
    // Removal of any of these components also requires reextraction; the
    // queries are grouped in a tuple to stay under the system-parameter limit.
    (
        mut removed_previous_global_transform_query,
        mut removed_lightmap_query,
        mut removed_aabb_query,
        mut removed_mesh_tag_query,
        mut removed_no_frustum_culling_query,
        mut removed_not_shadow_receiver_query,
        mut removed_transmitted_receiver_query,
        mut removed_not_shadow_caster_query,
        mut removed_no_automatic_batching_query,
        mut removed_visibility_range_query,
        mut removed_skinned_mesh_query,
    ): (
        Extract<RemovedComponents<PreviousGlobalTransform>>,
        Extract<RemovedComponents<Lightmap>>,
        Extract<RemovedComponents<Aabb>>,
        Extract<RemovedComponents<MeshTag>>,
        Extract<RemovedComponents<NoFrustumCulling>>,
        Extract<RemovedComponents<NotShadowReceiver>>,
        Extract<RemovedComponents<TransmittedShadowReceiver>>,
        Extract<RemovedComponents<NotShadowCaster>>,
        Extract<RemovedComponents<NoAutomaticBatching>>,
        Extract<RemovedComponents<VisibilityRange>>,
        Extract<RemovedComponents<SkinnedMesh>>,
    ),
    all_meshes_query: Extract<Query<GpuMeshExtractionQuery>>,
    mut removed_meshes_query: Extract<RemovedComponents<Mesh3d>>,
    gpu_culling_query: Extract<Query<(), (With<Camera>, Without<NoIndirectDrawing>)>>,
    meshes_to_reextract_next_frame: ResMut<MeshesToReextractNextFrame>,
    mut reextract_entities: Local<EntityHashSet>,
) {
    reextract_entities.clear();

    // GPU culling is in play if any camera does indirect drawing.
    let any_gpu_culling = !gpu_culling_query.is_empty();

    for render_mesh_instance_queue in render_mesh_instance_queues.iter_mut() {
        render_mesh_instance_queue.init(any_gpu_culling);
    }

    // Collect render mesh instances. Build up the uniform buffer.

    let RenderMeshInstances::GpuBuilding(ref mut render_mesh_instances) = *render_mesh_instances
    else {
        panic!(
            "`extract_meshes_for_gpu_building` should only be called if we're \
            using GPU `MeshUniform` building"
        );
    };

    // Find all meshes that have changed, and record information needed to
    // construct the `MeshInputUniform` for them.
    changed_meshes_query.par_iter().for_each_init(
        || render_mesh_instance_queues.borrow_local_mut(),
        |queue, query_row| {
            extract_mesh_for_gpu_building(
                query_row,
                &render_visibility_ranges,
                render_mesh_instances,
                queue,
                any_gpu_culling,
            );
        },
    );

    // Process materials that `collect_meshes_for_gpu_building` marked as
    // needing to be reextracted. This will happen when we extracted a mesh on
    // some previous frame, but its material hadn't been prepared yet, perhaps
    // because the material hadn't yet been loaded. We reextract such materials
    // on subsequent frames so that `collect_meshes_for_gpu_building` will check
    // to see if their materials have been prepared.
    let iters = meshes_to_reextract_next_frame
        .iter()
        .map(|&e| *e)
        .chain(removed_previous_global_transform_query.read())
        .chain(removed_lightmap_query.read())
        .chain(removed_aabb_query.read())
        .chain(removed_mesh_tag_query.read())
        .chain(removed_no_frustum_culling_query.read())
        .chain(removed_not_shadow_receiver_query.read())
        .chain(removed_transmitted_receiver_query.read())
        .chain(removed_not_shadow_caster_query.read())
        .chain(removed_no_automatic_batching_query.read())
        .chain(removed_visibility_range_query.read())
        .chain(removed_skinned_mesh_query.read());

    // Deduplicate via the set: an entity may appear in several of the
    // iterators above.
    reextract_entities.extend_from_iter(iters);

    // Reextraction runs on this thread, so use this thread's queue.
    let mut queue = render_mesh_instance_queues.borrow_local_mut();
    for entity in &reextract_entities {
        if let Ok(query_row) = all_meshes_query.get(*entity) {
            extract_mesh_for_gpu_building(
                query_row,
                &render_visibility_ranges,
                render_mesh_instances,
                &mut queue,
                any_gpu_culling,
            );
        }
    }

    // Also record info about each mesh that became invisible.
    for entity in removed_meshes_query.read() {
        // Only queue a mesh for removal if we didn't pick it up above.
        // It's possible that a necessary component was removed and re-added in
        // the same frame.
        let entity = MainEntity::from(entity);
        if !changed_meshes_query.contains(*entity)
            && !meshes_to_reextract_next_frame.contains(&entity)
        {
            queue.remove(entity, any_gpu_culling);
        }
    }
}
1653
1654
fn extract_mesh_for_gpu_building(
1655
(
1656
entity,
1657
view_visibility,
1658
transform,
1659
previous_transform,
1660
lightmap,
1661
aabb,
1662
mesh,
1663
tag,
1664
no_frustum_culling,
1665
not_shadow_receiver,
1666
transmitted_receiver,
1667
not_shadow_caster,
1668
no_automatic_batching,
1669
visibility_range,
1670
render_layers,
1671
): <GpuMeshExtractionQuery as QueryData>::Item<'_, '_>,
1672
render_visibility_ranges: &RenderVisibilityRanges,
1673
render_mesh_instances: &RenderMeshInstancesGpu,
1674
queue: &mut RenderMeshInstanceGpuQueue,
1675
any_gpu_culling: bool,
1676
) {
1677
if !view_visibility.get() {
1678
queue.remove(entity.into(), any_gpu_culling);
1679
return;
1680
}
1681
1682
let mut lod_index = None;
1683
if visibility_range {
1684
lod_index = render_visibility_ranges.lod_index_for_entity(entity.into());
1685
}
1686
1687
let mesh_flags = MeshFlags::from_components(
1688
transform,
1689
lod_index,
1690
no_frustum_culling,
1691
not_shadow_receiver,
1692
transmitted_receiver,
1693
);
1694
1695
let shared = RenderMeshInstanceShared::for_gpu_building(
1696
previous_transform,
1697
mesh,
1698
tag,
1699
not_shadow_caster,
1700
no_automatic_batching,
1701
render_layers,
1702
aabb,
1703
);
1704
1705
let lightmap_uv_rect = pack_lightmap_uv_rect(lightmap.map(|lightmap| lightmap.uv_rect));
1706
1707
let gpu_mesh_culling_data = any_gpu_culling.then(|| MeshCullingData::new(aabb));
1708
1709
let previous_input_index = if shared
1710
.flags
1711
.contains(RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM)
1712
{
1713
render_mesh_instances
1714
.get(&MainEntity::from(entity))
1715
.map(|render_mesh_instance| render_mesh_instance.current_uniform_index)
1716
} else {
1717
None
1718
};
1719
1720
let gpu_mesh_instance_builder = RenderMeshInstanceGpuBuilder {
1721
shared,
1722
world_from_local: (transform.affine()).into(),
1723
lightmap_uv_rect,
1724
mesh_flags,
1725
previous_input_index,
1726
};
1727
1728
queue.push(
1729
entity.into(),
1730
gpu_mesh_instance_builder,
1731
gpu_mesh_culling_data,
1732
);
1733
}
1734
1735
/// A system that sets the [`RenderMeshInstanceFlags`] for each mesh based on
1736
/// whether the previous frame had skins and/or morph targets.
1737
///
1738
/// Ordinarily, [`RenderMeshInstanceFlags`] are set during the extraction phase.
1739
/// However, we can't do that for the flags related to skins and morph targets
1740
/// because the previous frame's skin and morph targets are the responsibility
1741
/// of [`extract_skins`] and [`extract_morphs`] respectively. We want to run
1742
/// those systems in parallel with mesh extraction for performance, so we need
1743
/// to defer setting of these mesh instance flags to after extraction, which
1744
/// this system does. An alternative to having skin- and morph-target-related
1745
/// data in [`RenderMeshInstanceFlags`] would be to have
1746
/// [`crate::material::queue_material_meshes`] check the skin and morph target
1747
/// tables for each mesh, but that would be too slow in the hot mesh queuing
1748
/// loop.
1749
pub fn set_mesh_motion_vector_flags(
1750
mut render_mesh_instances: ResMut<RenderMeshInstances>,
1751
skin_uniforms: Res<SkinUniforms>,
1752
morph_indices: Res<MorphIndices>,
1753
) {
1754
for &entity in skin_uniforms.all_skins() {
1755
render_mesh_instances
1756
.insert_mesh_instance_flags(entity, RenderMeshInstanceFlags::HAS_PREVIOUS_SKIN);
1757
}
1758
for &entity in morph_indices.prev.keys() {
1759
render_mesh_instances
1760
.insert_mesh_instance_flags(entity, RenderMeshInstanceFlags::HAS_PREVIOUS_MORPH);
1761
}
1762
}
1763
1764
/// Reusable channels that [`collect_meshes_for_gpu_building`] uses to pass
/// data from its parallel preparation workers to its single consumer task.
#[derive(Default)]
pub struct GpuMeshBuildingChunks {
    // Prepared mesh instances, plus optional culling data, ready to be
    // flushed into the GPU buffers by the consumer.
    prepared: BufferedChannel<(
        MainEntity,
        RenderMeshInstanceGpuPrepared,
        Option<MeshCullingData>,
    )>,
    // Meshes that couldn't be prepared (e.g. material not ready); these are
    // inserted into `MeshesToReextractNextFrame`.
    reextract: BufferedChannel<MainEntity>,
    // Meshes whose input uniforms should be removed from the buffer.
    removed: BufferedChannel<MainEntity>,
}
1774
1775
/// Creates the [`RenderMeshInstanceGpu`]s and [`MeshInputUniform`]s when GPU
/// mesh uniform building is in use.
///
/// Drains the per-thread [`RenderMeshInstanceGpuQueues`] in parallel producer
/// tasks, while a single consumer task serially applies the results to the
/// batched instance buffers (the serial step is the bottleneck, so it starts
/// immediately and streams chunks as they arrive). Does nothing when the
/// renderer is in CPU mesh-uniform-building mode.
pub fn collect_meshes_for_gpu_building(
    render_mesh_instances: ResMut<RenderMeshInstances>,
    batched_instance_buffers: ResMut<
        gpu_preprocessing::BatchedInstanceBuffers<MeshUniform, MeshInputUniform>,
    >,
    mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
    mut render_mesh_instance_queues: ResMut<RenderMeshInstanceGpuQueues>,
    mesh_allocator: Res<MeshAllocator>,
    mesh_material_ids: Res<RenderMaterialInstances>,
    render_material_bindings: Res<RenderMaterialBindings>,
    render_lightmaps: Res<RenderLightmaps>,
    skin_uniforms: Res<SkinUniforms>,
    frame_count: Res<FrameCount>,
    mut meshes_to_reextract_next_frame: ResMut<MeshesToReextractNextFrame>,
    chunks: Local<GpuMeshBuildingChunks>,
) {
    // Bail out early if we're in CPU building mode; this system only applies
    // to the GPU building path.
    let RenderMeshInstances::GpuBuilding(render_mesh_instances) =
        render_mesh_instances.into_inner()
    else {
        return;
    };

    // We're going to rebuild `meshes_to_reextract_next_frame`.
    meshes_to_reextract_next_frame.clear();

    // Collect render mesh instances. Build up the uniform buffer.
    let gpu_preprocessing::BatchedInstanceBuffers {
        current_input_buffer,
        previous_input_buffer,
        ..
    } = batched_instance_buffers.into_inner();
    previous_input_buffer.clear();

    // Channels used by parallel workers to send data to the single consumer.
    let (prepared_rx, prepared_tx) = chunks.prepared.unbounded();
    let (reextract_rx, reextract_tx) = chunks.reextract.unbounded();
    let (removed_rx, removed_tx) = chunks.removed.unbounded();

    // Reference data shared between tasks
    let mesh_allocator = &mesh_allocator;
    let mesh_material_ids = &mesh_material_ids;
    let render_material_bindings = &render_material_bindings;
    let render_lightmaps = &render_lightmaps;
    let skin_uniforms = &skin_uniforms;
    let frame_count = *frame_count;

    // Spawn workers on the taskpool to prepare and update meshes in parallel.
    ComputeTaskPool::get().scope(|scope| {
        // This worker is the bottleneck of mesh preparation and can only run serially, so we want
        // it to start working immediately. As soon as the parallel workers produce chunks of
        // prepared meshes, this worker will consume them and update the GPU buffers.
        scope.spawn(
            async move {
                // Phase 1: apply prepared instances (and optional culling
                // data) to the input buffers. This loop ends only when every
                // `prepared_tx` clone has been dropped.
                while let Ok(mut batch) = prepared_rx.recv().await {
                    for (entity, prepared, mesh_culling_builder) in batch.drain() {
                        // `update` returns the instance's slot in the input
                        // buffer, or `None` if the instance couldn't be
                        // written this frame.
                        let Some(instance_data_index) = prepared.update(
                            entity,
                            &mut *render_mesh_instances,
                            current_input_buffer,
                            previous_input_buffer,
                        ) else {
                            continue;
                        };
                        if let Some(mesh_culling_data) = mesh_culling_builder {
                            // Culling data lives at the same index as the
                            // instance's uniform data.
                            mesh_culling_data.update(
                                &mut mesh_culling_data_buffer,
                                instance_data_index as usize,
                            );
                        }
                    }
                }
                // Phase 2: process removals after all additions/updates.
                while let Ok(mut batch) = removed_rx.recv().await {
                    for entity in batch.drain() {
                        remove_mesh_input_uniform(
                            entity,
                            &mut *render_mesh_instances,
                            current_input_buffer,
                        );
                    }
                }
                // Phase 3: record entities that need re-extraction next frame.
                while let Ok(mut batch) = reextract_rx.recv().await {
                    for entity in batch.drain() {
                        meshes_to_reextract_next_frame.insert(entity);
                    }
                }
                // Buffers can't be empty. Make sure there's something in the previous input buffer.
                previous_input_buffer.ensure_nonempty();
            }
            .instrument(info_span!("collect_meshes_consumer")),
        );

        // Iterate through each queue, spawning a task for each queue. This loop completes quickly
        // as it does very little work, it is just spawning and moving data into tasks in a loop.
        for queue in render_mesh_instance_queues.iter_mut() {
            match *queue {
                RenderMeshInstanceGpuQueue::None => {
                    // This can only happen if the queue is empty.
                }

                RenderMeshInstanceGpuQueue::CpuCulling {
                    ref mut changed,
                    ref mut removed,
                } => {
                    // Clone senders so each task owns its own handles; the
                    // scope-owned originals are dropped below.
                    let mut prepared_tx = prepared_tx.clone();
                    let mut reextract_tx = reextract_tx.clone();
                    let mut removed_tx = removed_tx.clone();
                    scope.spawn(async move {
                        let _span = info_span!("prepared_mesh_producer").entered();
                        changed
                            .drain(..)
                            .for_each(
                                |(entity, mesh_instance_builder)| match mesh_instance_builder
                                    .prepare(
                                        entity,
                                        mesh_allocator,
                                        mesh_material_ids,
                                        render_material_bindings,
                                        render_lightmaps,
                                        skin_uniforms,
                                        frame_count,
                                    ) {
                                    Some(prepared) => {
                                        // CPU culling: no per-mesh culling
                                        // data is sent (the `None` below).
                                        prepared_tx.send_blocking((entity, prepared, None)).ok();
                                    }
                                    None => {
                                        // Preparation failed (e.g. resources
                                        // not ready); retry next frame.
                                        reextract_tx.send_blocking(entity).ok();
                                    }
                                },
                            );

                        for entity in removed.drain(..) {
                            removed_tx.send_blocking(entity).unwrap();
                        }
                    });
                }

                RenderMeshInstanceGpuQueue::GpuCulling {
                    ref mut changed,
                    ref mut removed,
                } => {
                    let mut prepared_tx = prepared_tx.clone();
                    let mut reextract_tx = reextract_tx.clone();
                    let mut removed_tx = removed_tx.clone();
                    scope.spawn(async move {
                        let _span = info_span!("prepared_mesh_producer").entered();
                        changed.drain(..).for_each(
                            |(entity, mesh_instance_builder, mesh_culling_builder)| {
                                match mesh_instance_builder.prepare(
                                    entity,
                                    mesh_allocator,
                                    mesh_material_ids,
                                    render_material_bindings,
                                    render_lightmaps,
                                    skin_uniforms,
                                    frame_count,
                                ) {
                                    Some(prepared) => {
                                        // GPU culling: forward the culling
                                        // builder alongside the instance.
                                        let data = (entity, prepared, Some(mesh_culling_builder));
                                        prepared_tx.send_blocking(data).ok();
                                    }
                                    None => {
                                        reextract_tx.send_blocking(entity).ok();
                                    }
                                }
                            },
                        );

                        for entity in removed.drain(..) {
                            removed_tx.send_blocking(entity).unwrap();
                        }
                    });
                }
            }
        }

        // Drop the senders owned by the scope, so the only senders left are those captured by the
        // spawned tasks. When the tasks are complete, the channels will close, and the consumer
        // will finish. Without this, the scope would deadlock on the blocked consumer.
        drop(prepared_tx);
        drop(reextract_tx);
        drop(removed_tx);
    });
}
1959
1960
/// All data needed to construct a pipeline for rendering 3D meshes.
#[derive(Resource, Clone)]
pub struct MeshPipeline {
    /// A reference to all the mesh pipeline view layouts.
    pub view_layouts: MeshPipelineViewLayouts,
    /// The buffer binding type (uniform or read-only storage) used for
    /// clustered-forward data, chosen based on device support.
    pub clustered_forward_buffer_binding_type: BufferBindingType,
    /// The per-mesh bind group layouts (skinned, morphed, lightmapped, …).
    pub mesh_layouts: MeshLayouts,
    /// The shader asset handle.
    pub shader: Handle<Shader>,
    /// `MeshUniform`s are stored in arrays in buffers. If storage buffers are available, they
    /// are used and this will be `None`, otherwise uniform buffers will be used with batches
    /// of this many `MeshUniform`s, stored at dynamic offsets within the uniform buffer.
    /// Use code like this in custom shaders:
    /// ```wgsl
    /// ##ifdef PER_OBJECT_BUFFER_BATCH_SIZE
    /// @group(1) @binding(0) var<uniform> mesh: array<Mesh, #{PER_OBJECT_BUFFER_BATCH_SIZE}u>;
    /// ##else
    /// @group(1) @binding(0) var<storage> mesh: array<Mesh>;
    /// ##endif // PER_OBJECT_BUFFER_BATCH_SIZE
    /// ```
    pub per_object_buffer_batch_size: Option<u32>,

    /// Whether binding arrays (a.k.a. bindless textures) are usable on the
    /// current render device.
    ///
    /// This affects whether reflection probes can be used.
    pub binding_arrays_are_usable: bool,

    /// Whether clustered decals are usable on the current render device.
    pub clustered_decals_are_usable: bool,

    /// Whether skins will use uniform buffers on account of storage buffers
    /// being unavailable on this platform.
    pub skins_use_uniform_buffers: bool,
}
1995
1996
impl FromWorld for MeshPipeline {
    /// Builds the pipeline resource by probing the render device/adapter for
    /// feature support and loading the embedded mesh shader.
    fn from_world(world: &mut World) -> Self {
        let shader = load_embedded_asset!(world, "mesh.wgsl");
        // Borrow the three render resources in one cached `SystemState` query.
        let mut system_state: SystemState<(
            Res<RenderDevice>,
            Res<RenderAdapter>,
            Res<MeshPipelineViewLayouts>,
        )> = SystemState::new(world);
        let (render_device, render_adapter, view_layouts) = system_state.get_mut(world);

        // Falls back to uniform buffers if the device can't bind this many
        // read-only storage buffers.
        let clustered_forward_buffer_binding_type = render_device
            .get_supported_read_only_binding_type(CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT);

        MeshPipeline {
            view_layouts: view_layouts.clone(),
            clustered_forward_buffer_binding_type,
            mesh_layouts: MeshLayouts::new(&render_device, &render_adapter),
            shader,
            // `None` when storage buffers are available (no batching limit).
            per_object_buffer_batch_size: GpuArrayBuffer::<MeshUniform>::batch_size(
                &render_device.limits(),
            ),
            binding_arrays_are_usable: binding_arrays_are_usable(&render_device, &render_adapter),
            clustered_decals_are_usable: decal::clustered::clustered_decals_are_usable(
                &render_device,
                &render_adapter,
            ),
            skins_use_uniform_buffers: skins_use_uniform_buffers(&render_device.limits()),
        }
    }
}
2026
2027
impl MeshPipeline {
    /// Returns the view bind group layout matching `layout_key`.
    ///
    /// Thin delegation to [`MeshPipelineViewLayouts::get_view_layout`].
    pub fn get_view_layout(
        &self,
        layout_key: MeshPipelineViewLayoutKey,
    ) -> &MeshPipelineViewLayout {
        self.view_layouts.get_view_layout(layout_key)
    }
}
2035
2036
/// A 1x1x1 'all 1.0' texture to use as a dummy texture in place of optional [`crate::pbr_material::StandardMaterial`] textures
pub fn build_dummy_white_gpu_image(
    render_device: Res<RenderDevice>,
    default_sampler: Res<DefaultImageSampler>,
    render_queue: Res<RenderQueue>,
) -> GpuImage {
    // `Image::default()` is the source of the pixel data and descriptors.
    let image = Image::default();
    let texture = render_device.create_texture(&image.texture_descriptor);
    let sampler = match image.sampler {
        ImageSampler::Default => (**default_sampler).clone(),
        ImageSampler::Descriptor(ref descriptor) => {
            render_device.create_sampler(&descriptor.as_wgpu())
        }
    };

    // Upload the pixel data; skipped entirely if the format has no computable
    // pixel size.
    // NOTE(review): on that skip path the texture is left unwritten but
    // `had_data` below is still `true` — confirm this is intended.
    if let Ok(format_size) = image.texture_descriptor.format.pixel_size() {
        render_queue.write_texture(
            texture.as_image_copy(),
            image.data.as_ref().expect("Image was created without data"),
            TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(image.width() * format_size as u32),
                rows_per_image: None,
            },
            image.texture_descriptor.size,
        );
    }

    let texture_view = texture.create_view(&TextureViewDescriptor::default());
    GpuImage {
        texture,
        texture_view,
        sampler,
        texture_descriptor: image.texture_descriptor,
        texture_view_descriptor: image.texture_view_descriptor,
        had_data: true,
    }
}
2074
2075
pub fn get_image_texture<'a>(
2076
dummy_white_gpu_image: &'a GpuImage,
2077
gpu_images: &'a RenderAssets<GpuImage>,
2078
handle_option: &Option<Handle<Image>>,
2079
) -> Option<(&'a TextureView, &'a Sampler)> {
2080
if let Some(handle) = handle_option {
2081
let gpu_image = gpu_images.get(handle)?;
2082
Some((&gpu_image.texture_view, &gpu_image.sampler))
2083
} else {
2084
Some((
2085
&dummy_white_gpu_image.texture_view,
2086
&dummy_white_gpu_image.sampler,
2087
))
2088
}
2089
}
2090
2091
impl GetBatchData for MeshPipeline {
    type Param = (
        SRes<RenderMeshInstances>,
        SRes<RenderLightmaps>,
        SRes<RenderAssets<RenderMesh>>,
        SRes<MeshAllocator>,
        SRes<SkinUniforms>,
    );
    // The material bind group ID, the mesh ID, and the lightmap ID,
    // respectively.
    type CompareData = (
        MaterialBindGroupIndex,
        AssetId<Mesh>,
        Option<LightmapSlabIndex>,
    );

    type BufferData = MeshUniform;

    /// Builds the per-instance [`MeshUniform`] for CPU mesh-uniform building,
    /// plus the comparison key used to decide whether adjacent instances can
    /// share a batch. Returns `None` for entities with no render mesh
    /// instance, or (with an error) when called in GPU building mode.
    fn get_batch_data(
        (mesh_instances, lightmaps, _, mesh_allocator, skin_uniforms): &SystemParamItem<
            Self::Param,
        >,
        (_entity, main_entity): (Entity, MainEntity),
    ) -> Option<(Self::BufferData, Option<Self::CompareData>)> {
        let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else {
            error!(
                "`get_batch_data` should never be called in GPU mesh uniform \
                building mode"
            );
            return None;
        };
        let mesh_instance = mesh_instances.get(&main_entity)?;
        // Defaults to 0 when the mesh has no allocated vertex slice yet.
        let first_vertex_index =
            match mesh_allocator.mesh_vertex_slice(&mesh_instance.mesh_asset_id) {
                Some(mesh_vertex_slice) => mesh_vertex_slice.range.start,
                None => 0,
            };
        let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity);

        let current_skin_index = skin_uniforms.skin_index(main_entity);
        let material_bind_group_index = mesh_instance.material_bindings_index;

        Some((
            MeshUniform::new(
                &mesh_instance.transforms,
                first_vertex_index,
                material_bind_group_index.slot,
                maybe_lightmap.map(|lightmap| (lightmap.slot_index, lightmap.uv_rect)),
                current_skin_index,
                Some(mesh_instance.tag),
            ),
            // `None` compare data means this instance must not be batched
            // with its neighbors.
            mesh_instance.should_batch().then_some((
                material_bind_group_index.group,
                mesh_instance.mesh_asset_id,
                maybe_lightmap.map(|lightmap| lightmap.slab_index),
            )),
        ))
    }
}
2150
2151
impl GetFullBatchData for MeshPipeline {
    type BufferInputData = MeshInputUniform;

    /// Returns the instance's index into the GPU input buffer plus its
    /// batch-comparison key. GPU-building-mode counterpart of
    /// `get_batch_data`; logs an error and returns `None` in CPU mode.
    fn get_index_and_compare_data(
        (mesh_instances, lightmaps, _, _, _): &SystemParamItem<Self::Param>,
        main_entity: MainEntity,
    ) -> Option<(NonMaxU32, Option<Self::CompareData>)> {
        // This should only be called during GPU building.
        let RenderMeshInstances::GpuBuilding(ref mesh_instances) = **mesh_instances else {
            error!(
                "`get_index_and_compare_data` should never be called in CPU mesh uniform building \
                mode"
            );
            return None;
        };

        let mesh_instance = mesh_instances.get(&main_entity)?;
        let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity);

        Some((
            mesh_instance.current_uniform_index,
            // `None` means this instance must not be batched.
            mesh_instance.should_batch().then_some((
                mesh_instance.material_bindings_index.group,
                mesh_instance.mesh_asset_id,
                maybe_lightmap.map(|lightmap| lightmap.slab_index),
            )),
        ))
    }

    /// Builds the [`MeshUniform`] for a binned-phase instance in CPU building
    /// mode. Unlike `get_batch_data`, no comparison key is produced (binning
    /// already groups compatible instances).
    fn get_binned_batch_data(
        (mesh_instances, lightmaps, _, mesh_allocator, skin_uniforms): &SystemParamItem<
            Self::Param,
        >,
        main_entity: MainEntity,
    ) -> Option<Self::BufferData> {
        let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else {
            error!(
                "`get_binned_batch_data` should never be called in GPU mesh uniform building mode"
            );
            return None;
        };
        let mesh_instance = mesh_instances.get(&main_entity)?;
        // Defaults to 0 when the mesh has no allocated vertex slice yet.
        let first_vertex_index =
            match mesh_allocator.mesh_vertex_slice(&mesh_instance.mesh_asset_id) {
                Some(mesh_vertex_slice) => mesh_vertex_slice.range.start,
                None => 0,
            };
        let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity);

        let current_skin_index = skin_uniforms.skin_index(main_entity);

        Some(MeshUniform::new(
            &mesh_instance.transforms,
            first_vertex_index,
            mesh_instance.material_bindings_index.slot,
            maybe_lightmap.map(|lightmap| (lightmap.slot_index, lightmap.uv_rect)),
            current_skin_index,
            Some(mesh_instance.tag),
        ))
    }

    /// Returns a binned-phase instance's index into the GPU input buffer.
    /// GPU-building-mode only; logs an error and returns `None` in CPU mode.
    fn get_binned_index(
        (mesh_instances, _, _, _, _): &SystemParamItem<Self::Param>,
        main_entity: MainEntity,
    ) -> Option<NonMaxU32> {
        // This should only be called during GPU building.
        let RenderMeshInstances::GpuBuilding(ref mesh_instances) = **mesh_instances else {
            error!(
                "`get_binned_index` should never be called in CPU mesh uniform \
                building mode"
            );
            return None;
        };

        mesh_instances
            .get(&main_entity)
            .map(|entity| entity.current_uniform_index)
    }

    /// Writes the CPU-side indirect-draw metadata for a batch into the
    /// indexed or non-indexed parameter buffer, as appropriate.
    fn write_batch_indirect_parameters_metadata(
        indexed: bool,
        base_output_index: u32,
        batch_set_index: Option<NonMaxU32>,
        phase_indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers,
        indirect_parameters_offset: u32,
    ) {
        let indirect_parameters = IndirectParametersCpuMetadata {
            base_output_index,
            // `!0` (all bits set) is the sentinel for "no batch set".
            batch_set_index: match batch_set_index {
                Some(batch_set_index) => u32::from(batch_set_index),
                None => !0,
            },
        };

        if indexed {
            phase_indirect_parameters_buffers
                .indexed
                .set(indirect_parameters_offset, indirect_parameters);
        } else {
            phase_indirect_parameters_buffers
                .non_indexed
                .set(indirect_parameters_offset, indirect_parameters);
        }
    }
}
2256
2257
bitflags::bitflags! {
    #[derive(Default, Clone, Copy, Debug, PartialEq, Eq, Hash)]
    #[repr(transparent)]
    // NOTE: Apparently quadro drivers support up to 64x MSAA.
    /// MSAA uses the highest 3 bits for the MSAA log2(sample count) to support up to 128x MSAA.
    pub struct MeshPipelineKey: u64 {
        // Nothing
        const NONE = 0;

        // Inherited bits
        const MORPH_TARGETS = BaseMeshPipelineKey::MORPH_TARGETS.bits();

        // Flag bits
        const HDR = 1 << 0;
        const TONEMAP_IN_SHADER = 1 << 1;
        const DEBAND_DITHER = 1 << 2;
        const DEPTH_PREPASS = 1 << 3;
        const NORMAL_PREPASS = 1 << 4;
        const DEFERRED_PREPASS = 1 << 5;
        const MOTION_VECTOR_PREPASS = 1 << 6;
        const MAY_DISCARD = 1 << 7; // Guards shader codepaths that may discard, allowing early depth tests in most cases
        // See: https://www.khronos.org/opengl/wiki/Early_Fragment_Test
        const ENVIRONMENT_MAP = 1 << 8;
        const SCREEN_SPACE_AMBIENT_OCCLUSION = 1 << 9;
        const UNCLIPPED_DEPTH_ORTHO = 1 << 10; // Disables depth clipping for use with directional light shadow views
        // Emulated via fragment shader depth on hardware that doesn't support it natively
        // See: https://www.w3.org/TR/webgpu/#depth-clipping and https://therealmjp.github.io/posts/shadow-maps/#disabling-z-clipping
        const TEMPORAL_JITTER = 1 << 11;
        const READS_VIEW_TRANSMISSION_TEXTURE = 1 << 12;
        const LIGHTMAPPED = 1 << 13;
        const LIGHTMAP_BICUBIC_SAMPLING = 1 << 14;
        const IRRADIANCE_VOLUME = 1 << 15;
        const VISIBILITY_RANGE_DITHER = 1 << 16;
        const SCREEN_SPACE_REFLECTIONS = 1 << 17;
        const HAS_PREVIOUS_SKIN = 1 << 18;
        const HAS_PREVIOUS_MORPH = 1 << 19;
        const OIT_ENABLED = 1 << 20;
        const DISTANCE_FOG = 1 << 21;
        const ATMOSPHERE = 1 << 22;
        const INVERT_CULLING = 1 << 23;
        const PREPASS_READS_MATERIAL = 1 << 24;
        // Marks the highest single-bit flag; the bitfield sections below are
        // packed immediately after it (see `MSAA_SHIFT_BITS`).
        const LAST_FLAG = Self::PREPASS_READS_MATERIAL.bits();

        // Union of every flag that affects prepass pipelines.
        const ALL_PREPASS_BITS = Self::DEPTH_PREPASS.bits()
            | Self::NORMAL_PREPASS.bits()
            | Self::DEFERRED_PREPASS.bits()
            | Self::MOTION_VECTOR_PREPASS.bits()
            | Self::MAY_DISCARD.bits()
            | Self::PREPASS_READS_MATERIAL.bits();

        // Bitfields
        const MSAA_RESERVED_BITS = Self::MSAA_MASK_BITS << Self::MSAA_SHIFT_BITS;
        const BLEND_RESERVED_BITS = Self::BLEND_MASK_BITS << Self::BLEND_SHIFT_BITS; // ← Bitmask reserving bits for the blend state
        const BLEND_OPAQUE = 0 << Self::BLEND_SHIFT_BITS; // ← Values are just sequential within the mask
        const BLEND_PREMULTIPLIED_ALPHA = 1 << Self::BLEND_SHIFT_BITS; // ← As blend states is on 3 bits, it can range from 0 to 7
        const BLEND_MULTIPLY = 2 << Self::BLEND_SHIFT_BITS; // ← See `BLEND_MASK_BITS` for the number of bits available
        const BLEND_ALPHA = 3 << Self::BLEND_SHIFT_BITS; //
        const BLEND_ALPHA_TO_COVERAGE = 4 << Self::BLEND_SHIFT_BITS; // ← We still have room for three more values without adding more bits
        const TONEMAP_METHOD_RESERVED_BITS = Self::TONEMAP_METHOD_MASK_BITS << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_NONE = 0 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_REINHARD = 1 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_REINHARD_LUMINANCE = 2 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_ACES_FITTED = 3 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_AGX = 4 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM = 5 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_TONY_MC_MAPFACE = 6 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const TONEMAP_METHOD_BLENDER_FILMIC = 7 << Self::TONEMAP_METHOD_SHIFT_BITS;
        const SHADOW_FILTER_METHOD_RESERVED_BITS = Self::SHADOW_FILTER_METHOD_MASK_BITS << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
        const SHADOW_FILTER_METHOD_HARDWARE_2X2 = 0 << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
        const SHADOW_FILTER_METHOD_GAUSSIAN = 1 << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
        const SHADOW_FILTER_METHOD_TEMPORAL = 2 << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
        const VIEW_PROJECTION_RESERVED_BITS = Self::VIEW_PROJECTION_MASK_BITS << Self::VIEW_PROJECTION_SHIFT_BITS;
        const VIEW_PROJECTION_NONSTANDARD = 0 << Self::VIEW_PROJECTION_SHIFT_BITS;
        const VIEW_PROJECTION_PERSPECTIVE = 1 << Self::VIEW_PROJECTION_SHIFT_BITS;
        const VIEW_PROJECTION_ORTHOGRAPHIC = 2 << Self::VIEW_PROJECTION_SHIFT_BITS;
        const VIEW_PROJECTION_RESERVED = 3 << Self::VIEW_PROJECTION_SHIFT_BITS;
        const SCREEN_SPACE_SPECULAR_TRANSMISSION_RESERVED_BITS = Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_MASK_BITS << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
        const SCREEN_SPACE_SPECULAR_TRANSMISSION_LOW = 0 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
        const SCREEN_SPACE_SPECULAR_TRANSMISSION_MEDIUM = 1 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
        const SCREEN_SPACE_SPECULAR_TRANSMISSION_HIGH = 2 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
        const SCREEN_SPACE_SPECULAR_TRANSMISSION_ULTRA = 3 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
        // Union of all bitfield sections; used by the `const_assert_eq!`
        // checks below to detect overlap with flag and topology bits.
        const ALL_RESERVED_BITS =
            Self::BLEND_RESERVED_BITS.bits() |
            Self::MSAA_RESERVED_BITS.bits() |
            Self::TONEMAP_METHOD_RESERVED_BITS.bits() |
            Self::SHADOW_FILTER_METHOD_RESERVED_BITS.bits() |
            Self::VIEW_PROJECTION_RESERVED_BITS.bits() |
            Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_RESERVED_BITS.bits();
    }
}
2347
2348
impl MeshPipelineKey {
    // Each bitfield section is laid out immediately after the previous one:
    // `*_SHIFT_BITS` = previous section's shift + its mask width. The chain
    // starts just past `LAST_FLAG`.
    const MSAA_MASK_BITS: u64 = 0b111;
    const MSAA_SHIFT_BITS: u64 = Self::LAST_FLAG.bits().trailing_zeros() as u64 + 1;

    const BLEND_MASK_BITS: u64 = 0b111;
    const BLEND_SHIFT_BITS: u64 = Self::MSAA_MASK_BITS.count_ones() as u64 + Self::MSAA_SHIFT_BITS;

    const TONEMAP_METHOD_MASK_BITS: u64 = 0b111;
    const TONEMAP_METHOD_SHIFT_BITS: u64 =
        Self::BLEND_MASK_BITS.count_ones() as u64 + Self::BLEND_SHIFT_BITS;

    const SHADOW_FILTER_METHOD_MASK_BITS: u64 = 0b11;
    const SHADOW_FILTER_METHOD_SHIFT_BITS: u64 =
        Self::TONEMAP_METHOD_MASK_BITS.count_ones() as u64 + Self::TONEMAP_METHOD_SHIFT_BITS;

    const VIEW_PROJECTION_MASK_BITS: u64 = 0b11;
    const VIEW_PROJECTION_SHIFT_BITS: u64 = Self::SHADOW_FILTER_METHOD_MASK_BITS.count_ones()
        as u64
        + Self::SHADOW_FILTER_METHOD_SHIFT_BITS;

    const SCREEN_SPACE_SPECULAR_TRANSMISSION_MASK_BITS: u64 = 0b11;
    const SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS: u64 =
        Self::VIEW_PROJECTION_MASK_BITS.count_ones() as u64 + Self::VIEW_PROJECTION_SHIFT_BITS;

    /// Packs a power-of-two MSAA sample count into the key's MSAA bitfield
    /// (stored as log2 of the sample count).
    pub fn from_msaa_samples(msaa_samples: u32) -> Self {
        let msaa_bits =
            (msaa_samples.trailing_zeros() as u64 & Self::MSAA_MASK_BITS) << Self::MSAA_SHIFT_BITS;
        Self::from_bits_retain(msaa_bits)
    }

    /// Returns [`Self::HDR`] when `hdr` is true, otherwise an empty key.
    pub fn from_hdr(hdr: bool) -> Self {
        if hdr {
            MeshPipelineKey::HDR
        } else {
            MeshPipelineKey::NONE
        }
    }

    /// Extracts the MSAA sample count from the key (inverse of
    /// [`Self::from_msaa_samples`]).
    pub fn msaa_samples(&self) -> u32 {
        1 << ((self.bits() >> Self::MSAA_SHIFT_BITS) & Self::MSAA_MASK_BITS)
    }

    /// Packs a primitive topology into the key, using the topology bitfield
    /// defined by [`BaseMeshPipelineKey`].
    pub fn from_primitive_topology(primitive_topology: PrimitiveTopology) -> Self {
        let primitive_topology_bits = ((primitive_topology as u64)
            & BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_MASK_BITS)
            << BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_SHIFT_BITS;
        Self::from_bits_retain(primitive_topology_bits)
    }

    /// Extracts the primitive topology from the key (inverse of
    /// [`Self::from_primitive_topology`]); unrecognized bit patterns fall
    /// back to the default topology.
    pub fn primitive_topology(&self) -> PrimitiveTopology {
        let primitive_topology_bits = (self.bits()
            >> BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_SHIFT_BITS)
            & BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_MASK_BITS;
        match primitive_topology_bits {
            x if x == PrimitiveTopology::PointList as u64 => PrimitiveTopology::PointList,
            x if x == PrimitiveTopology::LineList as u64 => PrimitiveTopology::LineList,
            x if x == PrimitiveTopology::LineStrip as u64 => PrimitiveTopology::LineStrip,
            x if x == PrimitiveTopology::TriangleList as u64 => PrimitiveTopology::TriangleList,
            x if x == PrimitiveTopology::TriangleStrip as u64 => PrimitiveTopology::TriangleStrip,
            _ => PrimitiveTopology::default(),
        }
    }
}
2411
2412
impl From<u64> for MeshPipelineKey {
    /// Reinterprets raw bits as a key, retaining any bits outside the defined
    /// flags (bitfield values live there).
    fn from(value: u64) -> Self {
        MeshPipelineKey::from_bits_retain(value)
    }
}
2417
2418
impl From<MeshPipelineKey> for u64 {
    /// Exposes the key's raw bit representation.
    fn from(value: MeshPipelineKey) -> Self {
        value.bits()
    }
}
2423
2424
// Ensure that we didn't overflow the number of bits available in `MeshPipelineKey`.
// (Compile-time check: the flag+reserved bits must not collide with the bits
// `BaseMeshPipelineKey` claims for itself.)
const_assert_eq!(
    (((MeshPipelineKey::LAST_FLAG.bits() << 1) - 1) | MeshPipelineKey::ALL_RESERVED_BITS.bits())
        & BaseMeshPipelineKey::all().bits(),
    0
);

// Ensure that the reserved bits don't overlap with the topology bits
const_assert_eq!(
    (BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_MASK_BITS
        << BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_SHIFT_BITS)
        & MeshPipelineKey::ALL_RESERVED_BITS.bits(),
    0
);
2438
2439
fn is_skinned(layout: &MeshVertexBufferLayoutRef) -> bool {
2440
layout.0.contains(Mesh::ATTRIBUTE_JOINT_INDEX)
2441
&& layout.0.contains(Mesh::ATTRIBUTE_JOINT_WEIGHT)
2442
}
2443
/// Adds the shader defs and vertex attributes required for skinning and morph
/// targets, and returns the matching per-mesh bind group layout.
///
/// `offset` is the first shader location available for the skinning
/// attributes (joint indices take `offset`, joint weights `offset + 1`).
/// `shader_defs` and `vertex_attributes` are appended to in place.
pub fn setup_morph_and_skinning_defs(
    mesh_layouts: &MeshLayouts,
    layout: &MeshVertexBufferLayoutRef,
    offset: u32,
    key: &MeshPipelineKey,
    shader_defs: &mut Vec<ShaderDefVal>,
    vertex_attributes: &mut Vec<VertexAttributeDescriptor>,
    skins_use_uniform_buffers: bool,
) -> BindGroupLayoutDescriptor {
    let is_morphed = key.intersects(MeshPipelineKey::MORPH_TARGETS);
    let is_lightmapped = key.intersects(MeshPipelineKey::LIGHTMAPPED);
    let motion_vector_prepass = key.intersects(MeshPipelineKey::MOTION_VECTOR_PREPASS);

    if skins_use_uniform_buffers {
        shader_defs.push("SKINS_USE_UNIFORM_BUFFERS".into());
    }

    // Shared by every skinned arm below: enables skinning in the shader and
    // wires up the two skinning vertex attributes.
    let mut add_skin_data = || {
        shader_defs.push("SKINNED".into());
        vertex_attributes.push(Mesh::ATTRIBUTE_JOINT_INDEX.at_shader_location(offset));
        vertex_attributes.push(Mesh::ATTRIBUTE_JOINT_WEIGHT.at_shader_location(offset + 1));
    };

    // Select the bind group layout from the (skinned, morphed, lightmapped,
    // motion-vector-prepass) combination. Note the lightmap flag only matters
    // when the mesh is neither skinned nor morphed.
    match (
        is_skinned(layout),
        is_morphed,
        is_lightmapped,
        motion_vector_prepass,
    ) {
        (true, false, _, true) => {
            add_skin_data();
            mesh_layouts.skinned_motion.clone()
        }
        (true, false, _, false) => {
            add_skin_data();
            mesh_layouts.skinned.clone()
        }
        (true, true, _, true) => {
            add_skin_data();
            shader_defs.push("MORPH_TARGETS".into());
            mesh_layouts.morphed_skinned_motion.clone()
        }
        (true, true, _, false) => {
            add_skin_data();
            shader_defs.push("MORPH_TARGETS".into());
            mesh_layouts.morphed_skinned.clone()
        }
        (false, true, _, true) => {
            shader_defs.push("MORPH_TARGETS".into());
            mesh_layouts.morphed_motion.clone()
        }
        (false, true, _, false) => {
            shader_defs.push("MORPH_TARGETS".into());
            mesh_layouts.morphed.clone()
        }
        (false, false, true, _) => mesh_layouts.lightmapped.clone(),
        (false, false, false, _) => mesh_layouts.model_only.clone(),
    }
}
2502
2503
impl SpecializedMeshPipeline for MeshPipeline {
2504
type Key = MeshPipelineKey;
2505
2506
fn specialize(
2507
&self,
2508
key: Self::Key,
2509
layout: &MeshVertexBufferLayoutRef,
2510
) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
2511
let mut shader_defs = Vec::new();
2512
let mut vertex_attributes = Vec::new();
2513
2514
// Let the shader code know that it's running in a mesh pipeline.
2515
shader_defs.push("MESH_PIPELINE".into());
2516
2517
shader_defs.push("VERTEX_OUTPUT_INSTANCE_INDEX".into());
2518
2519
if layout.0.contains(Mesh::ATTRIBUTE_POSITION) {
2520
shader_defs.push("VERTEX_POSITIONS".into());
2521
vertex_attributes.push(Mesh::ATTRIBUTE_POSITION.at_shader_location(0));
2522
}
2523
2524
if layout.0.contains(Mesh::ATTRIBUTE_NORMAL) {
2525
shader_defs.push("VERTEX_NORMALS".into());
2526
vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(1));
2527
}
2528
2529
if layout.0.contains(Mesh::ATTRIBUTE_UV_0) {
2530
shader_defs.push("VERTEX_UVS".into());
2531
shader_defs.push("VERTEX_UVS_A".into());
2532
vertex_attributes.push(Mesh::ATTRIBUTE_UV_0.at_shader_location(2));
2533
}
2534
2535
if layout.0.contains(Mesh::ATTRIBUTE_UV_1) {
2536
shader_defs.push("VERTEX_UVS".into());
2537
shader_defs.push("VERTEX_UVS_B".into());
2538
vertex_attributes.push(Mesh::ATTRIBUTE_UV_1.at_shader_location(3));
2539
}
2540
2541
if layout.0.contains(Mesh::ATTRIBUTE_TANGENT) {
2542
shader_defs.push("VERTEX_TANGENTS".into());
2543
vertex_attributes.push(Mesh::ATTRIBUTE_TANGENT.at_shader_location(4));
2544
}
2545
2546
if layout.0.contains(Mesh::ATTRIBUTE_COLOR) {
2547
shader_defs.push("VERTEX_COLORS".into());
2548
vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(5));
2549
}
2550
2551
if cfg!(feature = "pbr_transmission_textures") {
2552
shader_defs.push("PBR_TRANSMISSION_TEXTURES_SUPPORTED".into());
2553
}
2554
if cfg!(feature = "pbr_multi_layer_material_textures") {
2555
shader_defs.push("PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED".into());
2556
}
2557
if cfg!(feature = "pbr_anisotropy_texture") {
2558
shader_defs.push("PBR_ANISOTROPY_TEXTURE_SUPPORTED".into());
2559
}
2560
if cfg!(feature = "pbr_specular_textures") {
2561
shader_defs.push("PBR_SPECULAR_TEXTURES_SUPPORTED".into());
2562
}
2563
if cfg!(feature = "bluenoise_texture") {
2564
shader_defs.push("BLUE_NOISE_TEXTURE".into());
2565
}
2566
2567
let bind_group_layout = self.get_view_layout(key.into());
2568
let mut bind_group_layout = vec![
2569
bind_group_layout.main_layout.clone(),
2570
bind_group_layout.binding_array_layout.clone(),
2571
];
2572
2573
if key.msaa_samples() > 1 {
2574
shader_defs.push("MULTISAMPLED".into());
2575
};
2576
2577
bind_group_layout.push(setup_morph_and_skinning_defs(
2578
&self.mesh_layouts,
2579
layout,
2580
6,
2581
&key,
2582
&mut shader_defs,
2583
&mut vertex_attributes,
2584
self.skins_use_uniform_buffers,
2585
));
2586
2587
if key.contains(MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION) {
2588
shader_defs.push("SCREEN_SPACE_AMBIENT_OCCLUSION".into());
2589
}
2590
2591
let vertex_buffer_layout = layout.0.get_layout(&vertex_attributes)?;
2592
2593
let (label, blend, depth_write_enabled);
2594
let pass = key.intersection(MeshPipelineKey::BLEND_RESERVED_BITS);
2595
let (mut is_opaque, mut alpha_to_coverage_enabled) = (false, false);
2596
if key.contains(MeshPipelineKey::OIT_ENABLED) && pass == MeshPipelineKey::BLEND_ALPHA {
2597
label = "oit_mesh_pipeline".into();
2598
// TODO tail blending would need alpha blending
2599
blend = None;
2600
shader_defs.push("OIT_ENABLED".into());
2601
// TODO it should be possible to use this to combine MSAA and OIT
2602
// alpha_to_coverage_enabled = true;
2603
depth_write_enabled = false;
2604
} else if pass == MeshPipelineKey::BLEND_ALPHA {
2605
label = "alpha_blend_mesh_pipeline".into();
2606
blend = Some(BlendState::ALPHA_BLENDING);
2607
// For the transparent pass, fragments that are closer will be alpha blended
2608
// but their depth is not written to the depth buffer
2609
depth_write_enabled = false;
2610
} else if pass == MeshPipelineKey::BLEND_PREMULTIPLIED_ALPHA {
2611
label = "premultiplied_alpha_mesh_pipeline".into();
2612
blend = Some(BlendState::PREMULTIPLIED_ALPHA_BLENDING);
2613
shader_defs.push("PREMULTIPLY_ALPHA".into());
2614
shader_defs.push("BLEND_PREMULTIPLIED_ALPHA".into());
2615
// For the transparent pass, fragments that are closer will be alpha blended
2616
// but their depth is not written to the depth buffer
2617
depth_write_enabled = false;
2618
} else if pass == MeshPipelineKey::BLEND_MULTIPLY {
2619
label = "multiply_mesh_pipeline".into();
2620
blend = Some(BlendState {
2621
color: BlendComponent {
2622
src_factor: BlendFactor::Dst,
2623
dst_factor: BlendFactor::OneMinusSrcAlpha,
2624
operation: BlendOperation::Add,
2625
},
2626
alpha: BlendComponent::OVER,
2627
});
2628
shader_defs.push("PREMULTIPLY_ALPHA".into());
2629
shader_defs.push("BLEND_MULTIPLY".into());
2630
// For the multiply pass, fragments that are closer will be alpha blended
2631
// but their depth is not written to the depth buffer
2632
depth_write_enabled = false;
2633
} else if pass == MeshPipelineKey::BLEND_ALPHA_TO_COVERAGE {
2634
label = "alpha_to_coverage_mesh_pipeline".into();
2635
// BlendState::REPLACE is not needed here, and None will be potentially much faster in some cases
2636
blend = None;
2637
// For the opaque and alpha mask passes, fragments that are closer will replace
2638
// the current fragment value in the output and the depth is written to the
2639
// depth buffer
2640
depth_write_enabled = true;
2641
is_opaque = !key.contains(MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE);
2642
alpha_to_coverage_enabled = true;
2643
shader_defs.push("ALPHA_TO_COVERAGE".into());
2644
} else {
2645
label = "opaque_mesh_pipeline".into();
2646
// BlendState::REPLACE is not needed here, and None will be potentially much faster in some cases
2647
blend = None;
2648
// For the opaque and alpha mask passes, fragments that are closer will replace
2649
// the current fragment value in the output and the depth is written to the
2650
// depth buffer
2651
depth_write_enabled = true;
2652
is_opaque = !key.contains(MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE);
2653
}
2654
2655
if key.contains(MeshPipelineKey::NORMAL_PREPASS) {
2656
shader_defs.push("NORMAL_PREPASS".into());
2657
}
2658
2659
if key.contains(MeshPipelineKey::DEPTH_PREPASS) {
2660
shader_defs.push("DEPTH_PREPASS".into());
2661
}
2662
2663
if key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) {
2664
shader_defs.push("MOTION_VECTOR_PREPASS".into());
2665
}
2666
2667
if key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) {
2668
shader_defs.push("HAS_PREVIOUS_SKIN".into());
2669
}
2670
2671
if key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) {
2672
shader_defs.push("HAS_PREVIOUS_MORPH".into());
2673
}
2674
2675
if key.contains(MeshPipelineKey::DEFERRED_PREPASS) {
2676
shader_defs.push("DEFERRED_PREPASS".into());
2677
}
2678
2679
if key.contains(MeshPipelineKey::NORMAL_PREPASS) && key.msaa_samples() == 1 && is_opaque {
2680
shader_defs.push("LOAD_PREPASS_NORMALS".into());
2681
}
2682
2683
let view_projection = key.intersection(MeshPipelineKey::VIEW_PROJECTION_RESERVED_BITS);
2684
if view_projection == MeshPipelineKey::VIEW_PROJECTION_NONSTANDARD {
2685
shader_defs.push("VIEW_PROJECTION_NONSTANDARD".into());
2686
} else if view_projection == MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE {
2687
shader_defs.push("VIEW_PROJECTION_PERSPECTIVE".into());
2688
} else if view_projection == MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC {
2689
shader_defs.push("VIEW_PROJECTION_ORTHOGRAPHIC".into());
2690
}
2691
2692
#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
2693
shader_defs.push("WEBGL2".into());
2694
2695
#[cfg(feature = "experimental_pbr_pcss")]
2696
shader_defs.push("PCSS_SAMPLERS_AVAILABLE".into());
2697
2698
if key.contains(MeshPipelineKey::TONEMAP_IN_SHADER) {
2699
shader_defs.push("TONEMAP_IN_SHADER".into());
2700
shader_defs.push(ShaderDefVal::UInt(
2701
"TONEMAPPING_LUT_TEXTURE_BINDING_INDEX".into(),
2702
TONEMAPPING_LUT_TEXTURE_BINDING_INDEX,
2703
));
2704
shader_defs.push(ShaderDefVal::UInt(
2705
"TONEMAPPING_LUT_SAMPLER_BINDING_INDEX".into(),
2706
TONEMAPPING_LUT_SAMPLER_BINDING_INDEX,
2707
));
2708
2709
let method = key.intersection(MeshPipelineKey::TONEMAP_METHOD_RESERVED_BITS);
2710
2711
if method == MeshPipelineKey::TONEMAP_METHOD_NONE {
2712
shader_defs.push("TONEMAP_METHOD_NONE".into());
2713
} else if method == MeshPipelineKey::TONEMAP_METHOD_REINHARD {
2714
shader_defs.push("TONEMAP_METHOD_REINHARD".into());
2715
} else if method == MeshPipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE {
2716
shader_defs.push("TONEMAP_METHOD_REINHARD_LUMINANCE".into());
2717
} else if method == MeshPipelineKey::TONEMAP_METHOD_ACES_FITTED {
2718
shader_defs.push("TONEMAP_METHOD_ACES_FITTED".into());
2719
} else if method == MeshPipelineKey::TONEMAP_METHOD_AGX {
2720
shader_defs.push("TONEMAP_METHOD_AGX".into());
2721
} else if method == MeshPipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM {
2722
shader_defs.push("TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM".into());
2723
} else if method == MeshPipelineKey::TONEMAP_METHOD_BLENDER_FILMIC {
2724
shader_defs.push("TONEMAP_METHOD_BLENDER_FILMIC".into());
2725
} else if method == MeshPipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE {
2726
shader_defs.push("TONEMAP_METHOD_TONY_MC_MAPFACE".into());
2727
}
2728
2729
// Debanding is tied to tonemapping in the shader, cannot run without it.
2730
if key.contains(MeshPipelineKey::DEBAND_DITHER) {
2731
shader_defs.push("DEBAND_DITHER".into());
2732
}
2733
}
2734
2735
if key.contains(MeshPipelineKey::MAY_DISCARD) {
2736
shader_defs.push("MAY_DISCARD".into());
2737
}
2738
2739
if key.contains(MeshPipelineKey::ENVIRONMENT_MAP) {
2740
shader_defs.push("ENVIRONMENT_MAP".into());
2741
}
2742
2743
if key.contains(MeshPipelineKey::IRRADIANCE_VOLUME) && IRRADIANCE_VOLUMES_ARE_USABLE {
2744
shader_defs.push("IRRADIANCE_VOLUME".into());
2745
}
2746
2747
if key.contains(MeshPipelineKey::LIGHTMAPPED) {
2748
shader_defs.push("LIGHTMAP".into());
2749
}
2750
if key.contains(MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING) {
2751
shader_defs.push("LIGHTMAP_BICUBIC_SAMPLING".into());
2752
}
2753
2754
if key.contains(MeshPipelineKey::TEMPORAL_JITTER) {
2755
shader_defs.push("TEMPORAL_JITTER".into());
2756
}
2757
2758
let shadow_filter_method =
2759
key.intersection(MeshPipelineKey::SHADOW_FILTER_METHOD_RESERVED_BITS);
2760
if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2 {
2761
shader_defs.push("SHADOW_FILTER_METHOD_HARDWARE_2X2".into());
2762
} else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN {
2763
shader_defs.push("SHADOW_FILTER_METHOD_GAUSSIAN".into());
2764
} else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL {
2765
shader_defs.push("SHADOW_FILTER_METHOD_TEMPORAL".into());
2766
}
2767
2768
let blur_quality =
2769
key.intersection(MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_RESERVED_BITS);
2770
2771
shader_defs.push(ShaderDefVal::Int(
2772
"SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS".into(),
2773
match blur_quality {
2774
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_LOW => 4,
2775
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_MEDIUM => 8,
2776
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_HIGH => 16,
2777
MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_ULTRA => 32,
2778
_ => unreachable!(), // Not possible, since the mask is 2 bits, and we've covered all 4 cases
2779
},
2780
));
2781
2782
if key.contains(MeshPipelineKey::VISIBILITY_RANGE_DITHER) {
2783
shader_defs.push("VISIBILITY_RANGE_DITHER".into());
2784
}
2785
2786
if key.contains(MeshPipelineKey::DISTANCE_FOG) {
2787
shader_defs.push("DISTANCE_FOG".into());
2788
}
2789
2790
if key.contains(MeshPipelineKey::ATMOSPHERE) {
2791
shader_defs.push("ATMOSPHERE".into());
2792
}
2793
2794
if self.binding_arrays_are_usable {
2795
shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into());
2796
shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into());
2797
}
2798
2799
if IRRADIANCE_VOLUMES_ARE_USABLE {
2800
shader_defs.push("IRRADIANCE_VOLUMES_ARE_USABLE".into());
2801
}
2802
2803
if self.clustered_decals_are_usable {
2804
shader_defs.push("CLUSTERED_DECALS_ARE_USABLE".into());
2805
if cfg!(feature = "pbr_light_textures") {
2806
shader_defs.push("LIGHT_TEXTURES".into());
2807
}
2808
}
2809
2810
let format = if key.contains(MeshPipelineKey::HDR) {
2811
ViewTarget::TEXTURE_FORMAT_HDR
2812
} else {
2813
TextureFormat::bevy_default()
2814
};
2815
2816
// This is defined here so that custom shaders that use something other than
2817
// the mesh binding from bevy_pbr::mesh_bindings can easily make use of this
2818
// in their own shaders.
2819
if let Some(per_object_buffer_batch_size) = self.per_object_buffer_batch_size {
2820
shader_defs.push(ShaderDefVal::UInt(
2821
"PER_OBJECT_BUFFER_BATCH_SIZE".into(),
2822
per_object_buffer_batch_size,
2823
));
2824
}
2825
2826
Ok(RenderPipelineDescriptor {
2827
vertex: VertexState {
2828
shader: self.shader.clone(),
2829
shader_defs: shader_defs.clone(),
2830
buffers: vec![vertex_buffer_layout],
2831
..default()
2832
},
2833
fragment: Some(FragmentState {
2834
shader: self.shader.clone(),
2835
shader_defs,
2836
targets: vec![Some(ColorTargetState {
2837
format,
2838
blend,
2839
write_mask: ColorWrites::ALL,
2840
})],
2841
..default()
2842
}),
2843
layout: bind_group_layout,
2844
primitive: PrimitiveState {
2845
cull_mode: Some(Face::Back),
2846
unclipped_depth: false,
2847
topology: key.primitive_topology(),
2848
..default()
2849
},
2850
depth_stencil: Some(DepthStencilState {
2851
format: CORE_3D_DEPTH_FORMAT,
2852
depth_write_enabled,
2853
depth_compare: CompareFunction::GreaterEqual,
2854
stencil: StencilState {
2855
front: StencilFaceState::IGNORE,
2856
back: StencilFaceState::IGNORE,
2857
read_mask: 0,
2858
write_mask: 0,
2859
},
2860
bias: DepthBiasState {
2861
constant: 0,
2862
slope_scale: 0.0,
2863
clamp: 0.0,
2864
},
2865
}),
2866
multisample: MultisampleState {
2867
count: key.msaa_samples(),
2868
mask: !0,
2869
alpha_to_coverage_enabled,
2870
},
2871
label: Some(label),
2872
..default()
2873
})
2874
}
2875
}
2876
2877
/// The bind groups for meshes currently loaded.
///
/// If GPU mesh preprocessing isn't in use, these are global to the scene. If
/// GPU mesh preprocessing is in use, these are specific to a single phase.
#[derive(Default)]
pub struct MeshPhaseBindGroups {
    // Bind group for plain meshes: no skinning, no morph targets, no lightmap.
    model_only: Option<BindGroup>,
    // Shared bind group pair (with/without motion vectors) for all skinned
    // meshes; skin data is addressed via dynamic offsets, so one pair suffices.
    skinned: Option<MeshBindGroupPair>,
    // Per-mesh bind group pairs for meshes with morph targets, keyed by the
    // mesh asset, since morph target textures differ per mesh.
    morph_targets: HashMap<AssetId<Mesh>, MeshBindGroupPair>,
    // One bind group per lightmap slab.
    lightmaps: HashMap<LightmapSlabIndex, BindGroup>,
}
/// A pair of mesh bind groups: one used when motion vectors are being
/// computed (it additionally binds the previous frame's data) and one used
/// when they aren't.
pub struct MeshBindGroupPair {
    // Variant that binds previous-frame buffers for motion vector computation.
    motion_vectors: BindGroup,
    // Variant without the previous-frame bindings.
    no_motion_vectors: BindGroup,
}
/// All bind groups for meshes currently loaded.
#[derive(Resource)]
pub enum MeshBindGroups {
    /// The bind groups for the meshes for the entire scene, if GPU mesh
    /// preprocessing isn't in use.
    CpuPreprocessing(MeshPhaseBindGroups),
    /// A mapping from the type ID of a phase (e.g. [`Opaque3d`]) to the mesh
    /// bind groups for that phase.
    GpuPreprocessing(TypeIdMap<MeshPhaseBindGroups>),
}
impl MeshPhaseBindGroups {
2906
pub fn reset(&mut self) {
2907
self.model_only = None;
2908
self.skinned = None;
2909
self.morph_targets.clear();
2910
self.lightmaps.clear();
2911
}
2912
/// Get the `BindGroup` for `RenderMesh` with given `handle_id` and lightmap
2913
/// key `lightmap`.
2914
pub fn get(
2915
&self,
2916
asset_id: AssetId<Mesh>,
2917
lightmap: Option<LightmapSlabIndex>,
2918
is_skinned: bool,
2919
morph: bool,
2920
motion_vectors: bool,
2921
) -> Option<&BindGroup> {
2922
match (is_skinned, morph, lightmap) {
2923
(_, true, _) => self
2924
.morph_targets
2925
.get(&asset_id)
2926
.map(|bind_group_pair| bind_group_pair.get(motion_vectors)),
2927
(true, false, _) => self
2928
.skinned
2929
.as_ref()
2930
.map(|bind_group_pair| bind_group_pair.get(motion_vectors)),
2931
(false, false, Some(lightmap_slab)) => self.lightmaps.get(&lightmap_slab),
2932
(false, false, None) => self.model_only.as_ref(),
2933
}
2934
}
2935
}
2936
2937
impl MeshBindGroupPair {
2938
fn get(&self, motion_vectors: bool) -> &BindGroup {
2939
if motion_vectors {
2940
&self.motion_vectors
2941
} else {
2942
&self.no_motion_vectors
2943
}
2944
}
2945
}
2946
2947
/// Creates the per-mesh bind groups for each type of mesh and each phase.
2948
pub fn prepare_mesh_bind_groups(
2949
mut commands: Commands,
2950
meshes: Res<RenderAssets<RenderMesh>>,
2951
mesh_pipeline: Res<MeshPipeline>,
2952
render_device: Res<RenderDevice>,
2953
pipeline_cache: Res<PipelineCache>,
2954
cpu_batched_instance_buffer: Option<
2955
Res<no_gpu_preprocessing::BatchedInstanceBuffer<MeshUniform>>,
2956
>,
2957
gpu_batched_instance_buffers: Option<
2958
Res<gpu_preprocessing::BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
2959
>,
2960
skins_uniform: Res<SkinUniforms>,
2961
weights_uniform: Res<MorphUniforms>,
2962
mut render_lightmaps: ResMut<RenderLightmaps>,
2963
) {
2964
// CPU mesh preprocessing path.
2965
if let Some(cpu_batched_instance_buffer) = cpu_batched_instance_buffer
2966
&& let Some(instance_data_binding) = cpu_batched_instance_buffer
2967
.into_inner()
2968
.instance_data_binding()
2969
{
2970
// In this path, we only have a single set of bind groups for all phases.
2971
let cpu_preprocessing_mesh_bind_groups = prepare_mesh_bind_groups_for_phase(
2972
instance_data_binding,
2973
&meshes,
2974
&mesh_pipeline,
2975
&render_device,
2976
&pipeline_cache,
2977
&skins_uniform,
2978
&weights_uniform,
2979
&mut render_lightmaps,
2980
);
2981
2982
commands.insert_resource(MeshBindGroups::CpuPreprocessing(
2983
cpu_preprocessing_mesh_bind_groups,
2984
));
2985
return;
2986
}
2987
2988
// GPU mesh preprocessing path.
2989
if let Some(gpu_batched_instance_buffers) = gpu_batched_instance_buffers {
2990
let mut gpu_preprocessing_mesh_bind_groups = TypeIdMap::default();
2991
2992
// Loop over each phase.
2993
for (phase_type_id, batched_phase_instance_buffers) in
2994
&gpu_batched_instance_buffers.phase_instance_buffers
2995
{
2996
let Some(instance_data_binding) =
2997
batched_phase_instance_buffers.instance_data_binding()
2998
else {
2999
continue;
3000
};
3001
3002
let mesh_phase_bind_groups = prepare_mesh_bind_groups_for_phase(
3003
instance_data_binding,
3004
&meshes,
3005
&mesh_pipeline,
3006
&render_device,
3007
&pipeline_cache,
3008
&skins_uniform,
3009
&weights_uniform,
3010
&mut render_lightmaps,
3011
);
3012
3013
gpu_preprocessing_mesh_bind_groups.insert(*phase_type_id, mesh_phase_bind_groups);
3014
}
3015
3016
commands.insert_resource(MeshBindGroups::GpuPreprocessing(
3017
gpu_preprocessing_mesh_bind_groups,
3018
));
3019
}
3020
}
3021
3022
/// Creates the per-mesh bind groups for each type of mesh, for a single phase.
3023
fn prepare_mesh_bind_groups_for_phase(
3024
model: BindingResource,
3025
meshes: &RenderAssets<RenderMesh>,
3026
mesh_pipeline: &MeshPipeline,
3027
render_device: &RenderDevice,
3028
pipeline_cache: &PipelineCache,
3029
skins_uniform: &SkinUniforms,
3030
weights_uniform: &MorphUniforms,
3031
render_lightmaps: &mut RenderLightmaps,
3032
) -> MeshPhaseBindGroups {
3033
let layouts = &mesh_pipeline.mesh_layouts;
3034
3035
// TODO: Reuse allocations.
3036
let mut groups = MeshPhaseBindGroups {
3037
model_only: Some(layouts.model_only(render_device, pipeline_cache, &model)),
3038
..default()
3039
};
3040
3041
// Create the skinned mesh bind group with the current and previous buffers
3042
// (the latter being for motion vector computation).
3043
let (skin, prev_skin) = (&skins_uniform.current_buffer, &skins_uniform.prev_buffer);
3044
groups.skinned = Some(MeshBindGroupPair {
3045
motion_vectors: layouts.skinned_motion(
3046
render_device,
3047
pipeline_cache,
3048
&model,
3049
skin,
3050
prev_skin,
3051
),
3052
no_motion_vectors: layouts.skinned(render_device, pipeline_cache, &model, skin),
3053
});
3054
3055
// Create the morphed bind groups just like we did for the skinned bind
3056
// group.
3057
if let Some(weights) = weights_uniform.current_buffer.buffer() {
3058
let prev_weights = weights_uniform.prev_buffer.buffer().unwrap_or(weights);
3059
for (id, gpu_mesh) in meshes.iter() {
3060
if let Some(targets) = gpu_mesh.morph_targets.as_ref() {
3061
let bind_group_pair = if is_skinned(&gpu_mesh.layout) {
3062
let prev_skin = &skins_uniform.prev_buffer;
3063
MeshBindGroupPair {
3064
motion_vectors: layouts.morphed_skinned_motion(
3065
render_device,
3066
pipeline_cache,
3067
&model,
3068
skin,
3069
weights,
3070
targets,
3071
prev_skin,
3072
prev_weights,
3073
),
3074
no_motion_vectors: layouts.morphed_skinned(
3075
render_device,
3076
pipeline_cache,
3077
&model,
3078
skin,
3079
weights,
3080
targets,
3081
),
3082
}
3083
} else {
3084
MeshBindGroupPair {
3085
motion_vectors: layouts.morphed_motion(
3086
render_device,
3087
pipeline_cache,
3088
&model,
3089
weights,
3090
targets,
3091
prev_weights,
3092
),
3093
no_motion_vectors: layouts.morphed(
3094
render_device,
3095
pipeline_cache,
3096
&model,
3097
weights,
3098
targets,
3099
),
3100
}
3101
};
3102
groups.morph_targets.insert(id, bind_group_pair);
3103
}
3104
}
3105
}
3106
3107
// Create lightmap bindgroups. There will be one bindgroup for each slab.
3108
let bindless_supported = render_lightmaps.bindless_supported;
3109
for (lightmap_slab_id, lightmap_slab) in render_lightmaps.slabs.iter_mut().enumerate() {
3110
groups.lightmaps.insert(
3111
LightmapSlabIndex(NonMaxU32::new(lightmap_slab_id as u32).unwrap()),
3112
layouts.lightmapped(
3113
render_device,
3114
pipeline_cache,
3115
&model,
3116
lightmap_slab,
3117
bindless_supported,
3118
),
3119
);
3120
}
3121
3122
groups
3123
}
3124
3125
pub struct SetMeshViewBindGroup<const I: usize>;
3126
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMeshViewBindGroup<I> {
3127
type Param = ();
3128
type ViewQuery = (
3129
Read<ViewUniformOffset>,
3130
Read<ViewLightsUniformOffset>,
3131
Read<ViewFogUniformOffset>,
3132
Read<ViewLightProbesUniformOffset>,
3133
Read<ViewScreenSpaceReflectionsUniformOffset>,
3134
Read<ViewContactShadowsUniformOffset>,
3135
Read<ViewEnvironmentMapUniformOffset>,
3136
Read<MeshViewBindGroup>,
3137
Option<Read<OrderIndependentTransparencySettingsOffset>>,
3138
);
3139
type ItemQuery = ();
3140
3141
#[inline]
3142
fn render<'w>(
3143
_item: &P,
3144
(
3145
view_uniform,
3146
view_lights,
3147
view_fog,
3148
view_light_probes,
3149
view_ssr,
3150
view_contact_shadows,
3151
view_environment_map,
3152
mesh_view_bind_group,
3153
maybe_oit_layers_count_offset,
3154
): ROQueryItem<'w, '_, Self::ViewQuery>,
3155
_entity: Option<()>,
3156
_: SystemParamItem<'w, '_, Self::Param>,
3157
pass: &mut TrackedRenderPass<'w>,
3158
) -> RenderCommandResult {
3159
let mut offsets: SmallVec<[u32; 8]> = smallvec![
3160
view_uniform.offset,
3161
view_lights.offset,
3162
view_fog.offset,
3163
**view_light_probes,
3164
**view_ssr,
3165
**view_contact_shadows,
3166
**view_environment_map,
3167
];
3168
if let Some(layers_count_offset) = maybe_oit_layers_count_offset {
3169
offsets.push(layers_count_offset.offset);
3170
}
3171
pass.set_bind_group(I, &mesh_view_bind_group.main, &offsets);
3172
3173
RenderCommandResult::Success
3174
}
3175
}
3176
3177
pub struct SetMeshViewBindingArrayBindGroup<const I: usize>;
3178
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMeshViewBindingArrayBindGroup<I> {
3179
type Param = ();
3180
type ViewQuery = (Read<MeshViewBindGroup>,);
3181
type ItemQuery = ();
3182
3183
#[inline]
3184
fn render<'w>(
3185
_item: &P,
3186
(mesh_view_bind_group,): ROQueryItem<'w, '_, Self::ViewQuery>,
3187
_entity: Option<()>,
3188
_: SystemParamItem<'w, '_, Self::Param>,
3189
pass: &mut TrackedRenderPass<'w>,
3190
) -> RenderCommandResult {
3191
pass.set_bind_group(I, &mesh_view_bind_group.binding_array, &[]);
3192
3193
RenderCommandResult::Success
3194
}
3195
}
3196
3197
pub struct SetMeshViewEmptyBindGroup<const I: usize>;
3198
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMeshViewEmptyBindGroup<I> {
3199
type Param = ();
3200
type ViewQuery = (Read<MeshViewBindGroup>,);
3201
type ItemQuery = ();
3202
3203
#[inline]
3204
fn render<'w>(
3205
_item: &P,
3206
(mesh_view_bind_group,): ROQueryItem<'w, '_, Self::ViewQuery>,
3207
_entity: Option<()>,
3208
_: SystemParamItem<'w, '_, Self::Param>,
3209
pass: &mut TrackedRenderPass<'w>,
3210
) -> RenderCommandResult {
3211
pass.set_bind_group(I, &mesh_view_bind_group.empty, &[]);
3212
3213
RenderCommandResult::Success
3214
}
3215
}
3216
3217
/// Render command that binds the per-mesh bind group for the item being
/// drawn, selecting the right flavor (model-only, skinned, morphed,
/// lightmapped) and assembling its dynamic offsets.
pub struct SetMeshBindGroup<const I: usize>;
impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMeshBindGroup<I> {
    type Param = (
        SRes<RenderDevice>,
        SRes<MeshBindGroups>,
        SRes<RenderMeshInstances>,
        SRes<SkinUniforms>,
        SRes<MorphIndices>,
        SRes<RenderLightmaps>,
    );
    type ViewQuery = Has<MotionVectorPrepass>;
    type ItemQuery = ();

    #[inline]
    fn render<'w>(
        item: &P,
        has_motion_vector_prepass: bool,
        _item_query: Option<()>,
        (
            render_device,
            bind_groups,
            mesh_instances,
            skin_uniforms,
            morph_indices,
            lightmaps,
        ): SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        let bind_groups = bind_groups.into_inner();
        let mesh_instances = mesh_instances.into_inner();
        let skin_uniforms = skin_uniforms.into_inner();
        let morph_indices = morph_indices.into_inner();

        let entity = &item.main_entity();

        // An entity without a resolved mesh simply has nothing to bind.
        let Some(mesh_asset_id) = mesh_instances.mesh_asset_id(*entity) else {
            return RenderCommandResult::Success;
        };

        let current_skin_byte_offset = skin_uniforms.skin_byte_offset(*entity);
        let current_morph_index = morph_indices.current.get(entity);
        let prev_morph_index = morph_indices.prev.get(entity);

        let is_skinned = current_skin_byte_offset.is_some();
        let is_morphed = current_morph_index.is_some();

        let lightmap_slab_index = lightmaps
            .render_lightmaps
            .get(entity)
            .map(|render_lightmap| render_lightmap.slab_index);

        // With CPU preprocessing there is one set of bind groups for all
        // phases; with GPU preprocessing we look up the set for this phase.
        let Some(mesh_phase_bind_groups) = (match *bind_groups {
            MeshBindGroups::CpuPreprocessing(ref mesh_phase_bind_groups) => {
                Some(mesh_phase_bind_groups)
            }
            MeshBindGroups::GpuPreprocessing(ref mesh_phase_bind_groups) => {
                mesh_phase_bind_groups.get(&TypeId::of::<P>())
            }
        }) else {
            // This is harmless if e.g. we're rendering the `Shadow` phase and
            // there weren't any shadows.
            return RenderCommandResult::Success;
        };

        let Some(bind_group) = mesh_phase_bind_groups.get(
            mesh_asset_id,
            lightmap_slab_index,
            is_skinned,
            is_morphed,
            has_motion_vector_prepass,
        ) else {
            return RenderCommandResult::Failure(
                "The MeshBindGroups resource wasn't set in the render phase. \
                It should be set by the prepare_mesh_bind_group system.\n\
                This is a bevy bug! Please open an issue.",
            );
        };

        // Dynamic offsets must be pushed in the same order as the dynamic
        // bindings appear in the bind group layout: extra index, skin, morph,
        // then (when motion vectors are needed) previous skin and previous
        // morph. At most 5 can be present at once.
        let mut dynamic_offsets: [u32; 5] = Default::default();
        let mut offset_count = 0;
        if let PhaseItemExtraIndex::DynamicOffset(dynamic_offset) = item.extra_index() {
            dynamic_offsets[offset_count] = dynamic_offset;
            offset_count += 1;
        }
        // Skin offsets are only dynamic when skins live in uniform (not
        // storage) buffers.
        if let Some(current_skin_index) = current_skin_byte_offset
            && skins_use_uniform_buffers(&render_device.limits())
        {
            dynamic_offsets[offset_count] = current_skin_index.byte_offset;
            offset_count += 1;
        }
        if let Some(current_morph_index) = current_morph_index {
            dynamic_offsets[offset_count] = current_morph_index.index;
            offset_count += 1;
        }

        // Attach motion vectors if needed.
        if has_motion_vector_prepass {
            // Attach the previous skin index for motion vector computation.
            // NOTE(review): this reuses the *current* skin byte offset for the
            // previous-skin binding — presumably the current and previous skin
            // buffers share the same layout/offsets; confirm against
            // `SkinUniforms`.
            if skins_use_uniform_buffers(&render_device.limits())
                && let Some(current_skin_byte_offset) = current_skin_byte_offset
            {
                dynamic_offsets[offset_count] = current_skin_byte_offset.byte_offset;
                offset_count += 1;
            }

            // Attach the previous morph index for motion vector computation. If
            // there isn't one, just use zero as the shader will ignore it.
            if current_morph_index.is_some() {
                match prev_morph_index {
                    Some(prev_morph_index) => {
                        dynamic_offsets[offset_count] = prev_morph_index.index;
                    }
                    None => dynamic_offsets[offset_count] = 0,
                }
                offset_count += 1;
            }
        }

        pass.set_bind_group(I, bind_group, &dynamic_offsets[0..offset_count]);

        RenderCommandResult::Success
    }
}
/// Render command that issues the actual draw call for a mesh, either
/// directly or via (multi-)draw-indirect when GPU preprocessing built the
/// indirect parameters.
pub struct DrawMesh;
impl<P: PhaseItem> RenderCommand<P> for DrawMesh {
    type Param = (
        SRes<RenderAssets<RenderMesh>>,
        SRes<RenderMeshInstances>,
        SRes<IndirectParametersBuffers>,
        SRes<PipelineCache>,
        SRes<MeshAllocator>,
        Option<SRes<PreprocessPipelines>>,
        SRes<GpuPreprocessingSupport>,
    );
    type ViewQuery = Has<PreprocessBindGroups>;
    type ItemQuery = ();
    #[inline]
    fn render<'w>(
        item: &P,
        has_preprocess_bind_group: ROQueryItem<Self::ViewQuery>,
        _item_query: Option<()>,
        (
            meshes,
            mesh_instances,
            indirect_parameters_buffer,
            pipeline_cache,
            mesh_allocator,
            preprocess_pipelines,
            preprocessing_support,
        ): SystemParamItem<'w, '_, Self::Param>,
        pass: &mut TrackedRenderPass<'w>,
    ) -> RenderCommandResult {
        // If we're using GPU preprocessing, then we're dependent on that
        // compute shader having been run, which of course can only happen if
        // it's compiled. Otherwise, our mesh instance data won't be present.
        if let Some(preprocess_pipelines) = preprocess_pipelines
            && (!has_preprocess_bind_group
                || !preprocess_pipelines
                    .pipelines_are_loaded(&pipeline_cache, &preprocessing_support))
        {
            return RenderCommandResult::Skip;
        }

        let meshes = meshes.into_inner();
        let mesh_instances = mesh_instances.into_inner();
        let indirect_parameters_buffer = indirect_parameters_buffer.into_inner();
        let mesh_allocator = mesh_allocator.into_inner();

        // Missing mesh data at any stage means there's nothing to draw.
        let Some(mesh_asset_id) = mesh_instances.mesh_asset_id(item.main_entity()) else {
            return RenderCommandResult::Skip;
        };
        let Some(gpu_mesh) = meshes.get(mesh_asset_id) else {
            return RenderCommandResult::Skip;
        };
        let Some(vertex_buffer_slice) = mesh_allocator.mesh_vertex_slice(&mesh_asset_id) else {
            return RenderCommandResult::Skip;
        };

        pass.set_vertex_buffer(0, vertex_buffer_slice.buffer.slice(..));

        let batch_range = item.batch_range();

        // Draw either directly or indirectly, as appropriate. If we're in
        // indirect mode, we can additionally multi-draw. (We can't multi-draw
        // in direct mode because `wgpu` doesn't expose that functionality.)
        match &gpu_mesh.buffer_info {
            RenderMeshBufferInfo::Indexed {
                index_format,
                count,
            } => {
                let Some(index_buffer_slice) = mesh_allocator.mesh_index_slice(&mesh_asset_id)
                else {
                    return RenderCommandResult::Skip;
                };

                pass.set_index_buffer(index_buffer_slice.buffer.slice(..), *index_format);

                match item.extra_index() {
                    // Direct draw: index/vertex offsets come from the
                    // allocator slices; instances from the batch range.
                    PhaseItemExtraIndex::None | PhaseItemExtraIndex::DynamicOffset(_) => {
                        pass.draw_indexed(
                            index_buffer_slice.range.start
                                ..(index_buffer_slice.range.start + *count),
                            vertex_buffer_slice.range.start as i32,
                            batch_range.clone(),
                        );
                    }
                    PhaseItemExtraIndex::IndirectParametersIndex {
                        range: indirect_parameters_range,
                        batch_set_index,
                    } => {
                        // Look up the indirect parameters buffer, as well as
                        // the buffer we're going to use for
                        // `multi_draw_indexed_indirect_count` (if available).
                        let Some(phase_indirect_parameters_buffers) =
                            indirect_parameters_buffer.get(&TypeId::of::<P>())
                        else {
                            warn!(
                                "Not rendering mesh because indexed indirect parameters buffer \
                                 wasn't present for this phase",
                            );
                            return RenderCommandResult::Skip;
                        };
                        let (Some(indirect_parameters_buffer), Some(batch_sets_buffer)) = (
                            phase_indirect_parameters_buffers.indexed.data_buffer(),
                            phase_indirect_parameters_buffers
                                .indexed
                                .batch_sets_buffer(),
                        ) else {
                            warn!(
                                "Not rendering mesh because indexed indirect parameters buffer \
                                 wasn't present",
                            );
                            return RenderCommandResult::Skip;
                        };

                        // Calculate the location of the indirect parameters
                        // within the buffer.
                        let indirect_parameters_offset = indirect_parameters_range.start as u64
                            * size_of::<IndirectParametersIndexed>() as u64;
                        let indirect_parameters_count =
                            indirect_parameters_range.end - indirect_parameters_range.start;

                        // If we're using `multi_draw_indirect_count`, take the
                        // number of batches from the appropriate position in
                        // the batch sets buffer. Otherwise, supply the size of
                        // the batch set.
                        match batch_set_index {
                            Some(batch_set_index) => {
                                let count_offset = u32::from(batch_set_index)
                                    * (size_of::<IndirectBatchSet>() as u32);
                                pass.multi_draw_indexed_indirect_count(
                                    indirect_parameters_buffer,
                                    indirect_parameters_offset,
                                    batch_sets_buffer,
                                    count_offset as u64,
                                    indirect_parameters_count,
                                );
                            }
                            None => {
                                pass.multi_draw_indexed_indirect(
                                    indirect_parameters_buffer,
                                    indirect_parameters_offset,
                                    indirect_parameters_count,
                                );
                            }
                        }
                    }
                }
            }

            RenderMeshBufferInfo::NonIndexed => match item.extra_index() {
                // Direct, non-indexed draw.
                PhaseItemExtraIndex::None | PhaseItemExtraIndex::DynamicOffset(_) => {
                    pass.draw(vertex_buffer_slice.range, batch_range.clone());
                }
                PhaseItemExtraIndex::IndirectParametersIndex {
                    range: indirect_parameters_range,
                    batch_set_index,
                } => {
                    // Look up the indirect parameters buffer, as well as the
                    // buffer we're going to use for
                    // `multi_draw_indirect_count` (if available).
                    let Some(phase_indirect_parameters_buffers) =
                        indirect_parameters_buffer.get(&TypeId::of::<P>())
                    else {
                        warn!(
                            "Not rendering mesh because non-indexed indirect parameters buffer \
                             wasn't present for this phase",
                        );
                        return RenderCommandResult::Skip;
                    };
                    let (Some(indirect_parameters_buffer), Some(batch_sets_buffer)) = (
                        phase_indirect_parameters_buffers.non_indexed.data_buffer(),
                        phase_indirect_parameters_buffers
                            .non_indexed
                            .batch_sets_buffer(),
                    ) else {
                        warn!(
                            "Not rendering mesh because non-indexed indirect parameters buffer \
                             wasn't present"
                        );
                        return RenderCommandResult::Skip;
                    };

                    // Calculate the location of the indirect parameters within
                    // the buffer.
                    let indirect_parameters_offset = indirect_parameters_range.start as u64
                        * size_of::<IndirectParametersNonIndexed>() as u64;
                    let indirect_parameters_count =
                        indirect_parameters_range.end - indirect_parameters_range.start;

                    // If we're using `multi_draw_indirect_count`, take the
                    // number of batches from the appropriate position in the
                    // batch sets buffer. Otherwise, supply the size of the
                    // batch set.
                    match batch_set_index {
                        Some(batch_set_index) => {
                            let count_offset =
                                u32::from(batch_set_index) * (size_of::<IndirectBatchSet>() as u32);
                            pass.multi_draw_indirect_count(
                                indirect_parameters_buffer,
                                indirect_parameters_offset,
                                batch_sets_buffer,
                                count_offset as u64,
                                indirect_parameters_count,
                            );
                        }
                        None => {
                            pass.multi_draw_indirect(
                                indirect_parameters_buffer,
                                indirect_parameters_offset,
                                indirect_parameters_count,
                            );
                        }
                    }
                }
            },
        }
        RenderCommandResult::Success
    }
}
#[cfg(test)]
mod tests {
    use super::MeshPipelineKey;

    /// Every power-of-two MSAA sample count from 1 through 128 must survive a
    /// round trip through the pipeline key bits.
    #[test]
    fn mesh_key_msaa_samples() {
        for shift in 0..8u32 {
            let samples = 1 << shift;
            assert_eq!(
                MeshPipelineKey::from_msaa_samples(samples).msaa_samples(),
                samples
            );
        }
    }
}