Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bevyengine
GitHub Repository: bevyengine/bevy
Path: blob/main/crates/bevy_pbr/src/material_bind_groups.rs
9353 views
1
//! Material bind group management for bindless resources.
2
//!
3
//! In bindless mode, Bevy's renderer groups materials into bind groups. This
4
//! allocator manages each bind group, assigning slots to materials as
5
//! appropriate.
6
7
use crate::Material;
8
use bevy_derive::{Deref, DerefMut};
9
use bevy_ecs::{
10
resource::Resource,
11
system::{Commands, Res},
12
};
13
use bevy_platform::collections::{HashMap, HashSet};
14
use bevy_reflect::{prelude::ReflectDefault, Reflect};
15
use bevy_render::render_resource::{BindlessSlabResourceLimit, PipelineCache};
16
use bevy_render::{
17
render_resource::{
18
BindGroup, BindGroupEntry, BindGroupLayoutDescriptor, BindingNumber, BindingResource,
19
BindingResources, BindlessDescriptor, BindlessIndex, BindlessIndexTableDescriptor,
20
BindlessResourceType, Buffer, BufferBinding, BufferDescriptor, BufferId,
21
BufferInitDescriptor, BufferUsages, CompareFunction, FilterMode, MipmapFilterMode,
22
OwnedBindingResource, PreparedBindGroup, RawBufferVec, Sampler, SamplerDescriptor,
23
SamplerId, TextureView, TextureViewDimension, TextureViewId, UnpreparedBindGroup,
24
WgpuSampler, WgpuTextureView,
25
},
26
renderer::{RenderDevice, RenderQueue},
27
settings::WgpuFeatures,
28
texture::FallbackImage,
29
};
30
use bevy_utils::{default, TypeIdMap};
31
use bytemuck::Pod;
32
use core::hash::Hash;
33
use core::{cmp::Ordering, iter, mem, ops::Range};
34
use tracing::{error, trace};
35
36
/// A render-world resource mapping each material type (by `TypeId`) to the
/// [`MaterialBindGroupAllocator`] that manages that material's bind groups.
#[derive(Resource, Deref, DerefMut, Default)]
pub struct MaterialBindGroupAllocators(TypeIdMap<MaterialBindGroupAllocator>);
/// A resource that places materials into bind groups and tracks their
/// resources.
///
/// Internally, Bevy has separate allocators for bindless and non-bindless
/// materials. This resource provides a common interface to the specific
/// allocator in use.
///
/// Each variant boxes its allocator, so the enum itself stays small.
pub enum MaterialBindGroupAllocator {
    /// The allocator used when the material is bindless.
    Bindless(Box<MaterialBindGroupBindlessAllocator>),
    /// The allocator used when the material is non-bindless.
    NonBindless(Box<MaterialBindGroupNonBindlessAllocator>),
}
/// The allocator that places bindless materials into bind groups and tracks
/// their resources.
pub struct MaterialBindGroupBindlessAllocator {
    /// The label of the bind group allocator to use for allocated buffers.
    label: &'static str,
    /// The slabs, each of which contains a bind group.
    ///
    /// A slab's position in this vector is its [`MaterialBindGroupIndex`].
    slabs: Vec<MaterialBindlessSlab>,
    /// The layout of the bind groups that we produce.
    bind_group_layout: BindGroupLayoutDescriptor,
    /// Information about the bindless resources in the material.
    ///
    /// We use this information to create and maintain bind groups.
    bindless_descriptor: BindlessDescriptor,

    /// Dummy buffers that we use to fill empty slots in buffer binding arrays.
    ///
    /// There's one fallback buffer for each buffer in the bind group, each
    /// appropriately sized. Each buffer contains one uninitialized element of
    /// the applicable type.
    fallback_buffers: HashMap<BindlessIndex, Buffer>,

    /// The maximum number of resources that can be stored in a slab.
    ///
    /// This corresponds to `SLAB_CAPACITY` in the `#[bindless(SLAB_CAPACITY)]`
    /// attribute, when deriving `AsBindGroup`.
    slab_capacity: u32,
}
/// A single bind group and the bookkeeping necessary to allocate into it.
pub struct MaterialBindlessSlab {
    /// The current bind group, if it's up to date.
    ///
    /// If this is `None`, then the bind group is dirty and needs to be
    /// regenerated.
    bind_group: Option<BindGroup>,

    /// The GPU-accessible buffers that hold the mapping from binding index to
    /// bindless slot.
    ///
    /// This is conventionally assigned to bind group binding 0, but it can be
    /// changed using the `#[bindless(index_table(binding(B)))]` attribute on
    /// `AsBindGroup`.
    ///
    /// Because the slab binary searches this table, the entries within must be
    /// sorted by bindless index.
    bindless_index_tables: Vec<MaterialBindlessIndexTable>,

    // Note the asymmetry in keys below: sampler and texture binding arrays are
    // keyed by resource type, while buffer binding arrays are keyed by
    // bindless index.
    /// The binding arrays containing samplers.
    samplers: HashMap<BindlessResourceType, MaterialBindlessBindingArray<Sampler>>,
    /// The binding arrays containing textures.
    textures: HashMap<BindlessResourceType, MaterialBindlessBindingArray<TextureView>>,
    /// The binding arrays containing buffers.
    buffers: HashMap<BindlessIndex, MaterialBindlessBindingArray<Buffer>>,
    /// The buffers that contain plain old data (i.e. the structure-level
    /// `#[data]` attribute of `AsBindGroup`).
    data_buffers: HashMap<BindlessIndex, MaterialDataBuffer>,

    /// A list of free slot IDs.
    free_slots: Vec<MaterialBindGroupSlot>,
    /// The total number of materials currently allocated in this slab.
    live_allocation_count: u32,
    /// The total number of resources currently allocated in the binding arrays.
    allocated_resource_count: u32,
}
/// A GPU-accessible buffer that holds the mapping from binding index to
/// bindless slot.
///
/// This is conventionally assigned to bind group binding 0, but it can be
/// changed by altering the [`Self::binding_number`], which corresponds to the
/// `#[bindless(index_table(binding(B)))]` attribute in `AsBindGroup`.
struct MaterialBindlessIndexTable {
    /// The buffer containing the mappings.
    ///
    /// Conceptually this is a 2D array: one row of `N - M` `u32` entries per
    /// allocated material slot, flattened in slot order.
    buffer: RetainedRawBufferVec<u32>,
    /// The range of bindless indices that this bindless index table covers.
    ///
    /// If this range is M..N, then the field at index $i$ maps to bindless
    /// index $i$ + M. The size of this table is N - M.
    ///
    /// This corresponds to the `#[bindless(index_table(range(M..N)))]`
    /// attribute in `AsBindGroup`.
    index_range: Range<BindlessIndex>,
    /// The binding number that this index table is assigned to in the shader.
    binding_number: BindingNumber,
}
/// A single binding array for storing bindless resources and the bookkeeping
/// necessary to allocate into it.
struct MaterialBindlessBindingArray<R>
where
    R: GetBindingResourceId,
{
    /// The number of the binding that we attach this binding array to.
    binding_number: BindingNumber,
    /// A mapping from bindless slot index to the resource stored in that slot,
    /// if any.
    bindings: Vec<Option<MaterialBindlessBinding<R>>>,
    /// The type of resource stored in this binding array.
    resource_type: BindlessResourceType,
    /// Maps a resource ID to the slot in which it's stored.
    ///
    /// This is essentially the inverse mapping of [`Self::bindings`], and is
    /// what allows identical resources to share a single slot.
    resource_to_slot: HashMap<BindingResourceId, u32>,
    /// A list of free slots in [`Self::bindings`] that contain no binding.
    free_slots: Vec<u32>,
    /// The number of allocated objects in this binding array.
    len: u32,
}
/// A single resource (sampler, texture, or buffer) in a binding array.
///
/// Resources hold a reference count, which specifies the number of materials
/// currently allocated within the slab that refer to this resource. When the
/// reference count drops to zero, the resource is freed.
struct MaterialBindlessBinding<R>
where
    R: GetBindingResourceId,
{
    /// The sampler, texture, or buffer.
    resource: R,
    /// The number of materials currently allocated within the containing slab
    /// that use this resource.
    ref_count: u32,
}
/// The allocator that stores bind groups for non-bindless materials.
///
/// Unlike the bindless allocator, each allocated material occupies its own
/// bind group here (one entry in [`Self::bind_groups`] per material).
pub struct MaterialBindGroupNonBindlessAllocator {
    /// The label of the bind group allocator to use for allocated buffers.
    label: &'static str,
    /// A mapping from [`MaterialBindGroupIndex`] to the bind group allocated in
    /// each slot.
    bind_groups: Vec<Option<MaterialNonBindlessAllocatedBindGroup>>,
    /// The bind groups that are dirty and need to be prepared.
    ///
    /// To prepare the bind groups, call
    /// [`MaterialBindGroupAllocator::prepare_bind_groups`].
    to_prepare: HashSet<MaterialBindGroupIndex>,
    /// A list of free bind group indices.
    free_indices: Vec<MaterialBindGroupIndex>,
}
/// A single bind group that a [`MaterialBindGroupNonBindlessAllocator`] is
/// currently managing.
enum MaterialNonBindlessAllocatedBindGroup {
    /// An unprepared bind group.
    ///
    /// The allocator prepares all outstanding unprepared bind groups when
    /// [`MaterialBindGroupNonBindlessAllocator::prepare_bind_groups`] is
    /// called.
    Unprepared {
        /// The unprepared bind group, including extra data.
        bind_group: UnpreparedBindGroup,
        /// The layout of that bind group.
        layout: BindGroupLayoutDescriptor,
    },
    /// A bind group that's already been prepared.
    Prepared {
        /// The ready-to-use bind group.
        bind_group: PreparedBindGroup,
        /// Uniform buffers created during preparation; kept alive here so they
        /// aren't dropped while the bind group still references them.
        #[expect(dead_code, reason = "These buffers are only referenced by bind groups")]
        uniform_buffers: Vec<Buffer>,
    },
}
/// Dummy instances of various resources that we fill unused slots in binding
/// arrays with.
///
/// There is one fallback sampler per sampler binding type, since binding
/// arrays of different sampler types can't share a fallback.
#[derive(Resource)]
pub struct FallbackBindlessResources {
    /// A dummy filtering sampler.
    filtering_sampler: Sampler,
    /// A dummy non-filtering sampler.
    non_filtering_sampler: Sampler,
    /// A dummy comparison sampler.
    comparison_sampler: Sampler,
}
/// The `wgpu` ID of a single bindless or non-bindless resource.
///
/// Used as the key when deduplicating resources within a slab.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum BindingResourceId {
    /// A buffer.
    Buffer(BufferId),
    /// A texture view, with the given dimension.
    ///
    /// The dimension is carried alongside the ID because a `wgpu` texture view
    /// ID alone doesn't record its dimension.
    TextureView(TextureViewDimension, TextureViewId),
    /// A sampler.
    Sampler(SamplerId),
    /// A buffer containing plain old data.
    ///
    /// This corresponds to the `#[data]` structure-level attribute on
    /// `AsBindGroup`.
    DataBuffer,
}
/// A temporary list of references to `wgpu` bindless resources.
///
/// We need this because the `wgpu` bindless API takes a slice of references.
/// Thus we need to create intermediate vectors of bindless resources in order
/// to satisfy `wgpu`'s lifetime requirements.
enum BindingResourceArray<'a> {
    /// A list of buffer bindings.
    Buffers(Vec<BufferBinding<'a>>),
    /// A list of texture views.
    TextureViews(Vec<&'a WgpuTextureView>),
    /// A list of samplers.
    Samplers(Vec<&'a WgpuSampler>),
}
/// The location of a material (either bindless or non-bindless) within the
/// slabs.
///
/// The pair (group, slot) uniquely identifies one allocated material.
#[derive(Clone, Copy, Debug, Default, Reflect)]
#[reflect(Clone, Default)]
pub struct MaterialBindingId {
    /// The index of the bind group (slab) where the GPU data is located.
    pub group: MaterialBindGroupIndex,
    /// The slot within that bind group.
    ///
    /// Non-bindless materials will always have a slot of 0.
    pub slot: MaterialBindGroupSlot,
}
/// The index of each material bind group.
///
/// In bindless mode, each bind group contains multiple materials. In
/// non-bindless mode, each bind group contains only one material.
///
/// This is a newtype over the slab's position in the allocator's slab list.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Reflect, Deref, DerefMut)]
#[reflect(Default, Clone, PartialEq, Hash)]
pub struct MaterialBindGroupIndex(pub u32);
impl From<u32> for MaterialBindGroupIndex {
279
fn from(value: u32) -> Self {
280
MaterialBindGroupIndex(value)
281
}
282
}
283
284
/// The index of the slot containing material data within each material bind
/// group.
///
/// In bindless mode, this slot is needed to locate the material data in each
/// bind group, since multiple materials are packed into a single slab. In
/// non-bindless mode, this slot is always 0.
#[derive(Clone, Copy, Debug, Default, PartialEq, Reflect, Deref, DerefMut)]
#[reflect(Default, Clone, PartialEq)]
pub struct MaterialBindGroupSlot(pub u32);
/// The CPU/GPU synchronization state of a buffer that we maintain.
///
/// Currently, the only buffer that we maintain is the
/// [`MaterialBindlessIndexTable`].
///
/// States advance `NeedsReserve` → `NeedsUpload` → `Clean` (see
/// `RetainedRawBufferVec::prepare` and `RetainedRawBufferVec::write`).
enum BufferDirtyState {
    /// The buffer is currently synchronized between the CPU and GPU.
    Clean,
    /// The buffer hasn't been created yet.
    NeedsReserve,
    /// The buffer exists on both CPU and GPU, but the GPU data is out of date.
    NeedsUpload,
}
/// Information that describes a potential allocation of an
/// [`UnpreparedBindGroup`] into a slab.
struct BindlessAllocationCandidate {
    /// A map that, for every resource in the [`UnpreparedBindGroup`] that
    /// already existed in this slab, maps bindless index of that resource to
    /// its slot in the appropriate binding array.
    ///
    /// Pre-existing resources are shared rather than allocated again.
    pre_existing_resources: HashMap<BindlessIndex, u32>,
    /// Stores the number of free slots that are needed to satisfy this
    /// allocation.
    needed_free_slots: u32,
}
/// A trait that allows fetching the [`BindingResourceId`] from a
/// [`BindlessResourceType`].
///
/// This is used when freeing bindless resources, in order to locate the IDs
/// assigned to each resource so that they can be removed from the appropriate
/// maps.
trait GetBindingResourceId {
    /// Returns the [`BindingResourceId`] for this resource.
    ///
    /// `resource_type` specifies this resource's type. This is used for
    /// textures, as a `wgpu` [`TextureView`] doesn't store enough information
    /// itself to determine its dimension.
    fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId;
}
/// The public interface to a slab, which represents a single bind group.
///
/// This is a thin wrapper hiding the private [`MaterialSlabImpl`] enum.
pub struct MaterialSlab<'a>(MaterialSlabImpl<'a>);
/// The actual implementation of a material slab.
///
/// This has bindless and non-bindless variants.
enum MaterialSlabImpl<'a> {
    /// The implementation of the slab interface we use when the slab
    /// is bindless.
    Bindless(&'a MaterialBindlessSlab),
    /// The implementation of the slab interface we use when the slab
    /// is non-bindless.
    NonBindless(MaterialNonBindlessSlab<'a>),
}
/// A single bind group that the [`MaterialBindGroupNonBindlessAllocator`]
/// manages.
enum MaterialNonBindlessSlab<'a> {
    /// A slab that has a bind group.
    Prepared(&'a PreparedBindGroup),
    /// A slab that doesn't yet have a bind group.
    Unprepared,
}
/// Manages an array of untyped plain old data on GPU and allocates individual
/// slots within that array.
///
/// This supports the `#[data]` attribute of `AsBindGroup`.
struct MaterialDataBuffer {
    /// The number of the binding that we attach this storage buffer to.
    binding_number: BindingNumber,
    /// The actual data.
    ///
    /// Note that this is untyped (`u8`); the actual aligned size of each
    /// element is given by [`Self::aligned_element_size`];
    buffer: RetainedRawBufferVec<u8>,
    /// The size of each element in the buffer, including padding and alignment
    /// if any.
    aligned_element_size: u32,
    /// A list of free slots within the buffer, available for reuse.
    free_slots: Vec<u32>,
    /// The actual number of slots that have been allocated.
    len: u32,
}
/// A buffer containing plain old data, already packed into the appropriate GPU
/// format, and that can be updated incrementally.
///
/// This structure exists in order to encapsulate the lazy update
/// ([`BufferDirtyState`]) logic in a single place.
///
/// `Deref`/`DerefMut` forward to the inner [`RawBufferVec`], so CPU-side
/// mutation methods are available directly on this type.
#[derive(Deref, DerefMut)]
struct RetainedRawBufferVec<T>
where
    T: Pod,
{
    /// The contents of the buffer.
    #[deref]
    buffer: RawBufferVec<T>,
    /// Whether the contents of the buffer have been uploaded to the GPU.
    dirty: BufferDirtyState,
}
/// The size of the buffer that we assign to unused buffer slots, in bytes.
///
/// This is essentially arbitrary, as it doesn't seem to matter to `wgpu` what
/// the size is. It's only used when a bindless buffer descriptor doesn't
/// specify its own size.
const DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE: u64 = 16;
impl From<u32> for MaterialBindGroupSlot {
403
fn from(value: u32) -> Self {
404
MaterialBindGroupSlot(value)
405
}
406
}
407
408
impl From<MaterialBindGroupSlot> for u32 {
409
fn from(value: MaterialBindGroupSlot) -> Self {
410
value.0
411
}
412
}
413
414
impl<'a> From<&'a OwnedBindingResource> for BindingResourceId {
415
fn from(value: &'a OwnedBindingResource) -> Self {
416
match *value {
417
OwnedBindingResource::Buffer(ref buffer) => BindingResourceId::Buffer(buffer.id()),
418
OwnedBindingResource::Data(_) => BindingResourceId::DataBuffer,
419
OwnedBindingResource::TextureView(ref texture_view_dimension, ref texture_view) => {
420
BindingResourceId::TextureView(*texture_view_dimension, texture_view.id())
421
}
422
OwnedBindingResource::Sampler(_, ref sampler) => {
423
BindingResourceId::Sampler(sampler.id())
424
}
425
}
426
}
427
}
428
429
impl GetBindingResourceId for Buffer {
430
fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId {
431
BindingResourceId::Buffer(self.id())
432
}
433
}
434
435
impl GetBindingResourceId for Sampler {
436
fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId {
437
BindingResourceId::Sampler(self.id())
438
}
439
}
440
441
impl GetBindingResourceId for TextureView {
442
fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId {
443
let texture_view_dimension = match resource_type {
444
BindlessResourceType::Texture1d => TextureViewDimension::D1,
445
BindlessResourceType::Texture2d => TextureViewDimension::D2,
446
BindlessResourceType::Texture2dArray => TextureViewDimension::D2Array,
447
BindlessResourceType::Texture3d => TextureViewDimension::D3,
448
BindlessResourceType::TextureCube => TextureViewDimension::Cube,
449
BindlessResourceType::TextureCubeArray => TextureViewDimension::CubeArray,
450
_ => panic!("Resource type is not a texture"),
451
};
452
BindingResourceId::TextureView(texture_view_dimension, self.id())
453
}
454
}
455
456
impl MaterialBindGroupAllocator {
457
/// Creates a new [`MaterialBindGroupAllocator`] managing the data for a
458
/// single material.
459
pub fn new(
460
render_device: &RenderDevice,
461
label: &'static str,
462
bindless_descriptor: Option<BindlessDescriptor>,
463
bind_group_layout: BindGroupLayoutDescriptor,
464
slab_capacity: Option<BindlessSlabResourceLimit>,
465
) -> MaterialBindGroupAllocator {
466
if let Some(bindless_descriptor) = bindless_descriptor {
467
MaterialBindGroupAllocator::Bindless(Box::new(MaterialBindGroupBindlessAllocator::new(
468
render_device,
469
label,
470
bindless_descriptor,
471
bind_group_layout,
472
slab_capacity,
473
)))
474
} else {
475
MaterialBindGroupAllocator::NonBindless(Box::new(
476
MaterialBindGroupNonBindlessAllocator::new(label),
477
))
478
}
479
}
480
481
/// Returns the slab with the given index, if one exists.
482
pub fn get(&self, group: MaterialBindGroupIndex) -> Option<MaterialSlab<'_>> {
483
match *self {
484
MaterialBindGroupAllocator::Bindless(ref bindless_allocator) => bindless_allocator
485
.get(group)
486
.map(|bindless_slab| MaterialSlab(MaterialSlabImpl::Bindless(bindless_slab))),
487
MaterialBindGroupAllocator::NonBindless(ref non_bindless_allocator) => {
488
non_bindless_allocator.get(group).map(|non_bindless_slab| {
489
MaterialSlab(MaterialSlabImpl::NonBindless(non_bindless_slab))
490
})
491
}
492
}
493
}
494
495
/// Allocates an [`UnpreparedBindGroup`] and returns the resulting binding ID.
496
///
497
/// This method should generally be preferred over
498
/// [`Self::allocate_prepared`], because this method supports both bindless
499
/// and non-bindless bind groups. Only use [`Self::allocate_prepared`] if
500
/// you need to prepare the bind group yourself.
501
pub fn allocate_unprepared(
502
&mut self,
503
unprepared_bind_group: UnpreparedBindGroup,
504
bind_group_layout: &BindGroupLayoutDescriptor,
505
) -> MaterialBindingId {
506
match *self {
507
MaterialBindGroupAllocator::Bindless(
508
ref mut material_bind_group_bindless_allocator,
509
) => material_bind_group_bindless_allocator.allocate_unprepared(unprepared_bind_group),
510
MaterialBindGroupAllocator::NonBindless(
511
ref mut material_bind_group_non_bindless_allocator,
512
) => material_bind_group_non_bindless_allocator
513
.allocate_unprepared(unprepared_bind_group, (*bind_group_layout).clone()),
514
}
515
}
516
517
/// Places a pre-prepared bind group into a slab.
518
///
519
/// For bindless materials, the allocator internally manages the bind
520
/// groups, so calling this method will panic if this is a bindless
521
/// allocator. Only non-bindless allocators support this method.
522
///
523
/// It's generally preferred to use [`Self::allocate_unprepared`], because
524
/// that method supports both bindless and non-bindless allocators. Only use
525
/// this method if you need to prepare the bind group yourself.
526
pub fn allocate_prepared(
527
&mut self,
528
prepared_bind_group: PreparedBindGroup,
529
) -> MaterialBindingId {
530
match *self {
531
MaterialBindGroupAllocator::Bindless(_) => {
532
panic!(
533
"Bindless resources are incompatible with implementing `as_bind_group` \
534
directly; implement `unprepared_bind_group` instead or disable bindless"
535
)
536
}
537
MaterialBindGroupAllocator::NonBindless(ref mut non_bindless_allocator) => {
538
non_bindless_allocator.allocate_prepared(prepared_bind_group)
539
}
540
}
541
}
542
543
/// Deallocates the material with the given binding ID.
544
///
545
/// Any resources that are no longer referenced are removed from the slab.
546
pub fn free(&mut self, material_binding_id: MaterialBindingId) {
547
match *self {
548
MaterialBindGroupAllocator::Bindless(
549
ref mut material_bind_group_bindless_allocator,
550
) => material_bind_group_bindless_allocator.free(material_binding_id),
551
MaterialBindGroupAllocator::NonBindless(
552
ref mut material_bind_group_non_bindless_allocator,
553
) => material_bind_group_non_bindless_allocator.free(material_binding_id),
554
}
555
}
556
557
/// Recreates any bind groups corresponding to slabs that have been modified
558
/// since last calling [`MaterialBindGroupAllocator::prepare_bind_groups`].
559
pub fn prepare_bind_groups(
560
&mut self,
561
render_device: &RenderDevice,
562
pipeline_cache: &PipelineCache,
563
fallback_bindless_resources: &FallbackBindlessResources,
564
fallback_image: &FallbackImage,
565
) {
566
match *self {
567
MaterialBindGroupAllocator::Bindless(
568
ref mut material_bind_group_bindless_allocator,
569
) => material_bind_group_bindless_allocator.prepare_bind_groups(
570
render_device,
571
pipeline_cache,
572
fallback_bindless_resources,
573
fallback_image,
574
),
575
MaterialBindGroupAllocator::NonBindless(
576
ref mut material_bind_group_non_bindless_allocator,
577
) => material_bind_group_non_bindless_allocator
578
.prepare_bind_groups(render_device, pipeline_cache),
579
}
580
}
581
582
/// Uploads the contents of all buffers that this
583
/// [`MaterialBindGroupAllocator`] manages to the GPU.
584
///
585
/// Non-bindless allocators don't currently manage any buffers, so this
586
/// method only has an effect for bindless allocators.
587
pub fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
588
match *self {
589
MaterialBindGroupAllocator::Bindless(
590
ref mut material_bind_group_bindless_allocator,
591
) => material_bind_group_bindless_allocator.write_buffers(render_device, render_queue),
592
MaterialBindGroupAllocator::NonBindless(_) => {
593
// Not applicable.
594
}
595
}
596
}
597
598
/// Get number of allocated slabs for bindless material, returns 0 if it is
599
/// [`Self::NonBindless`].
600
pub fn slab_count(&self) -> usize {
601
match self {
602
Self::Bindless(bless) => bless.slabs.len(),
603
Self::NonBindless(_) => 0,
604
}
605
}
606
607
/// Get total size of slabs allocated for bindless material, returns 0 if it is
608
/// [`Self::NonBindless`].
609
pub fn slabs_size(&self) -> usize {
610
match self {
611
Self::Bindless(bless) => bless
612
.slabs
613
.iter()
614
.flat_map(|slab| {
615
slab.data_buffers
616
.iter()
617
.map(|(_, buffer)| buffer.buffer.len())
618
})
619
.sum(),
620
Self::NonBindless(_) => 0,
621
}
622
}
623
624
/// Get number of bindless material allocations in slabs, returns 0 if it is
625
/// [`Self::NonBindless`].
626
pub fn allocations(&self) -> u64 {
627
match self {
628
Self::Bindless(bless) => bless
629
.slabs
630
.iter()
631
.map(|slab| u64::from(slab.allocated_resource_count))
632
.sum(),
633
Self::NonBindless(_) => 0,
634
}
635
}
636
}
637
638
impl MaterialBindlessIndexTable {
    /// Creates a new [`MaterialBindlessIndexTable`] for a single slab.
    fn new(
        bindless_index_table_descriptor: &BindlessIndexTableDescriptor,
    ) -> MaterialBindlessIndexTable {
        // Preallocate space for one bindings table, so that there will always be a buffer.
        let mut buffer = RetainedRawBufferVec::new(BufferUsages::STORAGE);
        // One zeroed `u32` entry per bindless index in the covered range.
        for _ in *bindless_index_table_descriptor.indices.start
            ..*bindless_index_table_descriptor.indices.end
        {
            buffer.push(0);
        }

        MaterialBindlessIndexTable {
            buffer,
            index_range: bindless_index_table_descriptor.indices.clone(),
            binding_number: bindless_index_table_descriptor.binding_number,
        }
    }

    /// Returns the bindings in the binding index table.
    ///
    /// If the current [`MaterialBindlessIndexTable::index_range`] is M..N, then
    /// element *i* of the returned binding index table contains the slot of the
    /// bindless resource with bindless index *i* + M.
    fn get(&self, slot: MaterialBindGroupSlot) -> &[u32] {
        // The buffer is a flat array of `struct_size`-entry rows, one row per
        // material slot; slice out the row for `slot`.
        let struct_size = *self.index_range.end as usize - *self.index_range.start as usize;
        let start = struct_size * slot.0 as usize;
        &self.buffer.values()[start..(start + struct_size)]
    }

    /// Returns a single binding from the binding index table.
    ///
    /// Returns `None` if `bindless_index` falls outside the range this table
    /// covers.
    fn get_binding(
        &self,
        slot: MaterialBindGroupSlot,
        bindless_index: BindlessIndex,
    ) -> Option<u32> {
        if bindless_index < self.index_range.start || bindless_index >= self.index_range.end {
            return None;
        }
        self.get(slot)
            .get((*bindless_index - *self.index_range.start) as usize)
            .copied()
    }

    /// Returns the number of `u32` entries in one row of this table (N - M for
    /// an index range of M..N).
    fn table_length(&self) -> u32 {
        self.index_range.end.0 - self.index_range.start.0
    }

    /// Updates the binding index table for a single material.
    ///
    /// The `allocated_resource_slots` map contains a mapping from the
    /// [`BindlessIndex`] of each resource that the material references to the
    /// slot that that resource occupies in the appropriate binding array. This
    /// method serializes that map into a binding index table that the shader
    /// can read.
    fn set(
        &mut self,
        slot: MaterialBindGroupSlot,
        allocated_resource_slots: &HashMap<BindlessIndex, u32>,
    ) {
        // Grow the CPU-side buffer (with zero entries) until the row for
        // `slot` exists.
        let table_len = self.table_length() as usize;
        let range = (slot.0 as usize * table_len)..((slot.0 as usize + 1) * table_len);
        while self.buffer.len() < range.end {
            self.buffer.push(0);
        }

        // Write each resource slot into this material's row. Entries outside
        // this table's index range belong to a different index table and are
        // skipped.
        for (&bindless_index, &resource_slot) in allocated_resource_slots {
            if self.index_range.contains(&bindless_index) {
                self.buffer.set(
                    *bindless_index + range.start as u32 - *self.index_range.start,
                    resource_slot,
                );
            }
        }

        // Mark the buffer as needing to be recreated, in case we grew it.
        self.buffer.dirty = BufferDirtyState::NeedsReserve;
    }

    /// Returns the [`BindGroupEntry`] for the index table itself.
    fn bind_group_entry(&self) -> BindGroupEntry<'_> {
        BindGroupEntry {
            binding: *self.binding_number,
            resource: self
                .buffer
                .buffer()
                // `new` always pushes at least the range's worth of entries,
                // so after `prepare`/`write` the GPU buffer exists.
                .expect("Bindings buffer must exist")
                .as_entire_binding(),
        }
    }
}
impl<T> RetainedRawBufferVec<T>
732
where
733
T: Pod,
734
{
735
/// Creates a new empty [`RetainedRawBufferVec`] supporting the given
736
/// [`BufferUsages`].
737
fn new(buffer_usages: BufferUsages) -> RetainedRawBufferVec<T> {
738
RetainedRawBufferVec {
739
buffer: RawBufferVec::new(buffer_usages),
740
dirty: BufferDirtyState::NeedsUpload,
741
}
742
}
743
744
/// Recreates the GPU backing buffer if needed.
745
fn prepare(&mut self, render_device: &RenderDevice) {
746
match self.dirty {
747
BufferDirtyState::Clean | BufferDirtyState::NeedsUpload => {}
748
BufferDirtyState::NeedsReserve => {
749
let capacity = self.buffer.len();
750
self.buffer.reserve(capacity, render_device);
751
self.dirty = BufferDirtyState::NeedsUpload;
752
}
753
}
754
}
755
756
/// Writes the current contents of the buffer to the GPU if necessary.
757
fn write(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
758
match self.dirty {
759
BufferDirtyState::Clean => {}
760
BufferDirtyState::NeedsReserve | BufferDirtyState::NeedsUpload => {
761
self.buffer.write_buffer(render_device, render_queue);
762
self.dirty = BufferDirtyState::Clean;
763
}
764
}
765
}
766
}
767
768
impl MaterialBindGroupBindlessAllocator {
769
/// Creates a new [`MaterialBindGroupBindlessAllocator`] managing the data
770
/// for a single bindless material.
771
fn new(
772
render_device: &RenderDevice,
773
label: &'static str,
774
bindless_descriptor: BindlessDescriptor,
775
bind_group_layout: BindGroupLayoutDescriptor,
776
slab_capacity: Option<BindlessSlabResourceLimit>,
777
) -> MaterialBindGroupBindlessAllocator {
778
let fallback_buffers = bindless_descriptor
779
.buffers
780
.iter()
781
.map(|bindless_buffer_descriptor| {
782
(
783
bindless_buffer_descriptor.bindless_index,
784
render_device.create_buffer(&BufferDescriptor {
785
label: Some("bindless fallback buffer"),
786
size: match bindless_buffer_descriptor.size {
787
Some(size) => size as u64,
788
None => DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE,
789
},
790
usage: BufferUsages::STORAGE,
791
mapped_at_creation: false,
792
}),
793
)
794
})
795
.collect();
796
797
MaterialBindGroupBindlessAllocator {
798
label,
799
slabs: vec![],
800
bind_group_layout,
801
bindless_descriptor,
802
fallback_buffers,
803
slab_capacity: slab_capacity
804
.expect("Non-bindless materials should use the non-bindless allocator")
805
.resolve(),
806
}
807
}
808
809
/// Allocates the resources for a single material into a slab and returns
810
/// the resulting ID.
811
///
812
/// The returned [`MaterialBindingId`] can later be used to fetch the slab
813
/// that was used.
814
///
815
/// This function can't fail. If all slabs are full, then a new slab is
816
/// created, and the material is allocated into it.
817
fn allocate_unprepared(
818
&mut self,
819
mut unprepared_bind_group: UnpreparedBindGroup,
820
) -> MaterialBindingId {
821
for (slab_index, slab) in self.slabs.iter_mut().enumerate() {
822
trace!("Trying to allocate in slab {}", slab_index);
823
match slab.try_allocate(unprepared_bind_group, self.slab_capacity) {
824
Ok(slot) => {
825
return MaterialBindingId {
826
group: MaterialBindGroupIndex(slab_index as u32),
827
slot,
828
};
829
}
830
Err(bind_group) => unprepared_bind_group = bind_group,
831
}
832
}
833
834
let group = MaterialBindGroupIndex(self.slabs.len() as u32);
835
self.slabs
836
.push(MaterialBindlessSlab::new(&self.bindless_descriptor));
837
838
// Allocate into the newly-pushed slab.
839
let Ok(slot) = self
840
.slabs
841
.last_mut()
842
.expect("We just pushed a slab")
843
.try_allocate(unprepared_bind_group, self.slab_capacity)
844
else {
845
panic!("An allocation into an empty slab should always succeed")
846
};
847
848
MaterialBindingId { group, slot }
849
}
850
851
/// Deallocates the material with the given binding ID.
852
///
853
/// Any resources that are no longer referenced are removed from the slab.
854
fn free(&mut self, material_binding_id: MaterialBindingId) {
855
self.slabs
856
.get_mut(material_binding_id.group.0 as usize)
857
.expect("Slab should exist")
858
.free(material_binding_id.slot, &self.bindless_descriptor);
859
}
860
861
/// Returns the slab with the given bind group index.
862
///
863
/// A [`MaterialBindGroupIndex`] can be fetched from a
864
/// [`MaterialBindingId`].
865
fn get(&self, group: MaterialBindGroupIndex) -> Option<&MaterialBindlessSlab> {
866
self.slabs.get(group.0 as usize)
867
}
868
869
    /// Recreates any bind groups corresponding to slabs that have been modified
    /// since last calling
    /// [`MaterialBindGroupBindlessAllocator::prepare_bind_groups`].
    fn prepare_bind_groups(
        &mut self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        fallback_bindless_resources: &FallbackBindlessResources,
        fallback_image: &FallbackImage,
    ) {
        // Each slab tracks its own dirtiness (a cleared `bind_group`), so it's
        // cheap to call `prepare` on every slab unconditionally.
        for slab in &mut self.slabs {
            slab.prepare(
                render_device,
                pipeline_cache,
                self.label,
                &self.bind_group_layout,
                fallback_bindless_resources,
                &self.fallback_buffers,
                fallback_image,
                &self.bindless_descriptor,
                self.slab_capacity,
            );
        }
    }
/// Writes any buffers that we're managing to the GPU.
895
///
896
/// Currently, this only consists of the bindless index tables.
897
fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
898
for slab in &mut self.slabs {
899
slab.write_buffer(render_device, render_queue);
900
}
901
}
902
}
903
904
impl MaterialBindlessSlab {
    /// Attempts to allocate the given unprepared bind group in this slab.
    ///
    /// If the allocation succeeds, this method returns the slot that the
    /// allocation was placed in. If the allocation fails because the slab was
    /// full, this method returns the unprepared bind group back to the caller
    /// so that it can try to allocate again.
    fn try_allocate(
        &mut self,
        unprepared_bind_group: UnpreparedBindGroup,
        slot_capacity: u32,
    ) -> Result<MaterialBindGroupSlot, UnpreparedBindGroup> {
        // Locate pre-existing resources, and determine how many free slots we need.
        let Some(allocation_candidate) = self.check_allocation(&unprepared_bind_group) else {
            return Err(unprepared_bind_group);
        };

        // Check to see if we have enough free space.
        //
        // As a special case, note that if *nothing* is allocated in this slab,
        // then we always allow a material to be placed in it, regardless of the
        // number of bindings the material has. This is so that, if the
        // platform's maximum bindless count is set too low to hold even a
        // single material, we can still place each material into a separate
        // slab instead of failing outright.
        if self.allocated_resource_count > 0
            && self.allocated_resource_count + allocation_candidate.needed_free_slots
                > slot_capacity
        {
            trace!("Slab is full, can't allocate");
            return Err(unprepared_bind_group);
        }

        // OK, we can allocate in this slab. Assign a slot ID.
        // Reuse a previously-freed slot if one is available; otherwise mint a
        // brand-new one at the end.
        let slot = self
            .free_slots
            .pop()
            .unwrap_or(MaterialBindGroupSlot(self.live_allocation_count));

        // Bump the live allocation count.
        self.live_allocation_count += 1;

        // Insert the resources into the binding arrays.
        let allocated_resource_slots =
            self.insert_resources(unprepared_bind_group.bindings, allocation_candidate);

        // Serialize the allocated resource slots, recording which binding-array
        // slot each bindless index resolved to for this material.
        for bindless_index_table in &mut self.bindless_index_tables {
            bindless_index_table.set(slot, &allocated_resource_slots);
        }

        // Invalidate the cached bind group so it gets rebuilt on the next
        // `prepare` pass.
        self.bind_group = None;

        Ok(slot)
    }
    /// Gathers the information needed to determine whether the given unprepared
    /// bind group can be allocated in this slab.
    ///
    /// Returns `None` if the bind group can't be placed in this slab at all
    /// (i.e. a buffer binding has no matching binding array here). Otherwise
    /// returns a candidate listing which resources already live in this slab
    /// (and can be shared) and how many fresh slots the rest would need.
    fn check_allocation(
        &self,
        unprepared_bind_group: &UnpreparedBindGroup,
    ) -> Option<BindlessAllocationCandidate> {
        let mut allocation_candidate = BindlessAllocationCandidate {
            pre_existing_resources: HashMap::default(),
            needed_free_slots: 0,
        };

        for &(bindless_index, ref owned_binding_resource) in unprepared_bind_group.bindings.iter() {
            let bindless_index = BindlessIndex(bindless_index);
            match *owned_binding_resource {
                OwnedBindingResource::Buffer(ref buffer) => {
                    let Some(binding_array) = self.buffers.get(&bindless_index) else {
                        error!(
                            "Binding array wasn't present for buffer at index {:?}",
                            bindless_index
                        );
                        return None;
                    };
                    // If the identical buffer is already resident, note its
                    // slot so it can be shared instead of re-inserted.
                    match binding_array.find(BindingResourceId::Buffer(buffer.id())) {
                        Some(slot) => {
                            allocation_candidate
                                .pre_existing_resources
                                .insert(bindless_index, slot);
                        }
                        None => allocation_candidate.needed_free_slots += 1,
                    }
                }

                OwnedBindingResource::Data(_) => {
                    // The size of a data buffer is unlimited, so plain data
                    // never consumes a bindless slot.
                }

                OwnedBindingResource::TextureView(texture_view_dimension, ref texture_view) => {
                    let bindless_resource_type = BindlessResourceType::from(texture_view_dimension);
                    match self
                        .textures
                        .get(&bindless_resource_type)
                        .expect("Missing binding array for texture")
                        .find(BindingResourceId::TextureView(
                            texture_view_dimension,
                            texture_view.id(),
                        )) {
                        Some(slot) => {
                            allocation_candidate
                                .pre_existing_resources
                                .insert(bindless_index, slot);
                        }
                        None => {
                            allocation_candidate.needed_free_slots += 1;
                        }
                    }
                }

                OwnedBindingResource::Sampler(sampler_binding_type, ref sampler) => {
                    let bindless_resource_type = BindlessResourceType::from(sampler_binding_type);
                    match self
                        .samplers
                        .get(&bindless_resource_type)
                        .expect("Missing binding array for sampler")
                        .find(BindingResourceId::Sampler(sampler.id()))
                    {
                        Some(slot) => {
                            allocation_candidate
                                .pre_existing_resources
                                .insert(bindless_index, slot);
                        }
                        None => {
                            allocation_candidate.needed_free_slots += 1;
                        }
                    }
                }
            }
        }

        Some(allocation_candidate)
    }
    /// Inserts the given [`BindingResources`] into this slab.
    ///
    /// Returns a table that maps the bindless index of each resource to its
    /// slot in its binding array.
    fn insert_resources(
        &mut self,
        mut binding_resources: BindingResources,
        allocation_candidate: BindlessAllocationCandidate,
    ) -> HashMap<BindlessIndex, u32> {
        let mut allocated_resource_slots = HashMap::default();

        for (bindless_index, owned_binding_resource) in binding_resources.drain(..) {
            let bindless_index = BindlessIndex(bindless_index);

            // A slot recorded by `check_allocation` means this exact resource
            // is already resident and will be shared (refcounted), not
            // inserted anew.
            let pre_existing_slot = allocation_candidate
                .pre_existing_resources
                .get(&bindless_index);

            // Otherwise, we need to insert it anew.
            let binding_resource_id = BindingResourceId::from(&owned_binding_resource);
            // `true` means a fresh slot was consumed and
            // `allocated_resource_count` must be bumped; `false` means the
            // insertion only bumped a refcount (or was a data buffer, which
            // doesn't occupy a bindless slot).
            let increment_allocated_resource_count = match owned_binding_resource {
                OwnedBindingResource::Buffer(buffer) => {
                    let slot = self
                        .buffers
                        .get_mut(&bindless_index)
                        .expect("Buffer binding array should exist")
                        .insert(binding_resource_id, buffer);
                    allocated_resource_slots.insert(bindless_index, slot);

                    if let Some(pre_existing_slot) = pre_existing_slot {
                        // Sanity check: deduplication must land on the slot
                        // `check_allocation` predicted.
                        assert_eq!(*pre_existing_slot, slot);

                        false
                    } else {
                        true
                    }
                }
                OwnedBindingResource::Data(data) => {
                    if pre_existing_slot.is_some() {
                        panic!("Data buffers can't be deduplicated")
                    }

                    let slot = self
                        .data_buffers
                        .get_mut(&bindless_index)
                        .expect("Data buffer binding array should exist")
                        .insert(&data);
                    allocated_resource_slots.insert(bindless_index, slot);
                    false
                }
                OwnedBindingResource::TextureView(texture_view_dimension, texture_view) => {
                    let bindless_resource_type = BindlessResourceType::from(texture_view_dimension);
                    let slot = self
                        .textures
                        .get_mut(&bindless_resource_type)
                        .expect("Texture array should exist")
                        .insert(binding_resource_id, texture_view);
                    allocated_resource_slots.insert(bindless_index, slot);

                    if let Some(pre_existing_slot) = pre_existing_slot {
                        assert_eq!(*pre_existing_slot, slot);

                        false
                    } else {
                        true
                    }
                }
                OwnedBindingResource::Sampler(sampler_binding_type, sampler) => {
                    let bindless_resource_type = BindlessResourceType::from(sampler_binding_type);
                    let slot = self
                        .samplers
                        .get_mut(&bindless_resource_type)
                        .expect("Sampler should exist")
                        .insert(binding_resource_id, sampler);
                    allocated_resource_slots.insert(bindless_index, slot);

                    if let Some(pre_existing_slot) = pre_existing_slot {
                        assert_eq!(*pre_existing_slot, slot);

                        false
                    } else {
                        true
                    }
                }
            };

            // Bump the allocated resource count.
            if increment_allocated_resource_count {
                self.allocated_resource_count += 1;
            }
        }

        allocated_resource_slots
    }
    /// Removes the material allocated in the given slot, with the given
    /// descriptor, from this slab.
    fn free(&mut self, slot: MaterialBindGroupSlot, bindless_descriptor: &BindlessDescriptor) {
        // Loop through each binding.
        for (bindless_index, bindless_resource_type) in
            bindless_descriptor.resources.iter().enumerate()
        {
            let bindless_index = BindlessIndex::from(bindless_index as u32);
            // Skip bindless indices that no index table covers.
            let Some(bindless_index_table) = self.get_bindless_index_table(bindless_index) else {
                continue;
            };
            let Some(bindless_binding) = bindless_index_table.get_binding(slot, bindless_index)
            else {
                continue;
            };

            // Free the binding. If the resource in question was anything other
            // than a data buffer, then it has a reference count and
            // consequently we need to decrement it.
            let decrement_allocated_resource_count = match *bindless_resource_type {
                BindlessResourceType::None => false,
                BindlessResourceType::Buffer => self
                    .buffers
                    .get_mut(&bindless_index)
                    .expect("Buffer should exist with that bindless index")
                    .remove(bindless_binding),
                BindlessResourceType::DataBuffer => {
                    self.data_buffers
                        .get_mut(&bindless_index)
                        .expect("Data buffer should exist with that bindless index")
                        .remove(bindless_binding);
                    // Data buffers don't occupy bindless slots, so they never
                    // affect `allocated_resource_count`.
                    false
                }
                BindlessResourceType::SamplerFiltering
                | BindlessResourceType::SamplerNonFiltering
                | BindlessResourceType::SamplerComparison => self
                    .samplers
                    .get_mut(bindless_resource_type)
                    .expect("Sampler array should exist")
                    .remove(bindless_binding),
                BindlessResourceType::Texture1d
                | BindlessResourceType::Texture2d
                | BindlessResourceType::Texture2dArray
                | BindlessResourceType::Texture3d
                | BindlessResourceType::TextureCube
                | BindlessResourceType::TextureCubeArray => self
                    .textures
                    .get_mut(bindless_resource_type)
                    .expect("Texture array should exist")
                    .remove(bindless_binding),
            };

            // If the slot is now free, decrement the allocated resource
            // count.
            if decrement_allocated_resource_count {
                self.allocated_resource_count -= 1;
            }
        }

        // Invalidate the cached bind group.
        self.bind_group = None;

        // Release the slot ID.
        self.free_slots.push(slot);
        self.live_allocation_count -= 1;
    }
    /// Recreates the bind group and bindless index table buffer if necessary.
    fn prepare(
        &mut self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        label: &'static str,
        bind_group_layout: &BindGroupLayoutDescriptor,
        fallback_bindless_resources: &FallbackBindlessResources,
        fallback_buffers: &HashMap<BindlessIndex, Buffer>,
        fallback_image: &FallbackImage,
        bindless_descriptor: &BindlessDescriptor,
        slab_capacity: u32,
    ) {
        // Create the bindless index table buffers if needed. (GPU buffers must
        // exist before the bind group below can reference them.)
        for bindless_index_table in &mut self.bindless_index_tables {
            bindless_index_table.buffer.prepare(render_device);
        }

        // Create any data buffers we were managing if necessary.
        for data_buffer in self.data_buffers.values_mut() {
            data_buffer.buffer.prepare(render_device);
        }

        // Create the bind group if needed.
        self.prepare_bind_group(
            render_device,
            pipeline_cache,
            label,
            bind_group_layout,
            fallback_bindless_resources,
            fallback_buffers,
            fallback_image,
            bindless_descriptor,
            slab_capacity,
        );
    }
    /// Recreates the bind group if this slab has been changed since the last
    /// time we created it.
    fn prepare_bind_group(
        &mut self,
        render_device: &RenderDevice,
        pipeline_cache: &PipelineCache,
        label: &'static str,
        bind_group_layout: &BindGroupLayoutDescriptor,
        fallback_bindless_resources: &FallbackBindlessResources,
        fallback_buffers: &HashMap<BindlessIndex, Buffer>,
        fallback_image: &FallbackImage,
        bindless_descriptor: &BindlessDescriptor,
        slab_capacity: u32,
    ) {
        // If the bind group is clean, then do nothing. (Allocation and freeing
        // clear `bind_group` to mark the slab dirty.)
        if self.bind_group.is_some() {
            return;
        }

        // Determine whether we need to pad out our binding arrays with dummy
        // resources. Without `PARTIALLY_BOUND_BINDING_ARRAY`, every array
        // element must be bound, so we pad up to the full slab capacity.
        let required_binding_array_size = if render_device
            .features()
            .contains(WgpuFeatures::PARTIALLY_BOUND_BINDING_ARRAY)
        {
            None
        } else {
            Some(slab_capacity)
        };

        let binding_resource_arrays = self.create_binding_resource_arrays(
            fallback_bindless_resources,
            fallback_buffers,
            fallback_image,
            bindless_descriptor,
            required_binding_array_size,
        );

        // Start with one entry per bindless index table.
        let mut bind_group_entries: Vec<_> = self
            .bindless_index_tables
            .iter()
            .map(|bindless_index_table| bindless_index_table.bind_group_entry())
            .collect();

        for &(&binding, ref binding_resource_array) in binding_resource_arrays.iter() {
            bind_group_entries.push(BindGroupEntry {
                binding,
                resource: match *binding_resource_array {
                    BindingResourceArray::Buffers(ref buffer_bindings) => {
                        BindingResource::BufferArray(&buffer_bindings[..])
                    }
                    BindingResourceArray::TextureViews(ref texture_views) => {
                        BindingResource::TextureViewArray(&texture_views[..])
                    }
                    BindingResourceArray::Samplers(ref samplers) => {
                        BindingResource::SamplerArray(&samplers[..])
                    }
                },
            });
        }

        // Create bind group entries for any data buffers we're managing.
        for data_buffer in self.data_buffers.values() {
            bind_group_entries.push(BindGroupEntry {
                binding: *data_buffer.binding_number,
                resource: data_buffer
                    .buffer
                    .buffer()
                    .expect("Backing data buffer must have been uploaded by now")
                    .as_entire_binding(),
            });
        }

        self.bind_group = Some(render_device.create_bind_group(
            Some(label),
            &pipeline_cache.get_bind_group_layout(bind_group_layout),
            &bind_group_entries,
        ));
    }
/// Writes any buffers that we're managing to the GPU.
1322
///
1323
/// Currently, this consists of the bindless index table plus any data
1324
/// buffers we're managing.
1325
fn write_buffer(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
1326
for bindless_index_table in &mut self.bindless_index_tables {
1327
bindless_index_table
1328
.buffer
1329
.write(render_device, render_queue);
1330
}
1331
1332
for data_buffer in self.data_buffers.values_mut() {
1333
data_buffer.buffer.write(render_device, render_queue);
1334
}
1335
}
1336
1337
/// Converts our binding arrays into binding resource arrays suitable for
1338
/// passing to `wgpu`.
1339
fn create_binding_resource_arrays<'a>(
1340
&'a self,
1341
fallback_bindless_resources: &'a FallbackBindlessResources,
1342
fallback_buffers: &'a HashMap<BindlessIndex, Buffer>,
1343
fallback_image: &'a FallbackImage,
1344
bindless_descriptor: &'a BindlessDescriptor,
1345
required_binding_array_size: Option<u32>,
1346
) -> Vec<(&'a u32, BindingResourceArray<'a>)> {
1347
let mut binding_resource_arrays = vec![];
1348
1349
// Build sampler bindings.
1350
self.create_sampler_binding_resource_arrays(
1351
&mut binding_resource_arrays,
1352
fallback_bindless_resources,
1353
required_binding_array_size,
1354
);
1355
1356
// Build texture bindings.
1357
self.create_texture_binding_resource_arrays(
1358
&mut binding_resource_arrays,
1359
fallback_image,
1360
required_binding_array_size,
1361
);
1362
1363
// Build buffer bindings.
1364
self.create_buffer_binding_resource_arrays(
1365
&mut binding_resource_arrays,
1366
fallback_buffers,
1367
bindless_descriptor,
1368
required_binding_array_size,
1369
);
1370
1371
binding_resource_arrays
1372
}
1373
1374
    /// Accumulates sampler binding arrays into binding resource arrays suitable
    /// for passing to `wgpu`.
    fn create_sampler_binding_resource_arrays<'a, 'b>(
        &'a self,
        binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>,
        fallback_bindless_resources: &'a FallbackBindlessResources,
        required_binding_array_size: Option<u32>,
    ) {
        // We have one binding resource array per sampler type.
        for (bindless_resource_type, fallback_sampler) in [
            (
                BindlessResourceType::SamplerFiltering,
                &fallback_bindless_resources.filtering_sampler,
            ),
            (
                BindlessResourceType::SamplerNonFiltering,
                &fallback_bindless_resources.non_filtering_sampler,
            ),
            (
                BindlessResourceType::SamplerComparison,
                &fallback_bindless_resources.comparison_sampler,
            ),
        ] {
            let mut sampler_bindings = vec![];

            match self.samplers.get(&bindless_resource_type) {
                Some(sampler_bindless_binding_array) => {
                    // Empty slots are filled with the fallback sampler.
                    for maybe_bindless_binding in sampler_bindless_binding_array.bindings.iter() {
                        match *maybe_bindless_binding {
                            Some(ref bindless_binding) => {
                                sampler_bindings.push(&*bindless_binding.resource);
                            }
                            None => sampler_bindings.push(&**fallback_sampler),
                        }
                    }
                }

                None => {
                    // Fill with a single fallback sampler.
                    sampler_bindings.push(&**fallback_sampler);
                }
            }

            // Pad out to the full array size if the platform requires fully-
            // bound binding arrays.
            // NOTE(review): this subtraction assumes the binding count never
            // exceeds `required_binding_array_size`; confirm for over-capacity
            // single-material slabs (the `try_allocate` special case).
            if let Some(required_binding_array_size) = required_binding_array_size {
                sampler_bindings.extend(iter::repeat_n(
                    &**fallback_sampler,
                    required_binding_array_size as usize - sampler_bindings.len(),
                ));
            }

            let binding_number = bindless_resource_type
                .binding_number()
                .expect("Sampler bindless resource type must have a binding number");

            binding_resource_arrays.push((
                &**binding_number,
                BindingResourceArray::Samplers(sampler_bindings),
            ));
        }
    }
    /// Accumulates texture binding arrays into binding resource arrays suitable
    /// for passing to `wgpu`.
    fn create_texture_binding_resource_arrays<'a, 'b>(
        &'a self,
        binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>,
        fallback_image: &'a FallbackImage,
        required_binding_array_size: Option<u32>,
    ) {
        // One binding resource array per texture dimensionality, each with its
        // matching fallback image.
        for (bindless_resource_type, fallback_image) in [
            (BindlessResourceType::Texture1d, &fallback_image.d1),
            (BindlessResourceType::Texture2d, &fallback_image.d2),
            (
                BindlessResourceType::Texture2dArray,
                &fallback_image.d2_array,
            ),
            (BindlessResourceType::Texture3d, &fallback_image.d3),
            (BindlessResourceType::TextureCube, &fallback_image.cube),
            (
                BindlessResourceType::TextureCubeArray,
                &fallback_image.cube_array,
            ),
        ] {
            let mut texture_bindings = vec![];

            let binding_number = bindless_resource_type
                .binding_number()
                .expect("Texture bindless resource type must have a binding number");

            match self.textures.get(&bindless_resource_type) {
                Some(texture_bindless_binding_array) => {
                    // Empty slots are filled with the fallback texture view.
                    for maybe_bindless_binding in texture_bindless_binding_array.bindings.iter() {
                        match *maybe_bindless_binding {
                            Some(ref bindless_binding) => {
                                texture_bindings.push(&*bindless_binding.resource);
                            }
                            None => texture_bindings.push(&*fallback_image.texture_view),
                        }
                    }
                }

                None => {
                    // Fill with a single fallback image.
                    texture_bindings.push(&*fallback_image.texture_view);
                }
            }

            // Pad out to the full array size if the platform requires fully-
            // bound binding arrays.
            // NOTE(review): assumes the binding count never exceeds
            // `required_binding_array_size` — TODO confirm.
            if let Some(required_binding_array_size) = required_binding_array_size {
                texture_bindings.extend(iter::repeat_n(
                    &*fallback_image.texture_view,
                    required_binding_array_size as usize - texture_bindings.len(),
                ));
            }

            binding_resource_arrays.push((
                binding_number,
                BindingResourceArray::TextureViews(texture_bindings),
            ));
        }
    }
    /// Accumulates buffer binding arrays into binding resource arrays suitable
    /// for `wgpu`.
    fn create_buffer_binding_resource_arrays<'a, 'b>(
        &'a self,
        binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>,
        fallback_buffers: &'a HashMap<BindlessIndex, Buffer>,
        bindless_descriptor: &'a BindlessDescriptor,
        required_binding_array_size: Option<u32>,
    ) {
        for bindless_buffer_descriptor in bindless_descriptor.buffers.iter() {
            let Some(buffer_bindless_binding_array) =
                self.buffers.get(&bindless_buffer_descriptor.bindless_index)
            else {
                // This is OK, because index buffers are present in
                // `BindlessDescriptor::buffers` but not in
                // `BindlessDescriptor::resources`.
                continue;
            };

            let fallback_buffer = fallback_buffers
                .get(&bindless_buffer_descriptor.bindless_index)
                .expect("Fallback buffer should exist");

            // Empty slots bind the fallback buffer.
            let mut buffer_bindings: Vec<_> = buffer_bindless_binding_array
                .bindings
                .iter()
                .map(|maybe_bindless_binding| {
                    let buffer = match *maybe_bindless_binding {
                        None => fallback_buffer,
                        Some(ref bindless_binding) => &bindless_binding.resource,
                    };
                    BufferBinding {
                        buffer,
                        offset: 0,
                        size: None,
                    }
                })
                .collect();

            // Pad out to the full array size if the platform requires fully-
            // bound binding arrays.
            // NOTE(review): assumes the binding count never exceeds
            // `required_binding_array_size` — TODO confirm.
            if let Some(required_binding_array_size) = required_binding_array_size {
                buffer_bindings.extend(iter::repeat_n(
                    BufferBinding {
                        buffer: fallback_buffer,
                        offset: 0,
                        size: None,
                    },
                    required_binding_array_size as usize - buffer_bindings.len(),
                ));
            }

            binding_resource_arrays.push((
                &*buffer_bindless_binding_array.binding_number,
                BindingResourceArray::Buffers(buffer_bindings),
            ));
        }
    }
/// Returns the [`BindGroup`] corresponding to this slab, if it's been
1553
/// prepared.
1554
fn bind_group(&self) -> Option<&BindGroup> {
1555
self.bind_group.as_ref()
1556
}
1557
1558
    /// Returns the bindless index table containing the given bindless index.
    ///
    /// Returns `None` if no table's `index_range` covers the given index.
    fn get_bindless_index_table(
        &self,
        bindless_index: BindlessIndex,
    ) -> Option<&MaterialBindlessIndexTable> {
        // Binary search for the table whose half-open `index_range` contains
        // `bindless_index`.
        // NOTE(review): this relies on `bindless_index_tables` being sorted by
        // `index_range` with no overlaps — confirm against the descriptor
        // construction.
        let table_index = self
            .bindless_index_tables
            .binary_search_by(|bindless_index_table| {
                if bindless_index < bindless_index_table.index_range.start {
                    Ordering::Less
                } else if bindless_index >= bindless_index_table.index_range.end {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            })
            .ok()?;
        self.bindless_index_tables.get(table_index)
    }
}
1578
1579
impl<R> MaterialBindlessBindingArray<R>
where
    R: GetBindingResourceId,
{
    /// Creates a new [`MaterialBindlessBindingArray`] with the given binding
    /// number, managing resources of the given type.
    fn new(
        binding_number: BindingNumber,
        resource_type: BindlessResourceType,
    ) -> MaterialBindlessBindingArray<R> {
        MaterialBindlessBindingArray {
            binding_number,
            bindings: vec![],
            resource_type,
            // Reverse map from resource ID to occupied slot, used for
            // deduplication in `find`/`insert`.
            resource_to_slot: HashMap::default(),
            free_slots: vec![],
            len: 0,
        }
    }

    /// Returns the slot corresponding to the given resource, if that resource
    /// is located in this binding array.
    ///
    /// If the resource isn't in this binding array, this method returns `None`.
    fn find(&self, binding_resource_id: BindingResourceId) -> Option<u32> {
        self.resource_to_slot.get(&binding_resource_id).copied()
    }

    /// Inserts a bindless resource into a binding array and returns the index
    /// of the slot it was inserted into.
    fn insert(&mut self, binding_resource_id: BindingResourceId, resource: R) -> u32 {
        match self.resource_to_slot.entry(binding_resource_id) {
            bevy_platform::collections::hash_map::Entry::Occupied(o) => {
                // Resource already resident: just bump its reference count.
                let slot = *o.get();

                self.bindings[slot as usize]
                    .as_mut()
                    .expect("A slot in the resource_to_slot map should have a value")
                    .ref_count += 1;

                slot
            }
            bevy_platform::collections::hash_map::Entry::Vacant(v) => {
                // Prefer reusing a freed slot; otherwise append at the end.
                let slot = self.free_slots.pop().unwrap_or(self.len);
                v.insert(slot);

                // Grow the bindings vector if the slot is past the end.
                if self.bindings.len() < slot as usize + 1 {
                    self.bindings.resize_with(slot as usize + 1, || None);
                }
                self.bindings[slot as usize] = Some(MaterialBindlessBinding::new(resource));

                self.len += 1;
                slot
            }
        }
    }

    /// Removes a reference to an object from the slot.
    ///
    /// If the reference count dropped to 0 and the object was freed, this
    /// method returns true. If the object was still referenced after removing
    /// it, returns false.
    fn remove(&mut self, slot: u32) -> bool {
        let maybe_binding = &mut self.bindings[slot as usize];
        let binding = maybe_binding
            .as_mut()
            .expect("Attempted to free an already-freed binding");

        binding.ref_count -= 1;
        if binding.ref_count != 0 {
            return false;
        }

        // Last reference dropped: unlink the resource and recycle the slot.
        let binding_resource_id = binding.resource.binding_resource_id(self.resource_type);
        self.resource_to_slot.remove(&binding_resource_id);

        *maybe_binding = None;
        self.free_slots.push(slot);
        self.len -= 1;
        true
    }
}
1662
impl<R> MaterialBindlessBinding<R>
1663
where
1664
R: GetBindingResourceId,
1665
{
1666
/// Creates a new [`MaterialBindlessBinding`] for a freshly-added resource.
1667
///
1668
/// The reference count is initialized to 1.
1669
fn new(resource: R) -> MaterialBindlessBinding<R> {
1670
MaterialBindlessBinding {
1671
resource,
1672
ref_count: 1,
1673
}
1674
}
1675
}
1676
1677
/// Returns true if the material will *actually* use bindless resources or false
1678
/// if it won't.
1679
///
1680
/// This takes the platform support (or lack thereof) for bindless resources
1681
/// into account.
1682
pub fn material_uses_bindless_resources<M>(render_device: &RenderDevice) -> bool
1683
where
1684
M: Material,
1685
{
1686
M::bindless_slot_count().is_some_and(|bindless_slot_count| {
1687
M::bindless_supported(render_device) && bindless_slot_count.resolve() > 1
1688
})
1689
}
1690
impl MaterialBindlessSlab {
    /// Creates a new [`MaterialBindlessSlab`] for a material with the given
    /// bindless descriptor.
    ///
    /// We use this when no existing slab could hold a material to be allocated.
    fn new(bindless_descriptor: &BindlessDescriptor) -> MaterialBindlessSlab {
        let mut buffers = HashMap::default();
        let mut samplers = HashMap::default();
        let mut textures = HashMap::default();
        let mut data_buffers = HashMap::default();

        // Create one binding array per resource the descriptor declares, keyed
        // by bindless index (buffers/data buffers) or resource type
        // (samplers/textures).
        for (bindless_index, bindless_resource_type) in
            bindless_descriptor.resources.iter().enumerate()
        {
            let bindless_index = BindlessIndex(bindless_index as u32);
            match *bindless_resource_type {
                BindlessResourceType::None => {}
                BindlessResourceType::Buffer => {
                    // Look up the buffer descriptor to get its binding number.
                    let binding_number = bindless_descriptor
                        .buffers
                        .iter()
                        .find(|bindless_buffer_descriptor| {
                            bindless_buffer_descriptor.bindless_index == bindless_index
                        })
                        .expect(
                            "Bindless buffer descriptor matching that bindless index should be \
                             present",
                        )
                        .binding_number;
                    buffers.insert(
                        bindless_index,
                        MaterialBindlessBindingArray::new(binding_number, *bindless_resource_type),
                    );
                }
                BindlessResourceType::DataBuffer => {
                    // Copy the data in.
                    let buffer_descriptor = bindless_descriptor
                        .buffers
                        .iter()
                        .find(|bindless_buffer_descriptor| {
                            bindless_buffer_descriptor.bindless_index == bindless_index
                        })
                        .expect(
                            "Bindless buffer descriptor matching that bindless index should be \
                             present",
                        );
                    data_buffers.insert(
                        bindless_index,
                        MaterialDataBuffer::new(
                            buffer_descriptor.binding_number,
                            buffer_descriptor
                                .size
                                .expect("Data buffers should have a size")
                                as u32,
                        ),
                    );
                }
                BindlessResourceType::SamplerFiltering
                | BindlessResourceType::SamplerNonFiltering
                | BindlessResourceType::SamplerComparison => {
                    samplers.insert(
                        *bindless_resource_type,
                        MaterialBindlessBindingArray::new(
                            *bindless_resource_type.binding_number().unwrap(),
                            *bindless_resource_type,
                        ),
                    );
                }
                BindlessResourceType::Texture1d
                | BindlessResourceType::Texture2d
                | BindlessResourceType::Texture2dArray
                | BindlessResourceType::Texture3d
                | BindlessResourceType::TextureCube
                | BindlessResourceType::TextureCubeArray => {
                    textures.insert(
                        *bindless_resource_type,
                        MaterialBindlessBindingArray::new(
                            *bindless_resource_type.binding_number().unwrap(),
                            *bindless_resource_type,
                        ),
                    );
                }
            }
        }

        // One CPU-side index table per descriptor-declared table.
        let bindless_index_tables = bindless_descriptor
            .index_tables
            .iter()
            .map(MaterialBindlessIndexTable::new)
            .collect();

        MaterialBindlessSlab {
            bind_group: None,
            bindless_index_tables,
            samplers,
            textures,
            buffers,
            data_buffers,
            free_slots: vec![],
            live_allocation_count: 0,
            allocated_resource_count: 0,
        }
    }
}
1796
pub fn init_fallback_bindless_resources(mut commands: Commands, render_device: Res<RenderDevice>) {
1797
commands.insert_resource(FallbackBindlessResources {
1798
filtering_sampler: render_device.create_sampler(&SamplerDescriptor {
1799
label: Some("fallback filtering sampler"),
1800
..default()
1801
}),
1802
non_filtering_sampler: render_device.create_sampler(&SamplerDescriptor {
1803
label: Some("fallback non-filtering sampler"),
1804
mag_filter: FilterMode::Nearest,
1805
min_filter: FilterMode::Nearest,
1806
mipmap_filter: MipmapFilterMode::Nearest,
1807
..default()
1808
}),
1809
comparison_sampler: render_device.create_sampler(&SamplerDescriptor {
1810
label: Some("fallback comparison sampler"),
1811
compare: Some(CompareFunction::Always),
1812
..default()
1813
}),
1814
});
1815
}
1816
1817
impl MaterialBindGroupNonBindlessAllocator {
1818
/// Creates a new [`MaterialBindGroupNonBindlessAllocator`] managing the
1819
/// bind groups for a single non-bindless material.
1820
fn new(label: &'static str) -> MaterialBindGroupNonBindlessAllocator {
1821
MaterialBindGroupNonBindlessAllocator {
1822
label,
1823
bind_groups: vec![],
1824
to_prepare: HashSet::default(),
1825
free_indices: vec![],
1826
}
1827
}
1828
    /// Inserts a bind group, either unprepared or prepared, into this allocator
    /// and returns a [`MaterialBindingId`].
    ///
    /// The returned [`MaterialBindingId`] can later be used to fetch the bind
    /// group.
    fn allocate(&mut self, bind_group: MaterialNonBindlessAllocatedBindGroup) -> MaterialBindingId {
        // Reuse a freed index if available; otherwise append at the end.
        let group_id = self
            .free_indices
            .pop()
            .unwrap_or(MaterialBindGroupIndex(self.bind_groups.len() as u32));
        if self.bind_groups.len() < *group_id as usize + 1 {
            self.bind_groups
                .resize_with(*group_id as usize + 1, || None);
        }

        // Unprepared bind groups need a later `prepare` pass, so queue them.
        if matches!(
            bind_group,
            MaterialNonBindlessAllocatedBindGroup::Unprepared { .. }
        ) {
            self.to_prepare.insert(group_id);
        }

        self.bind_groups[*group_id as usize] = Some(bind_group);

        // Non-bindless bind groups hold exactly one material, so the slot is
        // always the default (0).
        MaterialBindingId {
            group: group_id,
            slot: default(),
        }
    }
1859
/// Inserts an unprepared bind group into this allocator and returns a
1860
/// [`MaterialBindingId`].
1861
fn allocate_unprepared(
1862
&mut self,
1863
unprepared_bind_group: UnpreparedBindGroup,
1864
bind_group_layout: BindGroupLayoutDescriptor,
1865
) -> MaterialBindingId {
1866
self.allocate(MaterialNonBindlessAllocatedBindGroup::Unprepared {
1867
bind_group: unprepared_bind_group,
1868
layout: bind_group_layout,
1869
})
1870
}
1871
1872
/// Inserts an prepared bind group into this allocator and returns a
1873
/// [`MaterialBindingId`].
1874
fn allocate_prepared(&mut self, prepared_bind_group: PreparedBindGroup) -> MaterialBindingId {
1875
self.allocate(MaterialNonBindlessAllocatedBindGroup::Prepared {
1876
bind_group: prepared_bind_group,
1877
uniform_buffers: vec![],
1878
})
1879
}
1880
1881
/// Deallocates the bind group with the given binding ID.
1882
fn free(&mut self, binding_id: MaterialBindingId) {
1883
debug_assert_eq!(binding_id.slot, MaterialBindGroupSlot(0));
1884
debug_assert!(self.bind_groups[*binding_id.group as usize].is_some());
1885
self.bind_groups[*binding_id.group as usize] = None;
1886
self.to_prepare.remove(&binding_id.group);
1887
self.free_indices.push(binding_id.group);
1888
}
1889
1890
/// Returns a wrapper around the bind group with the given index.
1891
fn get(&self, group: MaterialBindGroupIndex) -> Option<MaterialNonBindlessSlab<'_>> {
1892
self.bind_groups[group.0 as usize]
1893
.as_ref()
1894
.map(|bind_group| match bind_group {
1895
MaterialNonBindlessAllocatedBindGroup::Prepared { bind_group, .. } => {
1896
MaterialNonBindlessSlab::Prepared(bind_group)
1897
}
1898
MaterialNonBindlessAllocatedBindGroup::Unprepared { .. } => {
1899
MaterialNonBindlessSlab::Unprepared
1900
}
1901
})
1902
}
1903
1904
/// Prepares any as-yet unprepared bind groups that this allocator is
1905
/// managing.
1906
///
1907
/// Unprepared bind groups can be added to this allocator with
1908
/// [`Self::allocate_unprepared`]. Such bind groups will defer being
1909
/// prepared until the next time this method is called.
1910
fn prepare_bind_groups(
1911
&mut self,
1912
render_device: &RenderDevice,
1913
pipeline_cache: &PipelineCache,
1914
) {
1915
for bind_group_index in mem::take(&mut self.to_prepare) {
1916
let Some(MaterialNonBindlessAllocatedBindGroup::Unprepared {
1917
bind_group: unprepared_bind_group,
1918
layout: bind_group_layout,
1919
}) = mem::take(&mut self.bind_groups[*bind_group_index as usize])
1920
else {
1921
panic!("Allocation didn't exist or was already prepared");
1922
};
1923
1924
// Pack any `Data` into uniform buffers.
1925
let mut uniform_buffers = vec![];
1926
for (index, binding) in unprepared_bind_group.bindings.iter() {
1927
let OwnedBindingResource::Data(ref owned_data) = *binding else {
1928
continue;
1929
};
1930
let label = format!("material uniform data {}", *index);
1931
let uniform_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor {
1932
label: Some(&label),
1933
contents: &owned_data.0,
1934
usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM,
1935
});
1936
uniform_buffers.push(uniform_buffer);
1937
}
1938
1939
// Create bind group entries.
1940
let mut bind_group_entries = vec![];
1941
let mut uniform_buffers_iter = uniform_buffers.iter();
1942
for (index, binding) in unprepared_bind_group.bindings.iter() {
1943
match *binding {
1944
OwnedBindingResource::Data(_) => {
1945
bind_group_entries.push(BindGroupEntry {
1946
binding: *index,
1947
resource: uniform_buffers_iter
1948
.next()
1949
.expect("We should have created uniform buffers for each `Data`")
1950
.as_entire_binding(),
1951
});
1952
}
1953
_ => bind_group_entries.push(BindGroupEntry {
1954
binding: *index,
1955
resource: binding.get_binding(),
1956
}),
1957
}
1958
}
1959
1960
// Create the bind group.
1961
let bind_group = render_device.create_bind_group(
1962
self.label,
1963
&pipeline_cache.get_bind_group_layout(&bind_group_layout),
1964
&bind_group_entries,
1965
);
1966
1967
self.bind_groups[*bind_group_index as usize] =
1968
Some(MaterialNonBindlessAllocatedBindGroup::Prepared {
1969
bind_group: PreparedBindGroup {
1970
bindings: unprepared_bind_group.bindings,
1971
bind_group,
1972
},
1973
uniform_buffers,
1974
});
1975
}
1976
}
1977
}
1978
1979
impl<'a> MaterialSlab<'a> {
1980
/// Returns the [`BindGroup`] corresponding to this slab, if it's been
1981
/// prepared.
1982
///
1983
/// You can prepare bind groups by calling
1984
/// [`MaterialBindGroupAllocator::prepare_bind_groups`]. If the bind group
1985
/// isn't ready, this method returns `None`.
1986
pub fn bind_group(&self) -> Option<&'a BindGroup> {
1987
match self.0 {
1988
MaterialSlabImpl::Bindless(material_bindless_slab) => {
1989
material_bindless_slab.bind_group()
1990
}
1991
MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Prepared(
1992
prepared_bind_group,
1993
)) => Some(&prepared_bind_group.bind_group),
1994
MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Unprepared) => None,
1995
}
1996
}
1997
}
1998
1999
impl MaterialDataBuffer {
2000
/// Creates a new [`MaterialDataBuffer`] managing a buffer of elements of
2001
/// size `aligned_element_size` that will be bound to the given binding
2002
/// number.
2003
fn new(binding_number: BindingNumber, aligned_element_size: u32) -> MaterialDataBuffer {
2004
MaterialDataBuffer {
2005
binding_number,
2006
buffer: RetainedRawBufferVec::new(BufferUsages::STORAGE),
2007
aligned_element_size,
2008
free_slots: vec![],
2009
len: 0,
2010
}
2011
}
2012
2013
/// Allocates a slot for a new piece of data, copies the data into that
2014
/// slot, and returns the slot ID.
2015
///
2016
/// The size of the piece of data supplied to this method must equal the
2017
/// [`Self::aligned_element_size`] provided to [`MaterialDataBuffer::new`].
2018
fn insert(&mut self, data: &[u8]) -> u32 {
2019
// Make sure the data is of the right length.
2020
debug_assert_eq!(data.len(), self.aligned_element_size as usize);
2021
2022
// Grab a slot.
2023
let slot = self.free_slots.pop().unwrap_or(self.len);
2024
2025
// Calculate the range we're going to copy to.
2026
let start = slot as usize * self.aligned_element_size as usize;
2027
let end = (slot as usize + 1) * self.aligned_element_size as usize;
2028
2029
// Resize the buffer if necessary.
2030
if self.buffer.len() < end {
2031
self.buffer.reserve_internal(end);
2032
}
2033
while self.buffer.values().len() < end {
2034
self.buffer.push(0);
2035
}
2036
2037
// Copy in the data.
2038
self.buffer.values_mut()[start..end].copy_from_slice(data);
2039
2040
// Mark the buffer dirty, and finish up.
2041
self.len += 1;
2042
self.buffer.dirty = BufferDirtyState::NeedsReserve;
2043
slot
2044
}
2045
2046
/// Marks the given slot as free.
2047
fn remove(&mut self, slot: u32) {
2048
self.free_slots.push(slot);
2049
self.len -= 1;
2050
}
2051
}
2052
2053