Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bevyengine
GitHub Repository: bevyengine/bevy
Path: blob/main/crates/bevy_render/src/render_resource/specializer.rs
9353 views
1
use bevy_material::descriptor::{
2
CachedComputePipelineId, CachedRenderPipelineId, ComputePipelineDescriptor,
3
RenderPipelineDescriptor,
4
};
5
6
use super::{ComputePipeline, PipelineCache, RenderPipeline};
7
use bevy_ecs::error::BevyError;
8
use bevy_log::error;
9
use bevy_platform::{
10
collections::{
11
hash_map::{Entry, VacantEntry},
12
HashMap,
13
},
14
hash::FixedHasher,
15
};
16
use core::{hash::Hash, marker::PhantomData};
17
use variadics_please::all_tuples;
18
19
pub use bevy_render_macros::{Specializer, SpecializerKey};
20
21
/// Defines a type that is able to be "specialized" and cached by creating and transforming
/// its descriptor type. This is implemented for [`RenderPipeline`] and [`ComputePipeline`], and
/// likely will not have much utility for other types.
///
/// See docs on [`Specializer`] for more info.
pub trait Specializable {
    /// The descriptor type from which the resource is created. `PartialEq` is
    /// required so [`Variants`] can verify that equal canonical keys produced
    /// equal descriptors.
    type Descriptor: PartialEq + Clone + Send + Sync;
    /// The cheap, cloneable id handed out by the [`PipelineCache`] for a
    /// queued resource.
    type CachedId: Clone + Send + Sync;
    /// Queues the descriptor for (async) creation, returning its cached id.
    fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId;
    /// Retrieves the descriptor that a previously queued id was created from.
    fn get_descriptor(pipeline_cache: &PipelineCache, id: Self::CachedId) -> &Self::Descriptor;
}
32
33
impl Specializable for RenderPipeline {
34
type Descriptor = RenderPipelineDescriptor;
35
type CachedId = CachedRenderPipelineId;
36
37
fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {
38
pipeline_cache.queue_render_pipeline(descriptor)
39
}
40
41
fn get_descriptor(
42
pipeline_cache: &PipelineCache,
43
id: CachedRenderPipelineId,
44
) -> &Self::Descriptor {
45
pipeline_cache.get_render_pipeline_descriptor(id)
46
}
47
}
48
49
impl Specializable for ComputePipeline {
50
type Descriptor = ComputePipelineDescriptor;
51
52
type CachedId = CachedComputePipelineId;
53
54
fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {
55
pipeline_cache.queue_compute_pipeline(descriptor)
56
}
57
58
fn get_descriptor(
59
pipeline_cache: &PipelineCache,
60
id: CachedComputePipelineId,
61
) -> &Self::Descriptor {
62
pipeline_cache.get_compute_pipeline_descriptor(id)
63
}
64
}
65
66
/// Defines a type capable of "specializing" values of a type T.
///
/// Specialization is the process of generating variants of a type T
/// from small hashable keys, and specializers themselves can be
/// thought of as [pure functions] from the key type to `T`, that
/// [memoize] their results based on the key.
///
/// <div class="warning">
/// Because specialization is designed for use with render and compute
/// pipelines, specializers act on <i>descriptors</i> of <code>T</code> rather
/// than produce <code>T</code> itself, but the above comparison is still valid.
/// </div>
///
/// Since compiling render and compute pipelines can be so slow,
/// specialization allows a Bevy app to detect when it would compile
/// a duplicate pipeline and reuse what's already in the cache. While
/// pipelines could all be memoized by hashing each whole descriptor, this
/// would be much slower and could still create duplicates. In contrast,
/// memoizing groups of *related* pipelines based on a small hashable
/// key is much faster. See the docs on [`SpecializerKey`] for more info.
///
/// ## Composing Specializers
///
/// This trait can be derived with `#[derive(Specializer)]` for structs whose
/// fields all implement [`Specializer`]. This allows for composing multiple
/// specializers together, and makes encapsulation and separating concerns
/// between specializers much nicer. One could make individual specializers
/// for common operations and place them in entirely separate modules, then
/// compose them together with a single `#[derive]`.
///
/// ```rust
/// # use bevy_ecs::error::BevyError;
/// # use bevy_render::render_resource::Specializer;
/// # use bevy_render::render_resource::SpecializerKey;
/// # use bevy_render::render_resource::RenderPipeline;
/// # use bevy_render::render_resource::RenderPipelineDescriptor;
/// struct A;
/// struct B;
/// #[derive(Copy, Clone, PartialEq, Eq, Hash, SpecializerKey)]
/// struct BKey { contrived_number: u32 };
///
/// impl Specializer<RenderPipeline> for A {
///     type Key = ();
///
///     fn specialize(
///         &self,
///         key: (),
///         descriptor: &mut RenderPipelineDescriptor
///     ) -> Result<(), BevyError> {
///         # let _ = descriptor;
///         // mutate the descriptor here
///         Ok(key)
///     }
/// }
///
/// impl Specializer<RenderPipeline> for B {
///     type Key = BKey;
///
///     fn specialize(
///         &self,
///         key: BKey,
///         descriptor: &mut RenderPipelineDescriptor
///     ) -> Result<BKey, BevyError> {
///         # let _ = descriptor;
///         // mutate the descriptor here
///         Ok(key)
///     }
/// }
///
/// #[derive(Specializer)]
/// #[specialize(RenderPipeline)]
/// struct C {
///     #[key(default)]
///     a: A,
///     b: B,
/// }
///
/// /*
/// The generated implementation:
/// impl Specializer<RenderPipeline> for C {
///     type Key = BKey;
///     fn specialize(
///         &self,
///         key: Self::Key,
///         descriptor: &mut RenderPipelineDescriptor
///     ) -> Result<Canonical<Self::Key>, BevyError> {
///         let _ = self.a.specialize((), descriptor);
///         let key = self.b.specialize(key, descriptor)?;
///         Ok(key)
///     }
/// }
/// */
/// ```
///
/// The key type for a composed specializer will be a tuple of the keys
/// of each field, and their specialization logic will be applied in field
/// order. Since derive macros can't have generic parameters, the derive macro
/// requires an additional `#[specialize(..targets)]` attribute to specify a
/// list of types to target for the implementation. `#[specialize(all)]` is
/// also allowed, and will generate a fully generic implementation at the cost
/// of slightly worse error messages.
///
/// Additionally, each field can optionally take a `#[key]` attribute to
/// specify a "key override". This will hide that field's key from being
/// exposed by the wrapper, and always use the value given by the attribute.
/// Values for this attribute may either be `default` which will use the key's
/// [`Default`] implementation, or a valid rust expression of the key type.
///
/// [pure functions]: https://en.wikipedia.org/wiki/Pure_function
/// [memoize]: https://en.wikipedia.org/wiki/Memoization
pub trait Specializer<T: Specializable>: Send + Sync + 'static {
    /// The small hashable key that variants of `T` are memoized by.
    type Key: SpecializerKey;
    /// Mutates `descriptor` according to `key`, returning the canonical form
    /// of the key that describes the final descriptor.
    fn specialize(
        &self,
        key: Self::Key,
        descriptor: &mut T::Descriptor,
    ) -> Result<Canonical<Self::Key>, BevyError>;
}
184
185
// TODO: update docs for `SpecializerKey` with a more concrete example
// once we've migrated mesh layout specialization

/// Defines a type that is able to be used as a key for [`Specializer`]s.
///
/// <div class = "warning">
/// <strong>Most types should implement this trait with the included derive macro.</strong> <br/>
/// This generates a "canonical" key type, with <code>IS_CANONICAL = true</code>, and <code>Canonical = Self</code>
/// </div>
///
/// ## What's a "canonical" key?
///
/// The specialization API memoizes pipelines based on the hash of each key, but this
/// can still produce duplicates. For example, if one used a list of vertex attributes
/// as a key, even if all the same attributes were present they could be in any order.
/// In each case, though the keys would be "different" they would produce the same
/// pipeline.
///
/// To address this, during specialization keys are processed into a [canonical]
/// (or "standard") form that represents the actual descriptor that was produced.
/// In the previous example, that would be the final `VertexBufferLayout` contained
/// by the pipeline descriptor. This new key is used by [`Variants`] to
/// perform additional checks for duplicates, but only if required. If a key is
/// canonical from the start, then there's no need.
///
/// For implementors: the main property of a canonical key is that if two keys hash
/// differently, they should nearly always produce different descriptors.
///
/// [canonical]: https://en.wikipedia.org/wiki/Canonicalization
pub trait SpecializerKey: Clone + Hash + Eq {
    /// Denotes whether this key is canonical or not. This should be `true`
    /// if and only if `Canonical = Self`.
    const IS_CANONICAL: bool;

    /// The canonical key type to convert this into during specialization.
    type Canonical: Hash + Eq;
}
222
223
/// Shorthand for a [`SpecializerKey`]'s canonical form, `<T as SpecializerKey>::Canonical`.
pub type Canonical<T> = <T as SpecializerKey>::Canonical;
224
225
impl<T: Specializable> Specializer<T> for () {
226
type Key = ();
227
228
fn specialize(
229
&self,
230
_key: Self::Key,
231
_descriptor: &mut T::Descriptor,
232
) -> Result<(), BevyError> {
233
Ok(())
234
}
235
}
236
237
impl<T: Specializable, V: Send + Sync + 'static> Specializer<T> for PhantomData<V> {
238
type Key = ();
239
240
fn specialize(
241
&self,
242
_key: Self::Key,
243
_descriptor: &mut T::Descriptor,
244
) -> Result<(), BevyError> {
245
Ok(())
246
}
247
}
248
249
// Implements `SpecializerKey` for tuples of keys: a tuple key is canonical
// only if every element is canonical, and its canonical form is the tuple
// of each element's canonical form. This is what gives derived (composed)
// specializers their tuple key type.
macro_rules! impl_specialization_key_tuple {
    ($(#[$meta:meta])* $($T:ident),*) => {
        $(#[$meta])*
        impl <$($T: SpecializerKey),*> SpecializerKey for ($($T,)*) {
            const IS_CANONICAL: bool = true $(&& <$T as SpecializerKey>::IS_CANONICAL)*;
            type Canonical = ($(Canonical<$T>,)*);
        }
    };
}

// Generate the tuple impls for arities 0 through 12.
all_tuples!(
    #[doc(fake_variadic)]
    impl_specialization_key_tuple,
    0,
    12,
    T
);
266
267
/// A cache for variants of a resource type created by a specializer.
/// At most one resource will be created for each key.
pub struct Variants<T: Specializable, S: Specializer<T>> {
    // The specializer that produces descriptors from keys.
    specializer: S,
    // The descriptor each specialization starts from; cloned per cache miss.
    base_descriptor: T::Descriptor,
    // Maps user-facing keys to cached ids.
    primary_cache: HashMap<S::Key, T::CachedId>,
    // Maps canonical keys to cached ids, deduplicating pipelines whose
    // user-facing keys differ but whose canonical keys match. Unused when
    // `S::Key` is already canonical.
    secondary_cache: HashMap<Canonical<S::Key>, T::CachedId>,
}
275
276
impl<T: Specializable, S: Specializer<T>> Variants<T, S> {
277
/// Creates a new [`Variants`] from a [`Specializer`] and a base descriptor.
278
#[inline]
279
pub fn new(specializer: S, base_descriptor: T::Descriptor) -> Self {
280
Self {
281
specializer,
282
base_descriptor,
283
primary_cache: Default::default(),
284
secondary_cache: Default::default(),
285
}
286
}
287
288
/// Specializes a resource given the [`Specializer`]'s key type.
289
#[inline]
290
pub fn specialize(
291
&mut self,
292
pipeline_cache: &PipelineCache,
293
key: S::Key,
294
) -> Result<T::CachedId, BevyError> {
295
let entry = self.primary_cache.entry(key.clone());
296
match entry {
297
Entry::Occupied(entry) => Ok(entry.get().clone()),
298
Entry::Vacant(entry) => Self::specialize_slow(
299
&self.specializer,
300
self.base_descriptor.clone(),
301
pipeline_cache,
302
key,
303
entry,
304
&mut self.secondary_cache,
305
),
306
}
307
}
308
309
#[cold]
310
fn specialize_slow(
311
specializer: &S,
312
base_descriptor: T::Descriptor,
313
pipeline_cache: &PipelineCache,
314
key: S::Key,
315
primary_entry: VacantEntry<S::Key, T::CachedId, FixedHasher>,
316
secondary_cache: &mut HashMap<Canonical<S::Key>, T::CachedId>,
317
) -> Result<T::CachedId, BevyError> {
318
let mut descriptor = base_descriptor.clone();
319
let canonical_key = specializer.specialize(key.clone(), &mut descriptor)?;
320
321
// if the whole key is canonical, the secondary cache isn't needed.
322
if <S::Key as SpecializerKey>::IS_CANONICAL {
323
return Ok(primary_entry
324
.insert(<T as Specializable>::queue(pipeline_cache, descriptor))
325
.clone());
326
}
327
328
let id = match secondary_cache.entry(canonical_key) {
329
Entry::Occupied(entry) => {
330
if cfg!(debug_assertions) {
331
let stored_descriptor =
332
<T as Specializable>::get_descriptor(pipeline_cache, entry.get().clone());
333
if &descriptor != stored_descriptor {
334
error!(
335
"Invalid Specializer<{}> impl for {}: the cached descriptor \
336
is not equal to the generated descriptor for the given key. \
337
This means the Specializer implementation uses unused information \
338
from the key to specialize the pipeline. This is not allowed \
339
because it would invalidate the cache.",
340
core::any::type_name::<T>(),
341
core::any::type_name::<S>()
342
);
343
}
344
}
345
entry.into_mut().clone()
346
}
347
Entry::Vacant(entry) => entry
348
.insert(<T as Specializable>::queue(pipeline_cache, descriptor))
349
.clone(),
350
};
351
352
primary_entry.insert(id.clone());
353
Ok(id)
354
}
355
}
356
357