// crates/bevy_render/src/render_resource/specializer.rs
use bevy_material::descriptor::{1CachedComputePipelineId, CachedRenderPipelineId, ComputePipelineDescriptor,2RenderPipelineDescriptor,3};45use super::{ComputePipeline, PipelineCache, RenderPipeline};6use bevy_ecs::error::BevyError;7use bevy_log::error;8use bevy_platform::{9collections::{10hash_map::{Entry, VacantEntry},11HashMap,12},13hash::FixedHasher,14};15use core::{hash::Hash, marker::PhantomData};16use variadics_please::all_tuples;1718pub use bevy_render_macros::{Specializer, SpecializerKey};1920/// Defines a type that is able to be "specialized" and cached by creating and transforming21/// its descriptor type. This is implemented for [`RenderPipeline`] and [`ComputePipeline`], and22/// likely will not have much utility for other types.23///24/// See docs on [`Specializer`] for more info.25pub trait Specializable {26type Descriptor: PartialEq + Clone + Send + Sync;27type CachedId: Clone + Send + Sync;28fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId;29fn get_descriptor(pipeline_cache: &PipelineCache, id: Self::CachedId) -> &Self::Descriptor;30}3132impl Specializable for RenderPipeline {33type Descriptor = RenderPipelineDescriptor;34type CachedId = CachedRenderPipelineId;3536fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {37pipeline_cache.queue_render_pipeline(descriptor)38}3940fn get_descriptor(41pipeline_cache: &PipelineCache,42id: CachedRenderPipelineId,43) -> &Self::Descriptor {44pipeline_cache.get_render_pipeline_descriptor(id)45}46}4748impl Specializable for ComputePipeline {49type Descriptor = ComputePipelineDescriptor;5051type CachedId = CachedComputePipelineId;5253fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {54pipeline_cache.queue_compute_pipeline(descriptor)55}5657fn get_descriptor(58pipeline_cache: &PipelineCache,59id: CachedComputePipelineId,60) -> &Self::Descriptor 
{61pipeline_cache.get_compute_pipeline_descriptor(id)62}63}6465/// Defines a type capable of "specializing" values of a type T.66///67/// Specialization is the process of generating variants of a type T68/// from small hashable keys, and specializers themselves can be69/// thought of as [pure functions] from the key type to `T`, that70/// [memoize] their results based on the key.71///72/// <div class="warning">73/// Because specialization is designed for use with render and compute74/// pipelines, specializers act on <i>descriptors</i> of <code>T</code> rather75/// than produce <code>T</code> itself, but the above comparison is still valid.76/// </div>77///78/// Since compiling render and compute pipelines can be so slow,79/// specialization allows a Bevy app to detect when it would compile80/// a duplicate pipeline and reuse what's already in the cache. While81/// pipelines could all be memoized hashing each whole descriptor, this82/// would be much slower and could still create duplicates. In contrast,83/// memoizing groups of *related* pipelines based on a small hashable84/// key is much faster. See the docs on [`SpecializerKey`] for more info.85///86/// ## Composing Specializers87///88/// This trait can be derived with `#[derive(Specializer)]` for structs whose89/// fields all implement [`Specializer`]. This allows for composing multiple90/// specializers together, and makes encapsulation and separating concerns91/// between specializers much nicer. 
One could make individual specializers92/// for common operations and place them in entirely separate modules, then93/// compose them together with a single `#[derive]`94///95/// ```rust96/// # use bevy_ecs::error::BevyError;97/// # use bevy_render::render_resource::Specializer;98/// # use bevy_render::render_resource::SpecializerKey;99/// # use bevy_render::render_resource::RenderPipeline;100/// # use bevy_render::render_resource::RenderPipelineDescriptor;101/// struct A;102/// struct B;103/// #[derive(Copy, Clone, PartialEq, Eq, Hash, SpecializerKey)]104/// struct BKey { contrived_number: u32 };105///106/// impl Specializer<RenderPipeline> for A {107/// type Key = ();108///109/// fn specialize(110/// &self,111/// key: (),112/// descriptor: &mut RenderPipelineDescriptor113/// ) -> Result<(), BevyError> {114/// # let _ = descriptor;115/// // mutate the descriptor here116/// Ok(key)117/// }118/// }119///120/// impl Specializer<RenderPipeline> for B {121/// type Key = BKey;122///123/// fn specialize(124/// &self,125/// key: BKey,126/// descriptor: &mut RenderPipelineDescriptor127/// ) -> Result<BKey, BevyError> {128/// # let _ = descriptor;129/// // mutate the descriptor here130/// Ok(key)131/// }132/// }133///134/// #[derive(Specializer)]135/// #[specialize(RenderPipeline)]136/// struct C {137/// #[key(default)]138/// a: A,139/// b: B,140/// }141///142/// /*143/// The generated implementation:144/// impl Specializer<RenderPipeline> for C {145/// type Key = BKey;146/// fn specialize(147/// &self,148/// key: Self::Key,149/// descriptor: &mut RenderPipelineDescriptor150/// ) -> Result<Canonical<Self::Key>, BevyError> {151/// let _ = self.a.specialize((), descriptor);152/// let key = self.b.specialize(key, descriptor);153/// Ok(key)154/// }155/// }156/// */157/// ```158///159/// The key type for a composed specializer will be a tuple of the keys160/// of each field, and their specialization logic will be applied in field161/// order. 
Since derive macros can't have generic parameters, the derive macro162/// requires an additional `#[specialize(..targets)]` attribute to specify a163/// list of types to target for the implementation. `#[specialize(all)]` is164/// also allowed, and will generate a fully generic implementation at the cost165/// of slightly worse error messages.166///167/// Additionally, each field can optionally take a `#[key]` attribute to168/// specify a "key override". This will hide that field's key from being169/// exposed by the wrapper, and always use the value given by the attribute.170/// Values for this attribute may either be `default` which will use the key's171/// [`Default`] implementation, or a valid rust expression of the key type.172///173/// [pure functions]: https://en.wikipedia.org/wiki/Pure_function174/// [memoize]: https://en.wikipedia.org/wiki/Memoization175pub trait Specializer<T: Specializable>: Send + Sync + 'static {176type Key: SpecializerKey;177fn specialize(178&self,179key: Self::Key,180descriptor: &mut T::Descriptor,181) -> Result<Canonical<Self::Key>, BevyError>;182}183184// TODO: update docs for `SpecializerKey` with a more concrete example185// once we've migrated mesh layout specialization186187/// Defines a type that is able to be used as a key for [`Specializer`]s188///189/// <div class = "warning">190/// <strong>Most types should implement this trait with the included derive macro.</strong> <br/>191/// This generates a "canonical" key type, with <code>IS_CANONICAL = true</code>, and <code>Canonical = Self</code>192/// </div>193///194/// ## What's a "canonical" key?195///196/// The specialization API memoizes pipelines based on the hash of each key, but this197/// can still produce duplicates. 
For example, if one used a list of vertex attributes198/// as a key, even if all the same attributes were present they could be in any order.199/// In each case, though the keys would be "different" they would produce the same200/// pipeline.201///202/// To address this, during specialization keys are processed into a [canonical]203/// (or "standard") form that represents the actual descriptor that was produced.204/// In the previous example, that would be the final `VertexBufferLayout` contained205/// by the pipeline descriptor. This new key is used by [`Variants`] to206/// perform additional checks for duplicates, but only if required. If a key is207/// canonical from the start, then there's no need.208///209/// For implementors: the main property of a canonical key is that if two keys hash210/// differently, they should nearly always produce different descriptors.211///212/// [canonical]: https://en.wikipedia.org/wiki/Canonicalization213pub trait SpecializerKey: Clone + Hash + Eq {214/// Denotes whether this key is canonical or not. This should only be `true`215/// if and only if `Canonical = Self`.216const IS_CANONICAL: bool;217218/// The canonical key type to convert this into during specialization.219type Canonical: Hash + Eq;220}221222pub type Canonical<T> = <T as SpecializerKey>::Canonical;223224impl<T: Specializable> Specializer<T> for () {225type Key = ();226227fn specialize(228&self,229_key: Self::Key,230_descriptor: &mut T::Descriptor,231) -> Result<(), BevyError> {232Ok(())233}234}235236impl<T: Specializable, V: Send + Sync + 'static> Specializer<T> for PhantomData<V> {237type Key = ();238239fn specialize(240&self,241_key: Self::Key,242_descriptor: &mut T::Descriptor,243) -> Result<(), BevyError> {244Ok(())245}246}247248macro_rules! 
impl_specialization_key_tuple {249($(#[$meta:meta])* $($T:ident),*) => {250$(#[$meta])*251impl <$($T: SpecializerKey),*> SpecializerKey for ($($T,)*) {252const IS_CANONICAL: bool = true $(&& <$T as SpecializerKey>::IS_CANONICAL)*;253type Canonical = ($(Canonical<$T>,)*);254}255};256}257258all_tuples!(259#[doc(fake_variadic)]260impl_specialization_key_tuple,2610,26212,263T264);265266/// A cache for variants of a resource type created by a specializer.267/// At most one resource will be created for each key.268pub struct Variants<T: Specializable, S: Specializer<T>> {269specializer: S,270base_descriptor: T::Descriptor,271primary_cache: HashMap<S::Key, T::CachedId>,272secondary_cache: HashMap<Canonical<S::Key>, T::CachedId>,273}274275impl<T: Specializable, S: Specializer<T>> Variants<T, S> {276/// Creates a new [`Variants`] from a [`Specializer`] and a base descriptor.277#[inline]278pub fn new(specializer: S, base_descriptor: T::Descriptor) -> Self {279Self {280specializer,281base_descriptor,282primary_cache: Default::default(),283secondary_cache: Default::default(),284}285}286287/// Specializes a resource given the [`Specializer`]'s key type.288#[inline]289pub fn specialize(290&mut self,291pipeline_cache: &PipelineCache,292key: S::Key,293) -> Result<T::CachedId, BevyError> {294let entry = self.primary_cache.entry(key.clone());295match entry {296Entry::Occupied(entry) => Ok(entry.get().clone()),297Entry::Vacant(entry) => Self::specialize_slow(298&self.specializer,299self.base_descriptor.clone(),300pipeline_cache,301key,302entry,303&mut self.secondary_cache,304),305}306}307308#[cold]309fn specialize_slow(310specializer: &S,311base_descriptor: T::Descriptor,312pipeline_cache: &PipelineCache,313key: S::Key,314primary_entry: VacantEntry<S::Key, T::CachedId, FixedHasher>,315secondary_cache: &mut HashMap<Canonical<S::Key>, T::CachedId>,316) -> Result<T::CachedId, BevyError> {317let mut descriptor = base_descriptor.clone();318let canonical_key = 
specializer.specialize(key.clone(), &mut descriptor)?;319320// if the whole key is canonical, the secondary cache isn't needed.321if <S::Key as SpecializerKey>::IS_CANONICAL {322return Ok(primary_entry323.insert(<T as Specializable>::queue(pipeline_cache, descriptor))324.clone());325}326327let id = match secondary_cache.entry(canonical_key) {328Entry::Occupied(entry) => {329if cfg!(debug_assertions) {330let stored_descriptor =331<T as Specializable>::get_descriptor(pipeline_cache, entry.get().clone());332if &descriptor != stored_descriptor {333error!(334"Invalid Specializer<{}> impl for {}: the cached descriptor \335is not equal to the generated descriptor for the given key. \336This means the Specializer implementation uses unused information \337from the key to specialize the pipeline. This is not allowed \338because it would invalidate the cache.",339core::any::type_name::<T>(),340core::any::type_name::<S>()341);342}343}344entry.into_mut().clone()345}346Entry::Vacant(entry) => entry347.insert(<T as Specializable>::queue(pipeline_cache, descriptor))348.clone(),349};350351primary_entry.insert(id.clone());352Ok(id)353}354}355356357