// crates/bevy_render/src/render_resource/pipeline_cache.rs
use bevy_material::descriptor::{
    BindGroupLayoutDescriptor, CachedComputePipelineId, CachedRenderPipelineId,
    ComputePipelineDescriptor, PipelineDescriptor, RenderPipelineDescriptor,
};

use crate::{
    render_resource::*,
    renderer::{RenderAdapter, RenderDevice, WgpuWrapper},
    Extract,
};
use alloc::{borrow::Cow, sync::Arc};
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
use bevy_ecs::{
    message::MessageReader,
    resource::Resource,
    system::{Res, ResMut},
};
use bevy_log::error;
use bevy_platform::collections::{HashMap, HashSet};
use bevy_shader::{
    CachedPipelineId, Shader, ShaderCache, ShaderCacheError, ShaderCacheSource, ShaderDefVal,
    ValidateShader,
};
use bevy_tasks::Task;
use bevy_utils::default;
use core::{future::Future, mem};
use std::sync::{Mutex, PoisonError};
use wgpu::{PipelineCompilationOptions, VertexBufferLayout as RawVertexBufferLayout};

/// A pipeline defining the data layout and shader logic for a specific GPU task.
///
/// Used to store a heterogeneous collection of render and compute pipelines together.
#[derive(Debug)]
pub enum Pipeline {
    RenderPipeline(RenderPipeline),
    ComputePipeline(ComputePipeline),
}

/// A pipeline stored in the [`PipelineCache`]: the descriptor it was queued from,
/// together with its current creation state.
pub struct CachedPipeline {
    /// The descriptor this pipeline was (or will be) created from.
    pub descriptor: PipelineDescriptor,
    /// Current creation state of the GPU pipeline object.
    pub state: CachedPipelineState,
}

/// State of a cached pipeline inserted into a [`PipelineCache`].
#[derive(Debug)]
pub enum CachedPipelineState {
    /// The pipeline GPU object is queued for creation.
    Queued,
    /// The pipeline GPU object is being created.
    Creating(Task<Result<Pipeline, ShaderCacheError>>),
    /// The pipeline GPU object was created successfully and is available (allocated on the GPU).
    Ok(Pipeline),
    /// An error occurred while trying to create the pipeline GPU object.
    Err(ShaderCacheError),
}

impl CachedPipelineState {
    /// Convenience method to "unwrap" a pipeline state into its underlying GPU object.
    ///
    /// # Returns
    ///
    /// The method returns the allocated pipeline GPU object.
    ///
    /// # Panics
    ///
    /// This method panics if the pipeline GPU object is not available, either because it is
    /// pending creation or because an error occurred while attempting to create the GPU object.
    pub fn unwrap(&self) -> &Pipeline {
        match self {
            CachedPipelineState::Ok(pipeline) => pipeline,
            CachedPipelineState::Queued => {
                panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.")
            }
            CachedPipelineState::Creating(..) => {
                panic!("Pipeline has not been compiled yet. It is still in the 'Creating' state.")
            }
            CachedPipelineState::Err(err) => panic!("{}", err),
        }
    }
}

type ImmediateSize = u32;
type LayoutCacheKey = (Vec<BindGroupLayoutId>, ImmediateSize);

/// Deduplicates pipeline layouts, keyed by the IDs of the bind group layouts
/// plus the immediate (push-constant-like) data size.
#[derive(Default)]
struct LayoutCache {
    layouts: HashMap<LayoutCacheKey, Arc<WgpuWrapper<PipelineLayout>>>,
}

impl LayoutCache {
    /// Returns the cached pipeline layout for the given bind group layouts and immediate
    /// size, creating (and caching) it on the render device if it doesn't exist yet.
    fn get(
        &mut self,
        render_device: &RenderDevice,
        bind_group_layouts: &[BindGroupLayout],
        immediate_size: u32,
    ) -> Arc<WgpuWrapper<PipelineLayout>> {
        let bind_group_ids = bind_group_layouts.iter().map(BindGroupLayout::id).collect();
        self.layouts
            .entry((bind_group_ids, immediate_size))
            .or_insert_with_key(|(_, immediate_size)| {
                let bind_group_layouts = bind_group_layouts
                    .iter()
                    .map(BindGroupLayout::value)
                    .collect::<Vec<_>>();
                Arc::new(WgpuWrapper::new(render_device.create_pipeline_layout(
                    &PipelineLayoutDescriptor {
                        bind_group_layouts: &bind_group_layouts,
                        immediate_size: *immediate_size,
                        ..default()
                    },
                )))
            })
            .clone()
    }
}

/// Creates a [`ShaderModule`] on the device from a processed shader source,
/// optionally validating it, and converting any wgpu validation error caught via an
/// error scope into a [`ShaderCacheError`].
fn load_module(
    render_device: &RenderDevice,
    shader_source: ShaderCacheSource,
    validate_shader: &ValidateShader,
) -> Result<WgpuWrapper<ShaderModule>, ShaderCacheError> {
    let shader_source = match shader_source {
        #[cfg(feature = "shader_format_spirv")]
        ShaderCacheSource::SpirV(data) => wgpu::util::make_spirv(data),
        #[cfg(not(feature = "shader_format_spirv"))]
        ShaderCacheSource::SpirV(_) => {
            unimplemented!("Enable feature \"shader_format_spirv\" to use SPIR-V shaders")
        }
        ShaderCacheSource::Wgsl(src) => ShaderSource::Wgsl(Cow::Owned(src)),
        #[cfg(not(feature = "decoupled_naga"))]
        ShaderCacheSource::Naga(src) => ShaderSource::Naga(Cow::Owned(src)),
    };
    let module_descriptor = ShaderModuleDescriptor {
        label: None,
        source: shader_source,
    };

    // Push a validation error scope so a failed module creation can be surfaced as an
    // error value instead of an uncaught wgpu validation error.
    let scope = render_device
        .wgpu_device()
        .push_error_scope(wgpu::ErrorFilter::Validation);

    let shader_module = WgpuWrapper::new(match validate_shader {
        ValidateShader::Enabled => {
            render_device.create_and_validate_shader_module(module_descriptor)
        }
        // SAFETY: we are interfacing with shader code, which may contain undefined behavior,
        // such as indexing out of bounds.
        // The checks required are prohibitively expensive and a poor default for game engines.
        ValidateShader::Disabled => unsafe {
            render_device.create_shader_module(module_descriptor)
        },
    });

    let error = scope.pop();

    // `now_or_never` will return Some if the future is ready and None otherwise.
    // On native platforms, wgpu will yield the error immediately while on wasm it may take longer since the browser APIs are asynchronous.
    // So to keep the complexity of the ShaderCache low, we will only catch this error early on native platforms,
    // and on wasm the error will be handled by wgpu and crash the application.
    if let Some(Some(wgpu::Error::Validation { description, .. })) =
        bevy_tasks::futures::now_or_never(error)
    {
        return Err(ShaderCacheError::CreateShaderModule(description));
    }

    Ok(shader_module)
}

/// Deduplicates bind group layouts, keyed by their descriptor.
#[derive(Default)]
struct BindGroupLayoutCache {
    bgls: HashMap<BindGroupLayoutDescriptor, BindGroupLayout>,
}

impl BindGroupLayoutCache {
    /// Returns the cached bind group layout for `descriptor`, creating (and caching)
    /// it on the render device if it doesn't exist yet.
    fn get(
        &mut self,
        render_device: &RenderDevice,
        descriptor: BindGroupLayoutDescriptor,
    ) -> BindGroupLayout {
        self.bgls
            .entry(descriptor)
            .or_insert_with_key(|descriptor| {
                render_device
                    .create_bind_group_layout(descriptor.label.as_ref(), &descriptor.entries)
            })
            .clone()
    }
}

/// Cache for render and compute pipelines.
///
/// The cache stores existing render and compute pipelines allocated on the GPU, as well as
/// pending creation. Pipelines inserted into the cache are identified by a unique ID, which
/// can be used to retrieve the actual GPU object once it's ready. The creation of the GPU
/// pipeline object is deferred to the [`RenderSystems::Render`] step, just before the render
/// graph starts being processed, as this requires access to the GPU.
///
/// Note that the cache does not perform automatic deduplication of identical pipelines. It is
/// up to the user not to insert the same pipeline twice to avoid wasting GPU resources.
///
/// [`RenderSystems::Render`]: crate::RenderSystems::Render
#[derive(Resource)]
pub struct PipelineCache {
    // The layout/bind-group-layout/shader caches are Arc<Mutex<..>> so they can be
    // shared with the async pipeline-creation tasks spawned below.
    layout_cache: Arc<Mutex<LayoutCache>>,
    bindgroup_layout_cache: Arc<Mutex<BindGroupLayoutCache>>,
    shader_cache: Arc<Mutex<ShaderCache<WgpuWrapper<ShaderModule>, RenderDevice>>>,
    device: RenderDevice,
    // Processed pipelines; a pipeline's cached ID is its index into this Vec.
    pipelines: Vec<CachedPipeline>,
    // IDs of pipelines that still need processing on the next `process_queue` run.
    waiting_pipelines: HashSet<CachedPipelineId>,
    // Pipelines queued from `&self` methods; drained into `pipelines` by `process_queue`.
    new_pipelines: Mutex<Vec<CachedPipeline>>,
    // Shader defs appended to every extracted shader (platform capabilities etc.).
    global_shader_defs: Vec<ShaderDefVal>,
    /// If `true`, disables asynchronous pipeline compilation.
    /// This has no effect on macOS, wasm, or without the `multi_threaded` feature.
    pub(crate) synchronous_pipeline_compilation: bool,
}

impl PipelineCache {
    /// Returns an iterator over the pipelines in the pipeline cache.
    pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline> {
        self.pipelines.iter()
    }

    /// Returns an iterator of the IDs of all currently waiting pipelines.
    pub fn waiting_pipelines(&self) -> impl Iterator<Item = CachedPipelineId> + '_ {
        self.waiting_pipelines.iter().copied()
    }

    /// Create a new pipeline cache associated with the given render device.
    pub fn new(
        device: RenderDevice,
        render_adapter: RenderAdapter,
        synchronous_pipeline_compilation: bool,
    ) -> Self {
        let mut global_shader_defs = Vec::new();
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        {
            global_shader_defs.push("NO_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());
        }

        if cfg!(target_abi = "sim") {
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
        }

        global_shader_defs.push(ShaderDefVal::UInt(
            String::from("AVAILABLE_STORAGE_BUFFER_BINDINGS"),
            device.limits().max_storage_buffers_per_shader_stage,
        ));

        Self {
            shader_cache: Arc::new(Mutex::new(ShaderCache::new(
                device.clone(),
                device.features(),
                render_adapter.get_downlevel_capabilities().flags,
                load_module,
            ))),
            device,
            layout_cache: default(),
            bindgroup_layout_cache: default(),
            waiting_pipelines: default(),
            new_pipelines: default(),
            pipelines: default(),
            global_shader_defs,
            synchronous_pipeline_compilation,
        }
    }

    /// Get the state of a cached render pipeline.
    ///
    /// See [`PipelineCache::queue_render_pipeline()`].
    #[inline]
    pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState {
        // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines`
        self.pipelines
            .get(id.id())
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    /// Get the state of a cached compute pipeline.
    ///
    /// See [`PipelineCache::queue_compute_pipeline()`].
    #[inline]
    pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState {
        // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines`
        self.pipelines
            .get(id.id())
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    /// Get the render pipeline descriptor a cached render pipeline was inserted from.
    ///
    /// See [`PipelineCache::queue_render_pipeline()`].
    ///
    /// **Note**: Be careful calling this method. It will panic if called with a pipeline that
    /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`].
    #[inline]
    pub fn get_render_pipeline_descriptor(
        &self,
        id: CachedRenderPipelineId,
    ) -> &RenderPipelineDescriptor {
        match &self.pipelines[id.id()].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(descriptor) => descriptor,
            PipelineDescriptor::ComputePipelineDescriptor(_) => unreachable!(),
        }
    }

    /// Get the compute pipeline descriptor a cached compute pipeline was inserted from.
    ///
    /// See [`PipelineCache::queue_compute_pipeline()`].
    ///
    /// **Note**: Be careful calling this method. It will panic if called with a pipeline that
    /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`].
    #[inline]
    pub fn get_compute_pipeline_descriptor(
        &self,
        id: CachedComputePipelineId,
    ) -> &ComputePipelineDescriptor {
        match &self.pipelines[id.id()].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(_) => unreachable!(),
            PipelineDescriptor::ComputePipelineDescriptor(descriptor) => descriptor,
        }
    }

    /// Try to retrieve a render pipeline GPU object from a cached ID.
    ///
    /// # Returns
    ///
    /// This method returns a successfully created render pipeline if any, or `None` if the pipeline
    /// was not created yet or if there was an error during creation. You can check the actual creation
    /// state with [`PipelineCache::get_render_pipeline_state()`].
    #[inline]
    pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> {
        if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) =
            &self.pipelines.get(id.id())?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

    /// Wait for a render pipeline to finish compiling.
    #[inline]
    pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) {
        // The pipeline may still be sitting in `new_pipelines`; processing the queue
        // moves it into `pipelines` so it can be indexed below.
        if self.pipelines.len() <= id.id() {
            self.process_queue();
        }

        let state = &mut self.pipelines[id.id()].state;
        if let CachedPipelineState::Creating(task) = state {
            *state = match bevy_tasks::block_on(task) {
                Ok(p) => CachedPipelineState::Ok(p),
                Err(e) => CachedPipelineState::Err(e),
            };
        }
    }

    /// Try to retrieve a compute pipeline GPU object from a cached ID.
    ///
    /// # Returns
    ///
    /// This method returns a successfully created compute pipeline if any, or `None` if the pipeline
    /// was not created yet or if there was an error during creation. You can check the actual creation
    /// state with [`PipelineCache::get_compute_pipeline_state()`].
    #[inline]
    pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> {
        if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) =
            &self.pipelines.get(id.id())?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

    /// Insert a render pipeline into the cache, and queue its creation.
    ///
    /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with
    /// an already cached pipeline.
    ///
    /// # Returns
    ///
    /// This method returns the unique render shader ID of the cached pipeline, which can be used to query
    /// the caching state with [`get_render_pipeline_state()`] and to retrieve the created GPU pipeline once
    /// it's ready with [`get_render_pipeline()`].
    ///
    /// [`get_render_pipeline_state()`]: PipelineCache::get_render_pipeline_state
    /// [`get_render_pipeline()`]: PipelineCache::get_render_pipeline
    pub fn queue_render_pipeline(
        &self,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedRenderPipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        // The eventual index in `pipelines`: everything already processed plus
        // everything queued ahead of this one.
        let id = CachedRenderPipelineId::new(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::RenderPipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    /// Insert a compute pipeline into the cache, and queue its creation.
    ///
    /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with
    /// an already cached pipeline.
    ///
    /// # Returns
    ///
    /// This method returns the unique compute shader ID of the cached pipeline, which can be used to query
    /// the caching state with [`get_compute_pipeline_state()`] and to retrieve the created GPU pipeline once
    /// it's ready with [`get_compute_pipeline()`].
    ///
    /// [`get_compute_pipeline_state()`]: PipelineCache::get_compute_pipeline_state
    /// [`get_compute_pipeline()`]: PipelineCache::get_compute_pipeline
    pub fn queue_compute_pipeline(
        &self,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedComputePipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        let id = CachedComputePipelineId::new(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::ComputePipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    /// Returns the (cached) bind group layout for the given descriptor, creating it on
    /// the render device if necessary.
    pub fn get_bind_group_layout(
        &self,
        bind_group_layout_descriptor: &BindGroupLayoutDescriptor,
    ) -> BindGroupLayout {
        self.bindgroup_layout_cache
            .lock()
            .unwrap()
            .get(&self.device, bind_group_layout_descriptor.clone())
    }

    /// Inserts a [`Shader`] into this cache with the provided [`AssetId`].
    pub fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        // Pipelines that depended on the old shader must be recompiled.
        let pipelines_to_queue = shader_cache.set_shader(id, shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    /// Removes a [`Shader`] from this cache if it exists.
    pub fn remove_shader(&mut self, shader: AssetId<Shader>) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        let pipelines_to_queue = shader_cache.remove(shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    /// Kicks off creation of the render pipeline `descriptor` for pipeline `id`,
    /// returning either a `Creating` state (async) or a terminal `Ok`/`Err` state
    /// (synchronous compilation), depending on `create_pipeline_task`.
    fn start_create_render_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedPipelineState {
        // Clone the shared handles that the async task will need.
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        // Bind group layouts are resolved eagerly (before spawning the task) so the
        // bind-group-layout cache lock is not held inside the async block.
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let vertex_module = match shader_cache.get(
                    id,
                    descriptor.vertex.shader.id(),
                    &descriptor.vertex.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                let fragment_module = match &descriptor.fragment {
                    Some(fragment) => {
                        match shader_cache.get(id, fragment.shader.id(), &fragment.shader_defs) {
                            Ok(module) => Some(module),
                            Err(err) => return Err(err),
                        }
                    }
                    None => None,
                };

                let layout = if descriptor.layout.is_empty() && descriptor.immediate_size == 0 {
                    None
                } else {
                    Some(layout_cache.get(&device, &bind_group_layout, descriptor.immediate_size))
                };

                // Release the cache locks before the (potentially slow) GPU pipeline creation.
                drop((shader_cache, layout_cache));

                let vertex_buffer_layouts = descriptor
                    .vertex
                    .buffers
                    .iter()
                    .map(|layout| RawVertexBufferLayout {
                        array_stride: layout.array_stride,
                        attributes: &layout.attributes,
                        step_mode: layout.step_mode,
                    })
                    .collect::<Vec<_>>();

                let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
                    (
                        fragment_module.unwrap(),
                        fragment.entry_point.as_deref(),
                        fragment.targets.as_slice(),
                    )
                });

                // TODO: Expose the rest of this somehow
                let compilation_options = PipelineCompilationOptions {
                    constants: &[],
                    zero_initialize_workgroup_memory: descriptor.zero_initialize_workgroup_memory,
                };

                let descriptor = RawRenderPipelineDescriptor {
                    multiview_mask: None,
                    depth_stencil: descriptor.depth_stencil.clone(),
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    multisample: descriptor.multisample,
                    primitive: descriptor.primitive,
                    vertex: RawVertexState {
                        buffers: &vertex_buffer_layouts,
                        entry_point: descriptor.vertex.entry_point.as_deref(),
                        module: &vertex_module,
                        // TODO: Should this be the same as the fragment compilation options?
                        compilation_options: compilation_options.clone(),
                    },
                    fragment: fragment_data
                        .as_ref()
                        .map(|(module, entry_point, targets)| RawFragmentState {
                            entry_point: entry_point.as_deref(),
                            module,
                            targets,
                            // TODO: Should this be the same as the vertex compilation options?
                            compilation_options,
                        }),
                    cache: None,
                };

                Ok(Pipeline::RenderPipeline(
                    device.create_render_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    /// Kicks off creation of the compute pipeline `descriptor` for pipeline `id`;
    /// mirrors [`Self::start_create_render_pipeline`] for the compute case.
    fn start_create_compute_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedPipelineState {
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let compute_module =
                    match shader_cache.get(id, descriptor.shader.id(), &descriptor.shader_defs) {
                        Ok(module) => module,
                        Err(err) => return Err(err),
                    };

                let layout = if descriptor.layout.is_empty() && descriptor.immediate_size == 0 {
                    None
                } else {
                    Some(layout_cache.get(&device, &bind_group_layout, descriptor.immediate_size))
                };

                drop((shader_cache, layout_cache));

                let descriptor = RawComputePipelineDescriptor {
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    module: &compute_module,
                    entry_point: descriptor.entry_point.as_deref(),
                    // TODO: Expose the rest of this somehow
                    compilation_options: PipelineCompilationOptions {
                        constants: &[],
                        zero_initialize_workgroup_memory: descriptor
                            .zero_initialize_workgroup_memory,
                    },
                    cache: None,
                };

                Ok(Pipeline::ComputePipeline(
                    device.create_compute_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    /// Process the pipeline queue and create all pending pipelines if possible.
    ///
    /// This is generally called automatically during the [`RenderSystems::Render`] step, but can
    /// be called manually to force creation at a different time.
    ///
    /// [`RenderSystems::Render`]: crate::RenderSystems::Render
    pub fn process_queue(&mut self) {
        // Take both collections out of `self` so `process_pipeline` can borrow
        // `&mut self` while iterating.
        let mut waiting_pipelines = mem::take(&mut self.waiting_pipelines);
        let mut pipelines = mem::take(&mut self.pipelines);

        {
            let mut new_pipelines = self
                .new_pipelines
                .lock()
                .unwrap_or_else(PoisonError::into_inner);
            for new_pipeline in new_pipelines.drain(..) {
                let id = pipelines.len();
                pipelines.push(new_pipeline);
                waiting_pipelines.insert(id);
            }
        }

        for id in waiting_pipelines {
            self.process_pipeline(&mut pipelines[id], id);
        }

        self.pipelines = pipelines;
    }

    /// Advances one pipeline's state machine: starts creation for `Queued`, polls
    /// `Creating` tasks, and decides whether `Err` states are retried (shader not yet
    /// available) or logged as permanent failures. Pipelines that still need work are
    /// re-inserted into `waiting_pipelines`.
    fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) {
        match &mut cached_pipeline.state {
            CachedPipelineState::Queued => {
                cached_pipeline.state = match &cached_pipeline.descriptor {
                    PipelineDescriptor::RenderPipelineDescriptor(descriptor) => {
                        self.start_create_render_pipeline(id, *descriptor.clone())
                    }
                    PipelineDescriptor::ComputePipelineDescriptor(descriptor) => {
                        self.start_create_compute_pipeline(id, *descriptor.clone())
                    }
                };
            }

            CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) {
                Some(Ok(pipeline)) => {
                    cached_pipeline.state = CachedPipelineState::Ok(pipeline);
                    return;
                }
                Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err),
                _ => (),
            },

            CachedPipelineState::Err(err) => match err {
                // Retry
                ShaderCacheError::ShaderNotLoaded(_)
                | ShaderCacheError::ShaderImportNotYetAvailable => {
                    cached_pipeline.state = CachedPipelineState::Queued;
                }

                // Shader could not be processed ... retrying won't help
                ShaderCacheError::ProcessShaderError(err) => {
                    let error_detail =
                        err.emit_to_string(&self.shader_cache.lock().unwrap().composer);
                    // Opt-in extra context: any non-empty, non-"0"/"false" value enables it.
                    if std::env::var("VERBOSE_SHADER_ERROR")
                        .is_ok_and(|v| !(v.is_empty() || v == "0" || v == "false"))
                    {
                        error!("{}", pipeline_error_context(cached_pipeline));
                    }
                    error!("failed to process shader error:\n{}", error_detail);
                    return;
                }
                ShaderCacheError::CreateShaderModule(description) => {
                    error!("failed to create shader module: {}", description);
                    return;
                }
            },

            CachedPipelineState::Ok(_) => return,
        }

        // Retry
        self.waiting_pipelines.insert(id);
    }

    /// System wrapper around [`Self::process_queue`].
    pub(crate) fn process_pipeline_queue_system(mut cache: ResMut<Self>) {
        cache.process_queue();
    }

    /// Extract-phase system: mirrors shader asset events into the cache, appending the
    /// cache's global shader defs to every added/modified shader.
    pub(crate) fn extract_shaders(
        mut cache: ResMut<Self>,
        shaders: Extract<Res<Assets<Shader>>>,
        mut events: Extract<MessageReader<AssetEvent<Shader>>>,
    ) {
        for event in events.read() {
            #[expect(
                clippy::match_same_arms,
                reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon."
            )]
            match event {
                // PERF: Instead of blocking waiting for the shader cache lock, try again next frame if the lock is currently held
                AssetEvent::Added { id } | AssetEvent::Modified { id } => {
                    if let Some(shader) = shaders.get(*id) {
                        let mut shader = shader.clone();
                        shader.shader_defs.extend(cache.global_shader_defs.clone());

                        cache.set_shader(*id, shader);
                    }
                }
                AssetEvent::Removed { id } => cache.remove_shader(*id),
                AssetEvent::Unused { .. } => {}
                AssetEvent::LoadedWithDependencies { .. } => {
                    // TODO: handle this
                }
            }
        }
    }
}

/// Builds a human-readable description (shader path, entry point, and enabled shader
/// defs) of the shaders a failed pipeline was built from, for verbose error logging.
fn pipeline_error_context(cached_pipeline: &CachedPipeline) -> String {
    // Formats one shader stage as "path:entry\nshader defs: ...".
    fn format(
        shader: &Handle<Shader>,
        entry: &Option<Cow<'static, str>>,
        shader_defs: &[ShaderDefVal],
    ) -> String {
        let source = match shader.path() {
            Some(path) => path.path().to_string_lossy().to_string(),
            None => String::new(),
        };
        let entry = match entry {
            Some(entry) => entry.to_string(),
            None => String::new(),
        };
        let shader_defs = shader_defs
            .iter()
            .flat_map(|def| match def {
                // Bool defs are listed only when enabled; false bools are omitted.
                ShaderDefVal::Bool(k, v) if *v => Some(k.to_string()),
                ShaderDefVal::Int(k, v) => Some(format!("{k} = {v}")),
                ShaderDefVal::UInt(k, v) => Some(format!("{k} = {v}")),
                _ => None,
            })
            .collect::<Vec<_>>()
            .join(", ");
        format!("{source}:{entry}\nshader defs: {shader_defs}")
    }
    match &cached_pipeline.descriptor {
        PipelineDescriptor::RenderPipelineDescriptor(desc) => {
            let vert = &desc.vertex;
            let vert_str = format(&vert.shader, &vert.entry_point, &vert.shader_defs);
            let Some(frag) = desc.fragment.as_ref() else {
                return vert_str;
            };
            let frag_str = format(&frag.shader, &frag.entry_point, &frag.shader_defs);
            format!("vertex {vert_str}\nfragment {frag_str}")
        }
        PipelineDescriptor::ComputePipelineDescriptor(desc) => {
            format(&desc.shader, &desc.entry_point, &desc.shader_defs)
        }
    }
}

/// Runs (or spawns) a pipeline-creation future. On native multi-threaded builds
/// (excluding macOS), `sync = false` spawns the task on the async compute task pool;
/// otherwise it blocks until the pipeline is created.
#[cfg(all(
    not(target_arch = "wasm32"),
    not(target_os = "macos"),
    feature = "multi_threaded"
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, ShaderCacheError>> + Send + 'static,
    sync: bool,
) -> CachedPipelineState {
    if !sync {
        return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task));
    }

    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}

/// Fallback for wasm, macOS, or single-threaded builds: always compiles synchronously,
/// ignoring the `sync` flag.
#[cfg(any(
    target_arch = "wasm32",
    target_os = "macos",
    not(feature = "multi_threaded")
))]
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, ShaderCacheError>> + Send + 'static,
    _sync: bool,
) -> CachedPipelineState {
    match bevy_tasks::block_on(task) {
        Ok(pipeline) => CachedPipelineState::Ok(pipeline),
        Err(err) => CachedPipelineState::Err(err),
    }
}