Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bevyengine
GitHub Repository: bevyengine/bevy
Path: blob/main/crates/bevy_render/src/render_resource/pipeline_cache.rs
9353 views
1
use bevy_material::descriptor::{
2
BindGroupLayoutDescriptor, CachedComputePipelineId, CachedRenderPipelineId,
3
ComputePipelineDescriptor, PipelineDescriptor, RenderPipelineDescriptor,
4
};
5
6
use crate::{
7
render_resource::*,
8
renderer::{RenderAdapter, RenderDevice, WgpuWrapper},
9
Extract,
10
};
11
use alloc::{borrow::Cow, sync::Arc};
12
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
13
use bevy_ecs::{
14
message::MessageReader,
15
resource::Resource,
16
system::{Res, ResMut},
17
};
18
use bevy_log::error;
19
use bevy_platform::collections::{HashMap, HashSet};
20
use bevy_shader::{
21
CachedPipelineId, Shader, ShaderCache, ShaderCacheError, ShaderCacheSource, ShaderDefVal,
22
ValidateShader,
23
};
24
use bevy_tasks::Task;
25
use bevy_utils::default;
26
use core::{future::Future, mem};
27
use std::sync::{Mutex, PoisonError};
28
use wgpu::{PipelineCompilationOptions, VertexBufferLayout as RawVertexBufferLayout};
29
30
/// A pipeline defining the data layout and shader logic for a specific GPU task.
///
/// Used to store a heterogeneous collection of render and compute pipelines together.
#[derive(Debug)]
pub enum Pipeline {
    /// A render (raster) pipeline GPU object.
    RenderPipeline(RenderPipeline),
    /// A compute pipeline GPU object.
    ComputePipeline(ComputePipeline),
}
38
39
/// A pipeline tracked by a [`PipelineCache`], pairing the descriptor it was
/// queued with and its current creation state.
pub struct CachedPipeline {
    /// The descriptor this pipeline was inserted into the cache with.
    pub descriptor: PipelineDescriptor,
    /// Current creation state: queued, being created, ready, or failed.
    pub state: CachedPipelineState,
}
43
44
/// State of a cached pipeline inserted into a [`PipelineCache`].
#[derive(Debug)]
pub enum CachedPipelineState {
    /// The pipeline GPU object is queued for creation.
    Queued,
    /// The pipeline GPU object is being created.
    ///
    /// Holds the in-flight async task that resolves to the pipeline or an error.
    Creating(Task<Result<Pipeline, ShaderCacheError>>),
    /// The pipeline GPU object was created successfully and is available (allocated on the GPU).
    Ok(Pipeline),
    /// An error occurred while trying to create the pipeline GPU object.
    Err(ShaderCacheError),
}
56
57
impl CachedPipelineState {
58
/// Convenience method to "unwrap" a pipeline state into its underlying GPU object.
59
///
60
/// # Returns
61
///
62
/// The method returns the allocated pipeline GPU object.
63
///
64
/// # Panics
65
///
66
/// This method panics if the pipeline GPU object is not available, either because it is
67
/// pending creation or because an error occurred while attempting to create GPU object.
68
pub fn unwrap(&self) -> &Pipeline {
69
match self {
70
CachedPipelineState::Ok(pipeline) => pipeline,
71
CachedPipelineState::Queued => {
72
panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.")
73
}
74
CachedPipelineState::Creating(..) => {
75
panic!("Pipeline has not been compiled yet. It is still in the 'Creating' state.")
76
}
77
CachedPipelineState::Err(err) => panic!("{}", err),
78
}
79
}
80
}
81
82
// Size (in bytes) of a pipeline's immediate data, part of the layout cache key.
type ImmediateSize = u32;
// A pipeline layout is uniquely identified by its bind group layout ids plus
// its immediate data size.
type LayoutCacheKey = (Vec<BindGroupLayoutId>, ImmediateSize);
/// Caches created [`PipelineLayout`]s so identical layouts are only created once
/// on the GPU and can be shared between pipelines via `Arc`.
#[derive(Default)]
struct LayoutCache {
    layouts: HashMap<LayoutCacheKey, Arc<WgpuWrapper<PipelineLayout>>>,
}
88
89
impl LayoutCache {
90
fn get(
91
&mut self,
92
render_device: &RenderDevice,
93
bind_group_layouts: &[BindGroupLayout],
94
immediate_size: u32,
95
) -> Arc<WgpuWrapper<PipelineLayout>> {
96
let bind_group_ids = bind_group_layouts.iter().map(BindGroupLayout::id).collect();
97
self.layouts
98
.entry((bind_group_ids, immediate_size))
99
.or_insert_with_key(|(_, immediate_size)| {
100
let bind_group_layouts = bind_group_layouts
101
.iter()
102
.map(BindGroupLayout::value)
103
.collect::<Vec<_>>();
104
Arc::new(WgpuWrapper::new(render_device.create_pipeline_layout(
105
&PipelineLayoutDescriptor {
106
bind_group_layouts: &bind_group_layouts,
107
immediate_size: *immediate_size,
108
..default()
109
},
110
)))
111
})
112
.clone()
113
}
114
}
115
116
/// Compiles `shader_source` into a wgpu [`ShaderModule`] on `render_device`.
///
/// A wgpu validation error scope is pushed around module creation so that, on
/// native platforms, shader validation failures are surfaced as
/// [`ShaderCacheError::CreateShaderModule`] instead of propagating inside wgpu.
fn load_module(
    render_device: &RenderDevice,
    shader_source: ShaderCacheSource,
    validate_shader: &ValidateShader,
) -> Result<WgpuWrapper<ShaderModule>, ShaderCacheError> {
    // Convert the cache's source representation into a raw `wgpu` shader source.
    let shader_source = match shader_source {
        #[cfg(feature = "shader_format_spirv")]
        ShaderCacheSource::SpirV(data) => wgpu::util::make_spirv(data),
        #[cfg(not(feature = "shader_format_spirv"))]
        ShaderCacheSource::SpirV(_) => {
            unimplemented!("Enable feature \"shader_format_spirv\" to use SPIR-V shaders")
        }
        ShaderCacheSource::Wgsl(src) => ShaderSource::Wgsl(Cow::Owned(src)),
        #[cfg(not(feature = "decoupled_naga"))]
        ShaderCacheSource::Naga(src) => ShaderSource::Naga(Cow::Owned(src)),
    };
    let module_descriptor = ShaderModuleDescriptor {
        label: None,
        source: shader_source,
    };

    // Capture validation errors raised while creating the module (popped below).
    let scope = render_device
        .wgpu_device()
        .push_error_scope(wgpu::ErrorFilter::Validation);

    let shader_module = WgpuWrapper::new(match validate_shader {
        ValidateShader::Enabled => {
            render_device.create_and_validate_shader_module(module_descriptor)
        }
        // SAFETY: we are interfacing with shader code, which may contain undefined behavior,
        // such as indexing out of bounds.
        // The checks required are prohibitively expensive and a poor default for game engines.
        ValidateShader::Disabled => unsafe {
            render_device.create_shader_module(module_descriptor)
        },
    });

    let error = scope.pop();

    // `now_or_never` will return Some if the future is ready and None otherwise.
    // On native platforms, wgpu will yield the error immediately while on wasm it may take longer since the browser APIs are asynchronous.
    // So to keep the complexity of the ShaderCache low, we will only catch this error early on native platforms,
    // and on wasm the error will be handled by wgpu and crash the application.
    if let Some(Some(wgpu::Error::Validation { description, .. })) =
        bevy_tasks::futures::now_or_never(error)
    {
        return Err(ShaderCacheError::CreateShaderModule(description));
    }

    Ok(shader_module)
}
167
168
/// Caches [`BindGroupLayout`]s by their descriptor so each distinct layout is
/// only created once on the render device.
#[derive(Default)]
struct BindGroupLayoutCache {
    bgls: HashMap<BindGroupLayoutDescriptor, BindGroupLayout>,
}
172
173
impl BindGroupLayoutCache {
174
fn get(
175
&mut self,
176
render_device: &RenderDevice,
177
descriptor: BindGroupLayoutDescriptor,
178
) -> BindGroupLayout {
179
self.bgls
180
.entry(descriptor)
181
.or_insert_with_key(|descriptor| {
182
render_device
183
.create_bind_group_layout(descriptor.label.as_ref(), &descriptor.entries)
184
})
185
.clone()
186
}
187
}
188
189
/// Cache for render and compute pipelines.
///
/// The cache stores existing render and compute pipelines allocated on the GPU, as well as
/// pending creation. Pipelines inserted into the cache are identified by a unique ID, which
/// can be used to retrieve the actual GPU object once it's ready. The creation of the GPU
/// pipeline object is deferred to the [`RenderSystems::Render`] step, just before the render
/// graph starts being processed, as this requires access to the GPU.
///
/// Note that the cache does not perform automatic deduplication of identical pipelines. It is
/// up to the user not to insert the same pipeline twice to avoid wasting GPU resources.
///
/// [`RenderSystems::Render`]: crate::RenderSystems::Render
#[derive(Resource)]
pub struct PipelineCache {
    // Shared cache of created `PipelineLayout`s, keyed by bind group layout ids
    // and immediate data size. `Arc<Mutex<..>>` so async creation tasks can use it.
    layout_cache: Arc<Mutex<LayoutCache>>,
    // Shared cache of created `BindGroupLayout`s, keyed by descriptor.
    bindgroup_layout_cache: Arc<Mutex<BindGroupLayoutCache>>,
    // Shared shader compilation cache; also tracks which pipelines depend on
    // which shaders so they can be re-queued when a shader changes.
    shader_cache: Arc<Mutex<ShaderCache<WgpuWrapper<ShaderModule>, RenderDevice>>>,
    // The device all pipelines in this cache are created on.
    device: RenderDevice,
    // All processed pipelines, indexed by their cached pipeline id.
    pipelines: Vec<CachedPipeline>,
    // Ids of pipelines that still need processing on the next `process_queue()`.
    waiting_pipelines: HashSet<CachedPipelineId>,
    // Pipelines queued via `&self` methods, moved into `pipelines` by
    // `process_queue()`. Mutex allows queuing from shared references.
    new_pipelines: Mutex<Vec<CachedPipeline>>,
    // Shader defs appended to every extracted shader (platform capabilities etc.).
    global_shader_defs: Vec<ShaderDefVal>,
    /// If `true`, disables asynchronous pipeline compilation.
    /// This has no effect on macOS, wasm, or without the `multi_threaded` feature.
    pub(crate) synchronous_pipeline_compilation: bool,
}
215
216
impl PipelineCache {
    /// Returns an iterator over the pipelines in the pipeline cache.
    pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline> {
        self.pipelines.iter()
    }

    /// Returns an iterator of the IDs of all currently waiting pipelines.
    pub fn waiting_pipelines(&self) -> impl Iterator<Item = CachedPipelineId> + '_ {
        self.waiting_pipelines.iter().copied()
    }

    /// Create a new pipeline cache associated with the given render device.
    pub fn new(
        device: RenderDevice,
        render_adapter: RenderAdapter,
        synchronous_pipeline_compilation: bool,
    ) -> Self {
        // Collect shader defs that describe this platform's capabilities; they
        // are appended to every shader extracted into this cache.
        let mut global_shader_defs = Vec::new();
        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
        {
            global_shader_defs.push("NO_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
            global_shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into());
        }

        // iOS (and similar) simulator builds.
        if cfg!(target_abi = "sim") {
            global_shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into());
        }

        global_shader_defs.push(ShaderDefVal::UInt(
            String::from("AVAILABLE_STORAGE_BUFFER_BINDINGS"),
            device.limits().max_storage_buffers_per_shader_stage,
        ));

        Self {
            shader_cache: Arc::new(Mutex::new(ShaderCache::new(
                device.clone(),
                device.features(),
                render_adapter.get_downlevel_capabilities().flags,
                load_module,
            ))),
            device,
            layout_cache: default(),
            bindgroup_layout_cache: default(),
            waiting_pipelines: default(),
            new_pipelines: default(),
            pipelines: default(),
            global_shader_defs,
            synchronous_pipeline_compilation,
        }
    }

    /// Get the state of a cached render pipeline.
    ///
    /// See [`PipelineCache::queue_render_pipeline()`].
    #[inline]
    pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState {
        // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines`
        self.pipelines
            .get(id.id())
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    /// Get the state of a cached compute pipeline.
    ///
    /// See [`PipelineCache::queue_compute_pipeline()`].
    #[inline]
    pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState {
        // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines`
        self.pipelines
            .get(id.id())
            .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state)
    }

    /// Get the render pipeline descriptor a cached render pipeline was inserted from.
    ///
    /// See [`PipelineCache::queue_render_pipeline()`].
    ///
    /// **Note**: Be careful calling this method. It will panic if called with a pipeline that
    /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`].
    #[inline]
    pub fn get_render_pipeline_descriptor(
        &self,
        id: CachedRenderPipelineId,
    ) -> &RenderPipelineDescriptor {
        match &self.pipelines[id.id()].descriptor {
            PipelineDescriptor::RenderPipelineDescriptor(descriptor) => descriptor,
            // A `CachedRenderPipelineId` is only ever handed out for a render
            // pipeline descriptor, so the compute variant is impossible here.
            PipelineDescriptor::ComputePipelineDescriptor(_) => unreachable!(),
        }
    }

    /// Get the compute pipeline descriptor a cached compute pipeline was inserted from.
    ///
    /// See [`PipelineCache::queue_compute_pipeline()`].
    ///
    /// **Note**: Be careful calling this method. It will panic if called with a pipeline that
    /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`].
    #[inline]
    pub fn get_compute_pipeline_descriptor(
        &self,
        id: CachedComputePipelineId,
    ) -> &ComputePipelineDescriptor {
        match &self.pipelines[id.id()].descriptor {
            // A `CachedComputePipelineId` is only ever handed out for a compute
            // pipeline descriptor, so the render variant is impossible here.
            PipelineDescriptor::RenderPipelineDescriptor(_) => unreachable!(),
            PipelineDescriptor::ComputePipelineDescriptor(descriptor) => descriptor,
        }
    }

    /// Try to retrieve a render pipeline GPU object from a cached ID.
    ///
    /// # Returns
    ///
    /// This method returns a successfully created render pipeline if any, or `None` if the pipeline
    /// was not created yet or if there was an error during creation. You can check the actual creation
    /// state with [`PipelineCache::get_render_pipeline_state()`].
    #[inline]
    pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> {
        if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) =
            &self.pipelines.get(id.id())?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

    /// Wait for a render pipeline to finish compiling.
    #[inline]
    pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) {
        // The id may still be sitting in `new_pipelines`; processing the queue
        // moves it into `pipelines` so it can be indexed below.
        if self.pipelines.len() <= id.id() {
            self.process_queue();
        }

        // Only an in-flight `Creating` task needs blocking; other states are final
        // or will be handled by `process_queue`.
        let state = &mut self.pipelines[id.id()].state;
        if let CachedPipelineState::Creating(task) = state {
            *state = match bevy_tasks::block_on(task) {
                Ok(p) => CachedPipelineState::Ok(p),
                Err(e) => CachedPipelineState::Err(e),
            };
        }
    }

    /// Try to retrieve a compute pipeline GPU object from a cached ID.
    ///
    /// # Returns
    ///
    /// This method returns a successfully created compute pipeline if any, or `None` if the pipeline
    /// was not created yet or if there was an error during creation. You can check the actual creation
    /// state with [`PipelineCache::get_compute_pipeline_state()`].
    #[inline]
    pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> {
        if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) =
            &self.pipelines.get(id.id())?.state
        {
            Some(pipeline)
        } else {
            None
        }
    }

    /// Insert a render pipeline into the cache, and queue its creation.
    ///
    /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with
    /// an already cached pipeline.
    ///
    /// # Returns
    ///
    /// This method returns the unique render shader ID of the cached pipeline, which can be used to query
    /// the caching state with [`get_render_pipeline_state()`] and to retrieve the created GPU pipeline once
    /// it's ready with [`get_render_pipeline()`].
    ///
    /// [`get_render_pipeline_state()`]: PipelineCache::get_render_pipeline_state
    /// [`get_render_pipeline()`]: PipelineCache::get_render_pipeline
    pub fn queue_render_pipeline(
        &self,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedRenderPipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        // Ids are assigned as if `new_pipelines` were already appended to
        // `pipelines`, which `process_queue()` does in the same order.
        let id = CachedRenderPipelineId::new(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::RenderPipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    /// Insert a compute pipeline into the cache, and queue its creation.
    ///
    /// The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with
    /// an already cached pipeline.
    ///
    /// # Returns
    ///
    /// This method returns the unique compute shader ID of the cached pipeline, which can be used to query
    /// the caching state with [`get_compute_pipeline_state()`] and to retrieve the created GPU pipeline once
    /// it's ready with [`get_compute_pipeline()`].
    ///
    /// [`get_compute_pipeline_state()`]: PipelineCache::get_compute_pipeline_state
    /// [`get_compute_pipeline()`]: PipelineCache::get_compute_pipeline
    pub fn queue_compute_pipeline(
        &self,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedComputePipelineId {
        let mut new_pipelines = self
            .new_pipelines
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        // Ids are assigned as if `new_pipelines` were already appended to
        // `pipelines`, which `process_queue()` does in the same order.
        let id = CachedComputePipelineId::new(self.pipelines.len() + new_pipelines.len());
        new_pipelines.push(CachedPipeline {
            descriptor: PipelineDescriptor::ComputePipelineDescriptor(Box::new(descriptor)),
            state: CachedPipelineState::Queued,
        });
        id
    }

    /// Returns the cached [`BindGroupLayout`] for the given descriptor,
    /// creating it on the device on first use.
    pub fn get_bind_group_layout(
        &self,
        bind_group_layout_descriptor: &BindGroupLayoutDescriptor,
    ) -> BindGroupLayout {
        // NOTE(review): unlike `new_pipelines`, this lock panics on mutex poison
        // instead of recovering via `PoisonError::into_inner` — confirm intentional.
        self.bindgroup_layout_cache
            .lock()
            .unwrap()
            .get(&self.device, bind_group_layout_descriptor.clone())
    }

    /// Inserts a [`Shader`] into this cache with the provided [`AssetId`].
    pub fn set_shader(&mut self, id: AssetId<Shader>, shader: Shader) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        // Any pipeline that depends on this shader must be rebuilt.
        let pipelines_to_queue = shader_cache.set_shader(id, shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    /// Removes a [`Shader`] from this cache if it exists.
    pub fn remove_shader(&mut self, shader: AssetId<Shader>) {
        let mut shader_cache = self.shader_cache.lock().unwrap();
        // Pipelines that used the removed shader go back to `Queued`; they will
        // error or wait until the shader becomes available again.
        let pipelines_to_queue = shader_cache.remove(shader);
        for cached_pipeline in pipelines_to_queue {
            self.pipelines[cached_pipeline].state = CachedPipelineState::Queued;
            self.waiting_pipelines.insert(cached_pipeline);
        }
    }

    /// Kicks off (possibly asynchronous) creation of a render pipeline and
    /// returns the resulting state (`Creating`, `Ok`, or `Err`).
    fn start_create_render_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: RenderPipelineDescriptor,
    ) -> CachedPipelineState {
        // Clone the shared caches so the async task can own them.
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        // Bind group layouts are resolved up front, outside the async task.
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let vertex_module = match shader_cache.get(
                    id,
                    descriptor.vertex.shader.id(),
                    &descriptor.vertex.shader_defs,
                ) {
                    Ok(module) => module,
                    Err(err) => return Err(err),
                };

                let fragment_module = match &descriptor.fragment {
                    Some(fragment) => {
                        match shader_cache.get(id, fragment.shader.id(), &fragment.shader_defs) {
                            Ok(module) => Some(module),
                            Err(err) => return Err(err),
                        }
                    }
                    None => None,
                };

                // No bind groups and no immediate data means wgpu can infer the layout.
                let layout = if descriptor.layout.is_empty() && descriptor.immediate_size == 0 {
                    None
                } else {
                    Some(layout_cache.get(&device, &bind_group_layout, descriptor.immediate_size))
                };

                // Release both cache locks before the (potentially long) GPU call.
                drop((shader_cache, layout_cache));

                let vertex_buffer_layouts = descriptor
                    .vertex
                    .buffers
                    .iter()
                    .map(|layout| RawVertexBufferLayout {
                        array_stride: layout.array_stride,
                        attributes: &layout.attributes,
                        step_mode: layout.step_mode,
                    })
                    .collect::<Vec<_>>();

                // `unwrap` is safe here: `fragment_module` is `Some` exactly when
                // `descriptor.fragment` is `Some` (see above).
                let fragment_data = descriptor.fragment.as_ref().map(|fragment| {
                    (
                        fragment_module.unwrap(),
                        fragment.entry_point.as_deref(),
                        fragment.targets.as_slice(),
                    )
                });

                // TODO: Expose the rest of this somehow
                let compilation_options = PipelineCompilationOptions {
                    constants: &[],
                    zero_initialize_workgroup_memory: descriptor.zero_initialize_workgroup_memory,
                };

                let descriptor = RawRenderPipelineDescriptor {
                    multiview_mask: None,
                    depth_stencil: descriptor.depth_stencil.clone(),
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    multisample: descriptor.multisample,
                    primitive: descriptor.primitive,
                    vertex: RawVertexState {
                        buffers: &vertex_buffer_layouts,
                        entry_point: descriptor.vertex.entry_point.as_deref(),
                        module: &vertex_module,
                        // TODO: Should this be the same as the fragment compilation options?
                        compilation_options: compilation_options.clone(),
                    },
                    fragment: fragment_data
                        .as_ref()
                        .map(|(module, entry_point, targets)| RawFragmentState {
                            entry_point: entry_point.as_deref(),
                            module,
                            targets,
                            // TODO: Should this be the same as the vertex compilation options?
                            compilation_options,
                        }),
                    cache: None,
                };

                Ok(Pipeline::RenderPipeline(
                    device.create_render_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    /// Kicks off (possibly asynchronous) creation of a compute pipeline and
    /// returns the resulting state (`Creating`, `Ok`, or `Err`).
    fn start_create_compute_pipeline(
        &mut self,
        id: CachedPipelineId,
        descriptor: ComputePipelineDescriptor,
    ) -> CachedPipelineState {
        // Clone the shared caches so the async task can own them.
        let device = self.device.clone();
        let shader_cache = self.shader_cache.clone();
        let layout_cache = self.layout_cache.clone();
        // Bind group layouts are resolved up front, outside the async task.
        let mut bindgroup_layout_cache = self.bindgroup_layout_cache.lock().unwrap();
        let bind_group_layout = descriptor
            .layout
            .iter()
            .map(|bind_group_layout_descriptor| {
                bindgroup_layout_cache.get(&self.device, bind_group_layout_descriptor.clone())
            })
            .collect::<Vec<_>>();

        create_pipeline_task(
            async move {
                let mut shader_cache = shader_cache.lock().unwrap();
                let mut layout_cache = layout_cache.lock().unwrap();

                let compute_module =
                    match shader_cache.get(id, descriptor.shader.id(), &descriptor.shader_defs) {
                        Ok(module) => module,
                        Err(err) => return Err(err),
                    };

                // No bind groups and no immediate data means wgpu can infer the layout.
                let layout = if descriptor.layout.is_empty() && descriptor.immediate_size == 0 {
                    None
                } else {
                    Some(layout_cache.get(&device, &bind_group_layout, descriptor.immediate_size))
                };

                // Release both cache locks before the (potentially long) GPU call.
                drop((shader_cache, layout_cache));

                let descriptor = RawComputePipelineDescriptor {
                    label: descriptor.label.as_deref(),
                    layout: layout.as_ref().map(|layout| -> &PipelineLayout { layout }),
                    module: &compute_module,
                    entry_point: descriptor.entry_point.as_deref(),
                    // TODO: Expose the rest of this somehow
                    compilation_options: PipelineCompilationOptions {
                        constants: &[],
                        zero_initialize_workgroup_memory: descriptor
                            .zero_initialize_workgroup_memory,
                    },
                    cache: None,
                };

                Ok(Pipeline::ComputePipeline(
                    device.create_compute_pipeline(&descriptor),
                ))
            },
            self.synchronous_pipeline_compilation,
        )
    }

    /// Process the pipeline queue and create all pending pipelines if possible.
    ///
    /// This is generally called automatically during the [`RenderSystems::Render`] step, but can
    /// be called manually to force creation at a different time.
    ///
    /// [`RenderSystems::Render`]: crate::RenderSystems::Render
    pub fn process_queue(&mut self) {
        // Take both collections so `process_pipeline` can borrow `self` mutably
        // while iterating; they are restored below.
        let mut waiting_pipelines = mem::take(&mut self.waiting_pipelines);
        let mut pipelines = mem::take(&mut self.pipelines);

        {
            // Move pipelines queued via `&self` into the main list; their ids were
            // pre-assigned in queue order (see `queue_render_pipeline`).
            let mut new_pipelines = self
                .new_pipelines
                .lock()
                .unwrap_or_else(PoisonError::into_inner);
            for new_pipeline in new_pipelines.drain(..) {
                let id = pipelines.len();
                pipelines.push(new_pipeline);
                waiting_pipelines.insert(id);
            }
        }

        for id in waiting_pipelines {
            self.process_pipeline(&mut pipelines[id], id);
        }

        self.pipelines = pipelines;
    }

    /// Advances a single pipeline's state machine; re-inserts the id into
    /// `waiting_pipelines` whenever another processing pass is needed.
    fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) {
        match &mut cached_pipeline.state {
            // Not started yet: kick off creation.
            CachedPipelineState::Queued => {
                cached_pipeline.state = match &cached_pipeline.descriptor {
                    PipelineDescriptor::RenderPipelineDescriptor(descriptor) => {
                        self.start_create_render_pipeline(id, *descriptor.clone())
                    }
                    PipelineDescriptor::ComputePipelineDescriptor(descriptor) => {
                        self.start_create_compute_pipeline(id, *descriptor.clone())
                    }
                };
            }

            // Poll the in-flight task without blocking.
            CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) {
                Some(Ok(pipeline)) => {
                    cached_pipeline.state = CachedPipelineState::Ok(pipeline);
                    return;
                }
                Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err),
                // Still pending: fall through and re-queue below.
                _ => (),
            },

            CachedPipelineState::Err(err) => match err {
                // Retry
                ShaderCacheError::ShaderNotLoaded(_)
                | ShaderCacheError::ShaderImportNotYetAvailable => {
                    cached_pipeline.state = CachedPipelineState::Queued;
                }

                // Shader could not be processed ... retrying won't help
                ShaderCacheError::ProcessShaderError(err) => {
                    let error_detail =
                        err.emit_to_string(&self.shader_cache.lock().unwrap().composer);
                    // Opt-in extra context: any non-empty/non-"0"/non-"false" value enables it.
                    if std::env::var("VERBOSE_SHADER_ERROR")
                        .is_ok_and(|v| !(v.is_empty() || v == "0" || v == "false"))
                    {
                        error!("{}", pipeline_error_context(cached_pipeline));
                    }
                    error!("failed to process shader error:\n{}", error_detail);
                    return;
                }
                ShaderCacheError::CreateShaderModule(description) => {
                    error!("failed to create shader module: {}", description);
                    return;
                }
            },

            // Already created: nothing to do.
            CachedPipelineState::Ok(_) => return,
        }

        // Retry
        self.waiting_pipelines.insert(id);
    }

    /// Render-world system that drives [`PipelineCache::process_queue`].
    pub(crate) fn process_pipeline_queue_system(mut cache: ResMut<Self>) {
        cache.process_queue();
    }

    /// Extract system that mirrors shader asset events into the cache,
    /// appending the cache's global shader defs to every extracted shader.
    pub(crate) fn extract_shaders(
        mut cache: ResMut<Self>,
        shaders: Extract<Res<Assets<Shader>>>,
        mut events: Extract<MessageReader<AssetEvent<Shader>>>,
    ) {
        for event in events.read() {
            #[expect(
                clippy::match_same_arms,
                reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon."
            )]
            match event {
                // PERF: Instead of blocking waiting for the shader cache lock, try again next frame if the lock is currently held
                AssetEvent::Added { id } | AssetEvent::Modified { id } => {
                    if let Some(shader) = shaders.get(*id) {
                        let mut shader = shader.clone();
                        shader.shader_defs.extend(cache.global_shader_defs.clone());

                        cache.set_shader(*id, shader);
                    }
                }
                AssetEvent::Removed { id } => cache.remove_shader(*id),
                AssetEvent::Unused { .. } => {}
                AssetEvent::LoadedWithDependencies { .. } => {
                    // TODO: handle this
                }
            }
        }
    }
}
746
747
fn pipeline_error_context(cached_pipeline: &CachedPipeline) -> String {
748
fn format(
749
shader: &Handle<Shader>,
750
entry: &Option<Cow<'static, str>>,
751
shader_defs: &[ShaderDefVal],
752
) -> String {
753
let source = match shader.path() {
754
Some(path) => path.path().to_string_lossy().to_string(),
755
None => String::new(),
756
};
757
let entry = match entry {
758
Some(entry) => entry.to_string(),
759
None => String::new(),
760
};
761
let shader_defs = shader_defs
762
.iter()
763
.flat_map(|def| match def {
764
ShaderDefVal::Bool(k, v) if *v => Some(k.to_string()),
765
ShaderDefVal::Int(k, v) => Some(format!("{k} = {v}")),
766
ShaderDefVal::UInt(k, v) => Some(format!("{k} = {v}")),
767
_ => None,
768
})
769
.collect::<Vec<_>>()
770
.join(", ");
771
format!("{source}:{entry}\nshader defs: {shader_defs}")
772
}
773
match &cached_pipeline.descriptor {
774
PipelineDescriptor::RenderPipelineDescriptor(desc) => {
775
let vert = &desc.vertex;
776
let vert_str = format(&vert.shader, &vert.entry_point, &vert.shader_defs);
777
let Some(frag) = desc.fragment.as_ref() else {
778
return vert_str;
779
};
780
let frag_str = format(&frag.shader, &frag.entry_point, &frag.shader_defs);
781
format!("vertex {vert_str}\nfragment {frag_str}")
782
}
783
PipelineDescriptor::ComputePipelineDescriptor(desc) => {
784
format(&desc.shader, &desc.entry_point, &desc.shader_defs)
785
}
786
}
787
}
788
789
#[cfg(all(
    not(target_arch = "wasm32"),
    not(target_os = "macos"),
    feature = "multi_threaded"
))]
/// Runs a pipeline-creation future, either blocking on it (`sync`) or spawning
/// it on the async compute task pool.
fn create_pipeline_task(
    task: impl Future<Output = Result<Pipeline, ShaderCacheError>> + Send + 'static,
    sync: bool,
) -> CachedPipelineState {
    if sync {
        // Synchronous compilation requested: finish the task on this thread.
        match bevy_tasks::block_on(task) {
            Ok(pipeline) => CachedPipelineState::Ok(pipeline),
            Err(err) => CachedPipelineState::Err(err),
        }
    } else {
        // Hand compilation off to the async compute pool; the caller polls the task.
        CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task))
    }
}
807
808
#[cfg(any(
809
target_arch = "wasm32",
810
target_os = "macos",
811
not(feature = "multi_threaded")
812
))]
813
fn create_pipeline_task(
814
task: impl Future<Output = Result<Pipeline, ShaderCacheError>> + Send + 'static,
815
_sync: bool,
816
) -> CachedPipelineState {
817
match bevy_tasks::block_on(task) {
818
Ok(pipeline) => CachedPipelineState::Ok(pipeline),
819
Err(err) => CachedPipelineState::Err(err),
820
}
821
}
822
823