Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bevyengine
GitHub Repository: bevyengine/bevy
Path: blob/main/crates/bevy_camera/src/camera.rs
9358 views
1
use crate::primitives::Frustum;
2
3
use super::{
4
visibility::{Visibility, VisibleEntities},
5
ClearColorConfig, MsaaWriteback,
6
};
7
use bevy_asset::Handle;
8
use bevy_derive::Deref;
9
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
10
use bevy_image::Image;
11
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
12
use bevy_reflect::prelude::*;
13
use bevy_transform::components::{GlobalTransform, Transform};
14
use bevy_window::{NormalizedWindowRef, WindowRef};
15
use core::ops::Range;
16
use derive_more::derive::From;
17
use thiserror::Error;
18
use wgpu_types::{BlendState, TextureUsages};
19
20
/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
///
/// All coordinates are in physical pixels, with the origin at the top-left corner
/// of the render target.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}
37
38
impl Default for Viewport {
39
fn default() -> Self {
40
Self {
41
physical_position: Default::default(),
42
physical_size: UVec2::new(1, 1),
43
depth: 0.0..1.0,
44
}
45
}
46
}
47
48
impl Viewport {
49
/// Cut the viewport rectangle so that it lies inside a rectangle of the
50
/// given size.
51
///
52
/// If either of the viewport's position coordinates lies outside the given
53
/// dimensions, it will be moved just inside first. If either of the given
54
/// dimensions is zero, the position and size of the viewport rectangle will
55
/// both be set to zero in that dimension.
56
pub fn clamp_to_size(&mut self, size: UVec2) {
57
// If the origin of the viewport rect is outside, then adjust so that
58
// it's just barely inside. Then, cut off the part that is outside.
59
if self.physical_size.x + self.physical_position.x > size.x {
60
if self.physical_position.x < size.x {
61
self.physical_size.x = size.x - self.physical_position.x;
62
} else if size.x > 0 {
63
self.physical_position.x = size.x - 1;
64
self.physical_size.x = 1;
65
} else {
66
self.physical_position.x = 0;
67
self.physical_size.x = 0;
68
}
69
}
70
if self.physical_size.y + self.physical_position.y > size.y {
71
if self.physical_position.y < size.y {
72
self.physical_size.y = size.y - self.physical_position.y;
73
} else if size.y > 0 {
74
self.physical_position.y = size.y - 1;
75
self.physical_size.y = 1;
76
} else {
77
self.physical_position.y = 0;
78
self.physical_size.y = 0;
79
}
80
}
81
}
82
83
pub fn from_viewport_and_override(
84
viewport: Option<&Self>,
85
main_pass_resolution_override: Option<&MainPassResolutionOverride>,
86
) -> Option<Self> {
87
if let Some(override_size) = main_pass_resolution_override {
88
let mut vp = viewport.map_or_else(Self::default, Self::clone);
89
vp.physical_size = **override_size;
90
Some(vp)
91
} else {
92
viewport.cloned()
93
}
94
}
95
}
96
97
/// Override the resolution a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(
    /// The main pass resolution, in physical pixels.
    pub UVec2,
);
110
111
/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` =
///   1920,1080
///
/// However since only the ratio between the values is important, they could all
/// be divided by 120 and still produce the same image. Camera D would for
/// example have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}
149
150
impl Default for SubCameraView {
151
fn default() -> Self {
152
Self {
153
full_size: UVec2::new(1, 1),
154
offset: Vec2::new(0., 0.),
155
size: UVec2::new(1, 1),
156
}
157
}
158
}
159
160
/// Information about the current [`RenderTarget`].
#[derive(Debug, Reflect, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, typically it is a value greater or equal than 1.0,
    /// representing the ratio between the size of the window in physical pixels and the logical size of the window.
    /// (Logical sizes are obtained by dividing physical sizes by this factor.)
    pub scale_factor: f32,
}
171
172
impl Default for RenderTargetInfo {
173
fn default() -> Self {
174
Self {
175
physical_size: Default::default(),
176
scale_factor: 1.,
177
}
178
}
179
}
180
181
/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Reflect, Clone)]
pub struct ComputedCameraValues {
    /// The projection matrix computed from this camera's
    /// [`Projection`](super::projection::Projection).
    pub clip_from_view: Mat4,
    /// Information about the current [`RenderTarget`], if known.
    pub target_info: Option<RenderTargetInfo>,
    // size of the `Viewport` (previous value — presumably kept for change
    // detection; confirm in `camera_system`)
    pub old_viewport_size: Option<UVec2>,
    // previous value of [`Camera::sub_camera_view`] — same caveat as above
    pub old_sub_camera_view: Option<SubCameraView>,
}
190
191
/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
///
/// Defaults to [`Exposure::BLENDER`].
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}
201
202
impl Exposure {
    /// Exposure preset for a sunlit scene ([`Self::EV100_SUNLIGHT`]).
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    /// Exposure preset for an overcast scene ([`Self::EV100_OVERCAST`]).
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    /// Exposure preset for an indoor scene ([`Self::EV100_INDOOR`]).
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    /// EV100 of a sunlit scene.
    pub const EV100_SUNLIGHT: f32 = 15.0;
    /// EV100 of an overcast scene.
    pub const EV100_OVERCAST: f32 = 12.0;
    /// EV100 of an indoor scene.
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    /// Builds an [`Exposure`] by computing the EV100 of the given physical
    /// camera parameters.
    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}
243
244
impl Default for Exposure {
245
fn default() -> Self {
246
Self::BLENDER
247
}
248
}
249
250
/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
// `Debug` and `PartialEq` are derived for consistency with the other public
// types in this module (e.g. `SubCameraView`).
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}
270
271
impl PhysicalCameraParameters {
272
/// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
273
pub fn ev100(&self) -> f32 {
274
ops::log2(
275
self.aperture_f_stops * self.aperture_f_stops * 100.0
276
/ (self.shutter_speed_s * self.sensitivity_iso),
277
)
278
}
279
}
280
281
impl Default for PhysicalCameraParameters {
282
fn default() -> Self {
283
Self {
284
aperture_f_stops: 1.0,
285
shutter_speed_s: 1.0 / 125.0,
286
sensitivity_iso: 100.0,
287
sensor_height: 0.01866,
288
}
289
}
290
}
291
292
/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
/// Also returned by [`viewport_to_ndc`][Camera::viewport_to_ndc] and related helpers.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}
323
324
/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    RenderTarget
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    /// If `None`, the camera renders to the full rect of the [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    pub computed: ComputedCameraValues,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    pub output_mode: CameraOutputMode,
    /// Controls when MSAA writeback occurs for this camera.
    /// See [`MsaaWriteback`] for available options.
    pub msaa_writeback: MsaaWriteback,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// Whether to switch culling mode so that materials that request backface
    /// culling cull front faces, and vice versa.
    ///
    /// This is typically used for cameras that mirror the world that they
    /// render across a plane, because doing that flips the winding of each
    /// polygon.
    ///
    /// This setting doesn't affect materials that disable backface culling.
    pub invert_culling: bool,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}
379
380
impl Default for Camera {
381
fn default() -> Self {
382
Self {
383
is_active: true,
384
order: 0,
385
viewport: None,
386
computed: Default::default(),
387
output_mode: Default::default(),
388
msaa_writeback: MsaaWriteback::default(),
389
clear_color: Default::default(),
390
invert_culling: false,
391
sub_camera_view: None,
392
}
393
}
394
}
395
396
impl Camera {
397
/// Converts a physical size in this `Camera` to a logical size.
398
#[inline]
399
pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
400
let scale = self.computed.target_info.as_ref()?.scale_factor;
401
Some(physical_size.as_vec2() / scale)
402
}
403
404
/// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
405
/// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
406
/// the full physical rect of the current [`RenderTarget`].
407
#[inline]
408
pub fn physical_viewport_rect(&self) -> Option<URect> {
409
let min = self
410
.viewport
411
.as_ref()
412
.map(|v| v.physical_position)
413
.unwrap_or(UVec2::ZERO);
414
let max = min + self.physical_viewport_size()?;
415
Some(URect { min, max })
416
}
417
418
/// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
419
/// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
420
/// full logical rect of the current [`RenderTarget`].
421
#[inline]
422
pub fn logical_viewport_rect(&self) -> Option<Rect> {
423
let URect { min, max } = self.physical_viewport_rect()?;
424
Some(Rect {
425
min: self.to_logical(min)?,
426
max: self.to_logical(max)?,
427
})
428
}
429
430
    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        // `to_logical` needs the target's scale factor; when it is unknown the
        // `logical_target_size` fallback returns `None` as well.
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }
450
451
/// The physical size of this camera's viewport (in physical pixels).
452
/// If the `viewport` field is set to [`Some`], this
453
/// will be the size of that custom viewport. Otherwise it will default to the full physical size of
454
/// the current [`RenderTarget`].
455
/// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
456
#[inline]
457
pub fn physical_viewport_size(&self) -> Option<UVec2> {
458
self.viewport
459
.as_ref()
460
.map(|v| v.physical_size)
461
.or_else(|| self.physical_target_size())
462
}
463
464
/// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
465
/// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
466
/// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
467
#[inline]
468
pub fn logical_target_size(&self) -> Option<Vec2> {
469
self.computed
470
.target_info
471
.as_ref()
472
.and_then(|t| self.to_logical(t.physical_size))
473
}
474
475
/// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
476
/// ignoring custom `viewport` configuration.
477
/// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
478
/// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
479
#[inline]
480
pub fn physical_target_size(&self) -> Option<UVec2> {
481
self.computed.target_info.as_ref().map(|t| t.physical_size)
482
}
483
484
#[inline]
485
pub fn target_scaling_factor(&self) -> Option<f32> {
486
self.computed
487
.target_info
488
.as_ref()
489
.map(|t: &RenderTargetInfo| t.scale_factor)
490
}
491
492
    /// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
    ///
    /// Returns a copy of the matrix cached in [`ComputedCameraValues`].
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }
497
498
/// Core conversion logic to compute viewport coordinates
499
///
500
/// This function is shared by `world_to_viewport` and `world_to_viewport_with_depth`
501
/// to avoid code duplication.
502
///
503
/// Returns a tuple `(viewport_position, depth)`.
504
fn world_to_viewport_core(
505
&self,
506
camera_transform: &GlobalTransform,
507
world_position: Vec3,
508
) -> Result<(Vec2, f32), ViewportConversionError> {
509
let target_rect = self
510
.logical_viewport_rect()
511
.ok_or(ViewportConversionError::NoViewportSize)?;
512
let mut ndc_space_coords = self
513
.world_to_ndc(camera_transform, world_position)
514
.ok_or(ViewportConversionError::InvalidData)?;
515
// NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
516
if ndc_space_coords.z < 0.0 {
517
return Err(ViewportConversionError::PastFarPlane);
518
}
519
if ndc_space_coords.z > 1.0 {
520
return Err(ViewportConversionError::PastNearPlane);
521
}
522
523
let depth = ndc_space_coords.z;
524
525
// Flip the Y co-ordinate origin from the bottom to the top.
526
ndc_space_coords.y = -ndc_space_coords.y;
527
528
// Once in NDC space, we can discard the z element and map x/y to the viewport rect
529
let viewport_position =
530
(ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
531
Ok((viewport_position, depth))
532
}
533
534
/// Given a position in world space, use the camera to compute the viewport-space coordinates.
535
///
536
/// To get the coordinates in Normalized Device Coordinates, you should use
537
/// [`world_to_ndc`](Self::world_to_ndc).
538
///
539
/// # Panics
540
///
541
/// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
542
/// (see [`world_to_ndc`][Self::world_to_ndc]).
543
#[doc(alias = "world_to_screen")]
544
pub fn world_to_viewport(
545
&self,
546
camera_transform: &GlobalTransform,
547
world_position: Vec3,
548
) -> Result<Vec2, ViewportConversionError> {
549
Ok(self
550
.world_to_viewport_core(camera_transform, world_position)?
551
.0)
552
}
553
554
/// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
555
///
556
/// To get the coordinates in Normalized Device Coordinates, you should use
557
/// [`world_to_ndc`](Self::world_to_ndc).
558
///
559
/// # Panics
560
///
561
/// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
562
/// (see [`world_to_ndc`][Self::world_to_ndc]).
563
#[doc(alias = "world_to_screen_with_depth")]
564
pub fn world_to_viewport_with_depth(
565
&self,
566
camera_transform: &GlobalTransform,
567
world_position: Vec3,
568
) -> Result<Vec3, ViewportConversionError> {
569
let result = self.world_to_viewport_core(camera_transform, world_position)?;
570
// Stretching ndc depth to value via near plane and negating result to be in positive room again.
571
let depth = -self.depth_ndc_to_view_z(result.1);
572
Ok(result.0.extend(depth))
573
}
574
575
    /// Returns a ray originating from the camera, that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a ray pointing from the camera into the world based on the cursor's position.
    ///         && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
    ///     {
    ///         println!("{ray:?}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let ndc_xy = self.viewport_to_ndc(viewport_position)?;

        // NDC z = 1.0 is the near plane, so the ray origin lies on it.
        let ndc_point_near = ndc_xy.extend(1.0).into();
        // Using EPSILON because an ndc with Z = 0 returns NaNs.
        let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
        let view_from_clip = self.computed.clip_from_view.inverse();
        let world_from_view = camera_transform.affine();
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        // Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
        let view_point_near = view_from_clip.project_point3a(ndc_point_near);
        let view_point_far = view_from_clip.project_point3a(ndc_point_far);
        let view_dir = view_point_far - view_point_near;
        let origin = world_from_view.transform_point3a(view_point_near).into();
        let direction = world_from_view.transform_vector3a(view_dir).into();

        // The fallible direction constructor ensures that direction isn't NaN.
        Dir3::new(direction)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d { origin, direction })
    }
638
639
    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a world position based on the cursor's position.
    ///         && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
    ///     {
    ///         println!("World position: {world_pos:.2}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let ndc = self.viewport_to_ndc(viewport_position)?;

        // NDC z = 1.0 places the point on the camera's near plane.
        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }
687
688
/// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
689
///
690
/// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
691
/// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
692
/// To get the coordinates in the render target's viewport dimensions, you should use
693
/// [`world_to_viewport`](Self::world_to_viewport).
694
///
695
/// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
696
/// [`Projection`](super::projection::Projection) contain `NAN`.
697
///
698
/// # Panics
699
///
700
/// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
701
pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
702
&self,
703
camera_transform: &GlobalTransform,
704
world_point: V,
705
) -> Option<V> {
706
let view_from_world = camera_transform.affine().inverse();
707
let view_point = view_from_world.transform_point3a(world_point.into());
708
let ndc_point = self.computed.clip_from_view.project_point3a(view_point);
709
710
(!ndc_point.is_nan()).then_some(ndc_point.into())
711
}
712
713
/// Given a position in Normalized Device Coordinates,
714
/// use the camera's viewport to compute the world space position.
715
///
716
/// The input is expected to be in NDC: `x` and `y` in the range `[-1.0, 1.0]`, and `z` in `[0.0, 1.0]`
717
/// (with `z = 0.0` at the far plane and `z = 1.0` at the near plane).
718
/// The returned value is a position in world space (your game's world units) and is not limited to `[-1.0, 1.0]`.
719
/// To convert from a viewport position to world space, you should use
720
/// [`viewport_to_world`](Self::viewport_to_world).
721
///
722
/// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
723
/// [`Projection`](super::projection::Projection) contain `NAN`.
724
///
725
/// # Panics
726
///
727
/// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
728
pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
729
&self,
730
camera_transform: &GlobalTransform,
731
ndc_point: V,
732
) -> Option<V> {
733
// We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
734
// (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
735
let view_point = self
736
.computed
737
.clip_from_view
738
.inverse()
739
.project_point3a(ndc_point.into());
740
let world_point = camera_transform.affine().transform_point3a(view_point);
741
742
(!world_point.is_nan()).then_some(world_point.into())
743
}
744
745
    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        // Element [3][2] of the projection matrix; for the reverse-z
        // perspective mapping (NDC z: 0.0 = far, 1.0 = near) this holds the
        // near-plane distance.
        let near = self.clip_from_view().w_axis.z; // [3][2]
        // Invert the hyperbolic depth mapping; negated because -z is forward.
        -near / ndc_depth
    }
753
754
/// Converts the depth in Normalized Device Coordinates
755
/// to linear view z for orthographic projections.
756
///
757
/// Note: Depth values in front of the camera will be negative as -z is forward
758
pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
759
-(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
760
// [3][2] [2][2]
761
}
762
763
/// Converts a position in viewport coordinates to NDC.
764
pub fn viewport_to_ndc(
765
&self,
766
viewport_position: Vec2,
767
) -> Result<Vec2, ViewportConversionError> {
768
let target_rect = self
769
.logical_viewport_rect()
770
.ok_or(ViewportConversionError::NoViewportSize)?;
771
let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
772
let mut ndc = rect_relative * 2. - Vec2::ONE;
773
// Flip the Y co-ordinate from the top to the bottom to enter NDC.
774
ndc.y = -ndc.y;
775
Ok(ndc)
776
}
777
}
778
779
/// Control how this [`Camera`] outputs once rendering is completed.
#[derive(Debug, Clone, Copy, Reflect)]
pub enum CameraOutputMode {
    /// Writes the camera output to configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// Render Target's "intermediate" textures, which a camera with a higher order should write to the render target
    /// using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from being displayed, or cause
    /// them to be lost. Only use this if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the Skip mode can be used to remove
    /// unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}
798
799
impl Default for CameraOutputMode {
800
fn default() -> Self {
801
CameraOutputMode::Write {
802
blend_state: None,
803
clear_color: ClearColorConfig::Default,
804
}
805
}
806
}
807
808
/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
#[derive(Component, Debug, Clone, Reflect, From)]
#[reflect(Clone, Component)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}
829
830
impl RenderTarget {
831
/// Get a handle to the render target's image,
832
/// or `None` if the render target is another variant.
833
pub fn as_image(&self) -> Option<&Handle<Image>> {
834
if let Self::Image(image_target) = self {
835
Some(&image_target.handle)
836
} else {
837
None
838
}
839
}
840
}
841
842
impl RenderTarget {
843
/// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
844
pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
845
match self {
846
RenderTarget::Window(window_ref) => window_ref
847
.normalize(primary_window)
848
.map(NormalizedRenderTarget::Window),
849
RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
850
RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
851
RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
852
width: size.x,
853
height: size.y,
854
}),
855
}
856
}
857
}
858
859
/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}
883
884
/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
///
/// See `ManualTextureViews` in `bevy_camera` for more details.
///
/// The [`Default`] handle is id `0`.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);
890
891
/// A render target that renders to an [`Image`].
///
/// `PartialEq`/`Eq`, `Hash` and the orderings are implemented manually (via
/// [`FloatOrd`]) rather than derived, so the `f32` field participates with a
/// total order.
#[derive(Debug, Clone, Reflect)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: f32,
}
901
902
impl Eq for ImageRenderTarget {}
903
904
impl PartialEq for ImageRenderTarget {
905
fn eq(&self, other: &Self) -> bool {
906
self.handle == other.handle && FloatOrd(self.scale_factor) == FloatOrd(other.scale_factor)
907
}
908
}
909
910
impl core::hash::Hash for ImageRenderTarget {
911
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
912
self.handle.hash(state);
913
FloatOrd(self.scale_factor).hash(state);
914
}
915
}
916
917
impl PartialOrd for ImageRenderTarget {
918
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
919
Some(self.cmp(other))
920
}
921
}
922
923
impl Ord for ImageRenderTarget {
924
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
925
self.handle
926
.cmp(&other.handle)
927
.then_with(|| FloatOrd(self.scale_factor).cmp(&FloatOrd(other.scale_factor)))
928
}
929
}
930
931
impl From<Handle<Image>> for RenderTarget {
932
fn from(handle: Handle<Image>) -> Self {
933
Self::Image(handle.into())
934
}
935
}
936
937
impl From<Handle<Image>> for ImageRenderTarget {
    /// Builds an image target with the default scale factor of `1.0`.
    fn from(handle: Handle<Image>) -> Self {
        let scale_factor = 1.0;
        Self {
            handle,
            scale_factor,
        }
    }
}
945
946
impl Default for RenderTarget {
947
fn default() -> Self {
948
Self::Window(Default::default())
949
}
950
}
951
952
/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);
957
958
impl Default for CameraMainTextureUsages {
959
fn default() -> Self {
960
Self(
961
TextureUsages::RENDER_ATTACHMENT
962
| TextureUsages::TEXTURE_BINDING
963
| TextureUsages::COPY_SRC,
964
)
965
}
966
}
967
968
impl CameraMainTextureUsages {
969
pub fn with(mut self, usages: TextureUsages) -> Self {
970
self.0 |= usages;
971
self
972
}
973
}
974
975
#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    // Builds a `Camera` whose viewport and computed render-target info both use
    // `physical_size` (scale factor 1.0), with `clip_from_view` taken from the
    // given projection after updating it to that size.
    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        // Keep the projection's aspect/bounds in sync with the viewport size
        // before extracting its clip-from-view matrix.
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        // Top-left viewport corner: the ray points forward and originates at
        // the top-left of the view volume (z = 0 for the 3d default).
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        // Bottom-right viewport corner mirrors the origin across both axes.
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        // Same corner checks as the 3d test, but the 2d default projection
        // places the ray origins at z = 1000 (as asserted below).
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        // At the viewport center the perspective ray is straight ahead, with
        // its origin 0.1 units along the forward axis (as asserted below).
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}
1059
1060