Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bevyengine
GitHub Repository: bevyengine/bevy
Path: blob/main/crates/bevy_camera/src/camera.rs
6598 views
1
use crate::primitives::Frustum;
2
3
use super::{
4
visibility::{Visibility, VisibleEntities},
5
ClearColorConfig,
6
};
7
use bevy_asset::Handle;
8
use bevy_derive::Deref;
9
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
10
use bevy_image::Image;
11
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
12
use bevy_reflect::prelude::*;
13
use bevy_transform::components::{GlobalTransform, Transform};
14
use bevy_window::{NormalizedWindowRef, WindowRef};
15
use core::ops::Range;
16
use derive_more::derive::From;
17
use thiserror::Error;
18
use wgpu_types::{BlendState, TextureUsages};
19
20
/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    /// Defaults to `0.0..1.0` (see the `Default` impl below).
    pub depth: Range<f32>,
}
37
38
impl Default for Viewport {
39
fn default() -> Self {
40
Self {
41
physical_position: Default::default(),
42
physical_size: UVec2::new(1, 1),
43
depth: 0.0..1.0,
44
}
45
}
46
}
47
48
impl Viewport {
49
/// Cut the viewport rectangle so that it lies inside a rectangle of the
50
/// given size.
51
///
52
/// If either of the viewport's position coordinates lies outside the given
53
/// dimensions, it will be moved just inside first. If either of the given
54
/// dimensions is zero, the position and size of the viewport rectangle will
55
/// both be set to zero in that dimension.
56
pub fn clamp_to_size(&mut self, size: UVec2) {
57
// If the origin of the viewport rect is outside, then adjust so that
58
// it's just barely inside. Then, cut off the part that is outside.
59
if self.physical_size.x + self.physical_position.x > size.x {
60
if self.physical_position.x < size.x {
61
self.physical_size.x = size.x - self.physical_position.x;
62
} else if size.x > 0 {
63
self.physical_position.x = size.x - 1;
64
self.physical_size.x = 1;
65
} else {
66
self.physical_position.x = 0;
67
self.physical_size.x = 0;
68
}
69
}
70
if self.physical_size.y + self.physical_position.y > size.y {
71
if self.physical_position.y < size.y {
72
self.physical_size.y = size.y - self.physical_position.y;
73
} else if size.y > 0 {
74
self.physical_position.y = size.y - 1;
75
self.physical_size.y = 1;
76
} else {
77
self.physical_position.y = 0;
78
self.physical_size.y = 0;
79
}
80
}
81
}
82
83
pub fn from_viewport_and_override(
84
viewport: Option<&Self>,
85
main_pass_resolution_override: Option<&MainPassResolutionOverride>,
86
) -> Option<Self> {
87
let mut viewport = viewport.cloned();
88
89
if let Some(override_size) = main_pass_resolution_override {
90
if viewport.is_none() {
91
viewport = Some(Viewport::default());
92
}
93
94
viewport.as_mut().unwrap().physical_size = **override_size;
95
}
96
97
viewport
98
}
99
}
100
101
/// Override the resolution a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
///
/// See [`Viewport::from_viewport_and_override`], which applies this override
/// to a camera's viewport.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);
114
115
/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the cameras viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` =
///   1920,1080
///
/// However since only the ratio between the values is important, they could all
/// be divided by 120 and still produce the same image. Camera D would for
/// example have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view. Defaults to 1x1.
    pub full_size: UVec2,
    /// Offset of the sub camera, relative to `full_size`. Defaults to (0, 0).
    pub offset: Vec2,
    /// Size of the sub camera, relative to `full_size`. Defaults to 1x1.
    pub size: UVec2,
}
153
154
impl Default for SubCameraView {
155
fn default() -> Self {
156
Self {
157
full_size: UVec2::new(1, 1),
158
offset: Vec2::new(0., 0.),
159
size: UVec2::new(1, 1),
160
}
161
}
162
}
163
164
/// Information about the current [`RenderTarget`].
#[derive(Default, Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, typically it is a value greater or equal than 1.0,
    /// representing the ratio between the size of the window in physical pixels and the logical size of the window.
    pub scale_factor: f32,
}
175
176
/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    /// The projection matrix (view space -> clip space).
    pub clip_from_view: Mat4,
    /// Size and scale factor of the current [`RenderTarget`], if known.
    pub target_info: Option<RenderTargetInfo>,
    /// Size of the `Viewport` as of the last update
    /// (presumably kept to detect viewport changes — confirm with the camera update system).
    pub old_viewport_size: Option<UVec2>,
    /// The [`SubCameraView`] as of the last update
    /// (presumably kept to detect sub-view changes — confirm with the camera update system).
    pub old_sub_camera_view: Option<SubCameraView>,
}
185
186
/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// Exposure value at ISO 100. Higher values darken the image.
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}
196
197
impl Exposure {
    /// Preset using [`Self::EV100_SUNLIGHT`].
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    /// Preset using [`Self::EV100_OVERCAST`].
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    /// Preset using [`Self::EV100_INDOOR`].
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    /// EV100 for a scene lit by direct sunlight.
    pub const EV100_SUNLIGHT: f32 = 15.0;
    /// EV100 for an overcast scene.
    pub const EV100_OVERCAST: f32 = 12.0;
    /// EV100 for an indoor scene.
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    /// Builds an [`Exposure`] from physical camera parameters by computing
    /// their [EV100](PhysicalCameraParameters::ev100).
    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}
238
239
impl Default for Exposure {
240
fn default() -> Self {
241
Self::BLENDER
242
}
243
}
244
245
/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// The f-number (relative aperture) of the lens.
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// Shutter speed, in seconds.
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// ISO sensitivity.
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}
265
266
impl PhysicalCameraParameters {
267
/// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
268
pub fn ev100(&self) -> f32 {
269
ops::log2(
270
self.aperture_f_stops * self.aperture_f_stops * 100.0
271
/ (self.shutter_speed_s * self.sensitivity_iso),
272
)
273
}
274
}
275
276
impl Default for PhysicalCameraParameters {
277
fn default() -> Self {
278
Self {
279
aperture_f_stops: 1.0,
280
shutter_speed_s: 1.0 / 125.0,
281
sensitivity_iso: 100.0,
282
sensor_height: 0.01866,
283
}
284
}
285
}
286
287
/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}
318
319
/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    /// Defaults to `0`.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered. Defaults to `true`.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    /// The "target" that this camera will render to.
    pub target: RenderTarget,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    #[reflect(ignore, clone)]
    pub output_mode: CameraOutputMode,
    /// If this is enabled, a previous camera exists that shares this camera's render target, and this camera has MSAA enabled, then the previous camera's
    /// outputs will be written to the intermediate multi-sampled render target textures for this camera. This enables cameras with MSAA enabled to
    /// "write their results on top" of previous camera results, and include them as a part of their render results. This is enabled by default to ensure
    /// cameras with MSAA enabled layer their results in the same way as cameras without MSAA enabled by default.
    pub msaa_writeback: bool,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}
370
371
impl Default for Camera {
372
fn default() -> Self {
373
Self {
374
is_active: true,
375
order: 0,
376
viewport: None,
377
computed: Default::default(),
378
target: Default::default(),
379
output_mode: Default::default(),
380
msaa_writeback: true,
381
clear_color: Default::default(),
382
sub_camera_view: None,
383
}
384
}
385
}
386
387
impl Camera {
388
/// Converts a physical size in this `Camera` to a logical size.
389
#[inline]
390
pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
391
let scale = self.computed.target_info.as_ref()?.scale_factor;
392
Some(physical_size.as_vec2() / scale)
393
}
394
395
/// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
396
/// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
397
/// the full physical rect of the current [`RenderTarget`].
398
#[inline]
399
pub fn physical_viewport_rect(&self) -> Option<URect> {
400
let min = self
401
.viewport
402
.as_ref()
403
.map(|v| v.physical_position)
404
.unwrap_or(UVec2::ZERO);
405
let max = min + self.physical_viewport_size()?;
406
Some(URect { min, max })
407
}
408
409
/// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
410
/// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
411
/// full logical rect of the current [`RenderTarget`].
412
#[inline]
413
pub fn logical_viewport_rect(&self) -> Option<Rect> {
414
let URect { min, max } = self.physical_viewport_rect()?;
415
Some(Rect {
416
min: self.to_logical(min)?,
417
max: self.to_logical(max)?,
418
})
419
}
420
421
/// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
422
/// will be the size of that custom viewport. Otherwise it will default to the full logical size
423
/// of the current [`RenderTarget`].
424
/// For logic that requires the full logical size of the
425
/// [`RenderTarget`], prefer [`Camera::logical_target_size`].
426
///
427
/// Returns `None` if either:
428
/// - the function is called just after the `Camera` is created, before `camera_system` is executed,
429
/// - the [`RenderTarget`] isn't correctly set:
430
/// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
431
/// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
432
/// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
433
/// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
434
#[inline]
435
pub fn logical_viewport_size(&self) -> Option<Vec2> {
436
self.viewport
437
.as_ref()
438
.and_then(|v| self.to_logical(v.physical_size))
439
.or_else(|| self.logical_target_size())
440
}
441
442
/// The physical size of this camera's viewport (in physical pixels).
443
/// If the `viewport` field is set to [`Some`], this
444
/// will be the size of that custom viewport. Otherwise it will default to the full physical size of
445
/// the current [`RenderTarget`].
446
/// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
447
#[inline]
448
pub fn physical_viewport_size(&self) -> Option<UVec2> {
449
self.viewport
450
.as_ref()
451
.map(|v| v.physical_size)
452
.or_else(|| self.physical_target_size())
453
}
454
455
/// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
456
/// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
457
/// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
458
#[inline]
459
pub fn logical_target_size(&self) -> Option<Vec2> {
460
self.computed
461
.target_info
462
.as_ref()
463
.and_then(|t| self.to_logical(t.physical_size))
464
}
465
466
/// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
467
/// ignoring custom `viewport` configuration.
468
/// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
469
/// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
470
#[inline]
471
pub fn physical_target_size(&self) -> Option<UVec2> {
472
self.computed.target_info.as_ref().map(|t| t.physical_size)
473
}
474
475
#[inline]
476
pub fn target_scaling_factor(&self) -> Option<f32> {
477
self.computed
478
.target_info
479
.as_ref()
480
.map(|t: &RenderTargetInfo| t.scale_factor)
481
}
482
483
/// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
484
#[inline]
485
pub fn clip_from_view(&self) -> Mat4 {
486
self.computed.clip_from_view
487
}
488
489
/// Given a position in world space, use the camera to compute the viewport-space coordinates.
490
///
491
/// To get the coordinates in Normalized Device Coordinates, you should use
492
/// [`world_to_ndc`](Self::world_to_ndc).
493
///
494
/// # Panics
495
///
496
/// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
497
/// (see [`world_to_ndc`][Self::world_to_ndc]).
498
#[doc(alias = "world_to_screen")]
499
pub fn world_to_viewport(
500
&self,
501
camera_transform: &GlobalTransform,
502
world_position: Vec3,
503
) -> Result<Vec2, ViewportConversionError> {
504
let target_rect = self
505
.logical_viewport_rect()
506
.ok_or(ViewportConversionError::NoViewportSize)?;
507
let mut ndc_space_coords = self
508
.world_to_ndc(camera_transform, world_position)
509
.ok_or(ViewportConversionError::InvalidData)?;
510
// NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
511
if ndc_space_coords.z < 0.0 {
512
return Err(ViewportConversionError::PastFarPlane);
513
}
514
if ndc_space_coords.z > 1.0 {
515
return Err(ViewportConversionError::PastNearPlane);
516
}
517
518
// Flip the Y co-ordinate origin from the bottom to the top.
519
ndc_space_coords.y = -ndc_space_coords.y;
520
521
// Once in NDC space, we can discard the z element and map x/y to the viewport rect
522
let viewport_position =
523
(ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
524
Ok(viewport_position)
525
}
526
527
/// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
528
///
529
/// To get the coordinates in Normalized Device Coordinates, you should use
530
/// [`world_to_ndc`](Self::world_to_ndc).
531
///
532
/// # Panics
533
///
534
/// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
535
/// (see [`world_to_ndc`][Self::world_to_ndc]).
536
#[doc(alias = "world_to_screen_with_depth")]
537
pub fn world_to_viewport_with_depth(
538
&self,
539
camera_transform: &GlobalTransform,
540
world_position: Vec3,
541
) -> Result<Vec3, ViewportConversionError> {
542
let target_rect = self
543
.logical_viewport_rect()
544
.ok_or(ViewportConversionError::NoViewportSize)?;
545
let mut ndc_space_coords = self
546
.world_to_ndc(camera_transform, world_position)
547
.ok_or(ViewportConversionError::InvalidData)?;
548
// NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
549
if ndc_space_coords.z < 0.0 {
550
return Err(ViewportConversionError::PastFarPlane);
551
}
552
if ndc_space_coords.z > 1.0 {
553
return Err(ViewportConversionError::PastNearPlane);
554
}
555
556
// Stretching ndc depth to value via near plane and negating result to be in positive room again.
557
let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);
558
559
// Flip the Y co-ordinate origin from the bottom to the top.
560
ndc_space_coords.y = -ndc_space_coords.y;
561
562
// Once in NDC space, we can discard the z element and map x/y to the viewport rect
563
let viewport_position =
564
(ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
565
Ok(viewport_position.extend(depth))
566
}
567
568
/// Returns a ray originating from the camera, that passes through everything beyond `viewport_position`.
569
///
570
/// The resulting ray starts on the near plane of the camera.
571
///
572
/// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
573
///
574
/// To get the world space coordinates with Normalized Device Coordinates, you should use
575
/// [`ndc_to_world`](Self::ndc_to_world).
576
///
577
/// # Example
578
/// ```no_run
579
/// # use bevy_window::Window;
580
/// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
581
/// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
582
/// # use bevy_camera::Camera;
583
/// # use bevy_app::{App, PostUpdate};
584
/// #
585
/// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
586
/// let (camera, camera_transform) = *camera_query;
587
///
588
/// if let Some(cursor_position) = window.cursor_position()
589
/// // Calculate a ray pointing from the camera into the world based on the cursor's position.
590
/// && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
591
/// {
592
/// println!("{ray:?}");
593
/// }
594
/// }
595
///
596
/// # let mut app = App::new();
597
/// // Run the system after transform propagation so the camera's global transform is up-to-date.
598
/// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
599
/// ```
600
///
601
/// # Panics
602
///
603
/// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
604
/// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world).
605
pub fn viewport_to_world(
606
&self,
607
camera_transform: &GlobalTransform,
608
viewport_position: Vec2,
609
) -> Result<Ray3d, ViewportConversionError> {
610
let target_rect = self
611
.logical_viewport_rect()
612
.ok_or(ViewportConversionError::NoViewportSize)?;
613
let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
614
let mut ndc_xy = rect_relative * 2. - Vec2::ONE;
615
// Flip the Y co-ordinate from the top to the bottom to enter NDC.
616
ndc_xy.y = -ndc_xy.y;
617
618
let ndc_point_near = ndc_xy.extend(1.0).into();
619
// Using EPSILON because an ndc with Z = 0 returns NaNs.
620
let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
621
let view_from_clip = self.computed.clip_from_view.inverse();
622
let world_from_view = camera_transform.affine();
623
// We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
624
// (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
625
// Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
626
let view_point_near = view_from_clip.project_point3a(ndc_point_near);
627
let view_point_far = view_from_clip.project_point3a(ndc_point_far);
628
let view_dir = view_point_far - view_point_near;
629
let origin = world_from_view.transform_point3a(view_point_near).into();
630
let direction = world_from_view.transform_vector3a(view_dir).into();
631
632
// The fallible direction constructor ensures that direction isn't NaN.
633
Dir3::new(direction)
634
.map_err(|_| ViewportConversionError::InvalidData)
635
.map(|direction| Ray3d { origin, direction })
636
}
637
638
/// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
639
///
640
/// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
641
///
642
/// To get the world space coordinates with Normalized Device Coordinates, you should use
643
/// [`ndc_to_world`](Self::ndc_to_world).
644
///
645
/// # Example
646
/// ```no_run
647
/// # use bevy_window::Window;
648
/// # use bevy_ecs::prelude::*;
649
/// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
650
/// # use bevy_camera::Camera;
651
/// # use bevy_app::{App, PostUpdate};
652
/// #
653
/// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
654
/// let (camera, camera_transform) = *camera_query;
655
///
656
/// if let Some(cursor_position) = window.cursor_position()
657
/// // Calculate a world position based on the cursor's position.
658
/// && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
659
/// {
660
/// println!("World position: {world_pos:.2}");
661
/// }
662
/// }
663
///
664
/// # let mut app = App::new();
665
/// // Run the system after transform propagation so the camera's global transform is up-to-date.
666
/// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
667
/// ```
668
///
669
/// # Panics
670
///
671
/// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
672
/// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world).
673
pub fn viewport_to_world_2d(
674
&self,
675
camera_transform: &GlobalTransform,
676
viewport_position: Vec2,
677
) -> Result<Vec2, ViewportConversionError> {
678
let target_rect = self
679
.logical_viewport_rect()
680
.ok_or(ViewportConversionError::NoViewportSize)?;
681
let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();
682
683
// Flip the Y co-ordinate origin from the top to the bottom.
684
rect_relative.y = 1.0 - rect_relative.y;
685
686
let ndc = rect_relative * 2. - Vec2::ONE;
687
688
let world_near_plane = self
689
.ndc_to_world(camera_transform, ndc.extend(1.))
690
.ok_or(ViewportConversionError::InvalidData)?;
691
692
Ok(world_near_plane.truncate())
693
}
694
695
/// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
696
///
697
/// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
698
/// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
699
/// To get the coordinates in the render target's viewport dimensions, you should use
700
/// [`world_to_viewport`](Self::world_to_viewport).
701
///
702
/// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
703
/// [`Projection`](super::projection::Projection) contain `NAN`.
704
///
705
/// # Panics
706
///
707
/// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
708
pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
709
&self,
710
camera_transform: &GlobalTransform,
711
world_point: V,
712
) -> Option<V> {
713
let view_from_world = camera_transform.affine().inverse();
714
let view_point = view_from_world.transform_point3a(world_point.into());
715
let ndc_point = self.computed.clip_from_view.project_point3a(view_point);
716
717
(!ndc_point.is_nan()).then_some(ndc_point.into())
718
}
719
720
/// Given a position in Normalized Device Coordinates,
721
/// use the camera's viewport to compute the world space position.
722
///
723
/// When the position is within the viewport the values returned will be between -1.0 and 1.0 on the X and Y axes,
724
/// and between 0.0 and 1.0 on the Z axis.
725
/// To get the world space coordinates with the viewport position, you should use
726
/// [`world_to_viewport`](Self::world_to_viewport).
727
///
728
/// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
729
/// [`Projection`](super::projection::Projection) contain `NAN`.
730
///
731
/// # Panics
732
///
733
/// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
734
pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
735
&self,
736
camera_transform: &GlobalTransform,
737
ndc_point: V,
738
) -> Option<V> {
739
// We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
740
// (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
741
let view_point = self
742
.computed
743
.clip_from_view
744
.inverse()
745
.project_point3a(ndc_point.into());
746
let world_point = camera_transform.affine().transform_point3a(view_point);
747
748
(!world_point.is_nan()).then_some(world_point.into())
749
}
750
751
/// Converts the depth in Normalized Device Coordinates
752
/// to linear view z for perspective projections.
753
///
754
/// Note: Depth values in front of the camera will be negative as -z is forward
755
pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
756
let near = self.clip_from_view().w_axis.z; // [3][2]
757
-near / ndc_depth
758
}
759
760
/// Converts the depth in Normalized Device Coordinates
761
/// to linear view z for orthographic projections.
762
///
763
/// Note: Depth values in front of the camera will be negative as -z is forward
764
pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
765
-(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
766
// [3][2] [2][2]
767
}
}
/// Control how this [`Camera`] outputs once rendering is completed.
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Writes the camera output to configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// Render Target's "intermediate" textures, which a camera with a higher order should write to the render target
    /// using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from being displayed, or cause
    /// them to be lost. Only use this if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the Skip mode can be used to remove
    /// unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}
impl Default for CameraOutputMode {
791
fn default() -> Self {
792
CameraOutputMode::Write {
793
blend_state: None,
794
clear_color: ClearColorConfig::Default,
795
}
796
}
797
}
/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
///
/// The `From` derive provides conversions from each variant's payload type.
#[derive(Debug, Clone, Reflect, From)]
#[reflect(Clone)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}
impl RenderTarget {
822
/// Get a handle to the render target's image,
823
/// or `None` if the render target is another variant.
824
pub fn as_image(&self) -> Option<&Handle<Image>> {
825
if let Self::Image(image_target) = self {
826
Some(&image_target.handle)
827
} else {
828
None
829
}
830
}
831
}
impl RenderTarget {
834
/// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
835
pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
836
match self {
837
RenderTarget::Window(window_ref) => window_ref
838
.normalize(primary_window)
839
.map(NormalizedRenderTarget::Window),
840
RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
841
RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
842
RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
843
width: size.x,
844
height: size.y,
845
}),
846
}
847
}
848
}
/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}
/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
///
/// This is a plain `u32` newtype, so handles are cheap to copy and compare.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);
/// A render target that renders to an [`Image`].
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    ///
    /// Stored as [`FloatOrd`] so the struct can derive `Eq`, `Hash`, and `Ord`.
    pub scale_factor: FloatOrd,
}
impl From<Handle<Image>> for RenderTarget {
892
fn from(handle: Handle<Image>) -> Self {
893
Self::Image(handle.into())
894
}
895
}
impl From<Handle<Image>> for ImageRenderTarget {
898
fn from(handle: Handle<Image>) -> Self {
899
Self {
900
handle,
901
scale_factor: FloatOrd(1.0),
902
}
903
}
904
}
impl Default for RenderTarget {
907
fn default() -> Self {
908
Self::Window(Default::default())
909
}
910
}
/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);
impl Default for CameraMainTextureUsages {
919
fn default() -> Self {
920
Self(
921
TextureUsages::RENDER_ATTACHMENT
922
| TextureUsages::TEXTURE_BINDING
923
| TextureUsages::COPY_SRC,
924
)
925
}
926
}
impl CameraMainTextureUsages {
929
pub fn with(mut self, usages: TextureUsages) -> Self {
930
self.0 |= usages;
931
self
932
}
933
}
#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    /// Builds a `Camera` whose viewport and computed render-target info both
    /// use `physical_size`, with `clip_from_view` derived from `projection`.
    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        // Normally populated by the render systems; filled in manually here so
        // the viewport math can run without an actual render target.
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        // Let the projection adapt to the viewport's aspect ratio before
        // extracting its clip-from-view matrix.
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        // Top-left corner of the viewport maps to the top-left of the ortho
        // volume; rays from an orthographic camera are always parallel to
        // the camera's forward axis.
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        // Bottom-right corner maps to the opposite extreme.
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        // The expected origin z of 1000.0 reflects the near-plane placement in
        // `OrthographicProjection::default_2d` — presumably near = -1000; verify
        // against the projection defaults if this test needs updating.
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        // A ray through the exact viewport center points straight ahead, and
        // originates on the near plane (0.1 here — presumably the
        // `PerspectiveProjection` default near; confirm against its Default impl).
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}