use crate::primitives::Frustum;

use super::{
    visibility::{Visibility, VisibleEntities},
    ClearColorConfig,
};
use bevy_asset::Handle;
use bevy_derive::Deref;
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
use bevy_image::Image;
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
use bevy_reflect::prelude::*;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{NormalizedWindowRef, WindowRef};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use wgpu_types::{BlendState, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            // 1x1 rather than 0x0 so a default viewport is never degenerate.
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
    pub fn clamp_to_size(&mut self, size: UVec2) {
        // If the origin of the viewport rect is outside, then adjust so that
        // it's just barely inside. Then, cut off the part that is outside.
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                // Origin is inside: shrink the width to fit.
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                // Origin is past the right edge: move to the last column, 1px wide.
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                // Target has zero width: collapse this dimension entirely.
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }

    /// Combines an optional [`Viewport`] with an optional [`MainPassResolutionOverride`].
    ///
    /// If an override is present, the returned viewport (a default one is created if
    /// `viewport` is `None`) has its `physical_size` replaced by the override value.
    /// If no override is present, `viewport` is returned unchanged (cloned).
    pub fn from_viewport_and_override(
        viewport: Option<&Self>,
        main_pass_resolution_override: Option<&MainPassResolutionOverride>,
    ) -> Option<Self> {
        let mut viewport = viewport.cloned();

        if let Some(override_size) = main_pass_resolution_override {
            if viewport.is_none() {
                viewport = Some(Viewport::default());
            }

            viewport.as_mut().unwrap().physical_size = **override_size;
        }

        viewport
    }
}

/// Override the resolution a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the cameras viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` =
///   1920,1080
///
/// However since only the ratio between the values is important, they could all
/// be divided by 120 and still produce the same image. Camera D would for
/// example have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Default, Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, typically it is a value greater or equal than 1.0,
    /// representing the ratio between the size of the window in physical pixels and the logical size of the window.
    pub scale_factor: f32,
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    /// The projection matrix computed from the camera's [`Projection`](super::projection::Projection).
    pub clip_from_view: Mat4,
    /// Size/scale information about the current render target, if resolved.
    pub target_info: Option<RenderTargetInfo>,
    // size of the `Viewport`
    pub old_viewport_size: Option<UVec2>,
    // previous value of `Camera::sub_camera_view`, used for change detection
    pub old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    /// Creates an [`Exposure`] whose EV100 is derived from physical camera parameters.
    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
    pub fn ev100(&self) -> f32 {
        // EV100 = log2(N^2 * 100 / (t * S)) with N = f-number, t = shutter time, S = ISO.
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            // 18.66mm (Super 35), expressed in meters.
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    /// The "target" that this camera will render to.
    pub target: RenderTarget,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    #[reflect(ignore, clone)]
    pub output_mode: CameraOutputMode,
    /// If this is enabled, a previous camera exists that shares this camera's render target, and this camera has MSAA enabled, then the previous camera's
    /// outputs will be written to the intermediate multi-sampled render target textures for this camera. This enables cameras with MSAA enabled to
    /// "write their results on top" of previous camera results, and include them as a part of their render results. This is enabled by default to ensure
    /// cameras with MSAA enabled layer their results in the same way as cameras without MSAA enabled by default.
    pub msaa_writeback: bool,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            target: Default::default(),
            output_mode: Default::default(),
            msaa_writeback: true,
            clear_color: Default::default(),
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
    ///
    /// Returns `None` if the render target info has not been computed yet.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`].
    ///
    /// Returns `None` if the render target info has not been computed yet.
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        // (reversed-Z: 1.0 is the near plane, 0.0 is the far plane).
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Stretching ndc depth to value via near plane and negating result to be in positive room again.
        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position.extend(depth))
    }

    /// Returns a ray originating from the camera, that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a ray pointing from the camera into the world based on the cursor's position.
    ///         && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
    ///     {
    ///         println!("{ray:?}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
        let mut ndc_xy = rect_relative * 2. - Vec2::ONE;
        // Flip the Y co-ordinate from the top to the bottom to enter NDC.
        ndc_xy.y = -ndc_xy.y;

        // Reversed-Z: z = 1.0 is the near plane.
        let ndc_point_near = ndc_xy.extend(1.0).into();
        // Using EPSILON because an ndc with Z = 0 returns NaNs.
        let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
        let view_from_clip = self.computed.clip_from_view.inverse();
        let world_from_view = camera_transform.affine();
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        // Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
        let view_point_near = view_from_clip.project_point3a(ndc_point_near);
        let view_point_far = view_from_clip.project_point3a(ndc_point_far);
        let view_dir = view_point_far - view_point_near;
        let origin = world_from_view.transform_point3a(view_point_near).into();
        let direction = world_from_view.transform_vector3a(view_dir).into();

        // The fallible direction constructor ensures that direction isn't NaN.
        Dir3::new(direction)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d { origin, direction })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a world position based on the cursor's position.
    ///         && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
    ///     {
    ///         println!("World position: {world_pos:.2}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();

        // Flip the Y co-ordinate origin from the top to the bottom.
        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
    ///
    /// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
    /// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        world_point: V,
    ) -> Option<V> {
        let view_from_world = camera_transform.affine().inverse();
        let view_point = view_from_world.transform_point3a(world_point.into());
        let ndc_point = self.computed.clip_from_view.project_point3a(view_point);

        (!ndc_point.is_nan()).then_some(ndc_point.into())
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// When the given position is within the viewport, its X and Y values are between -1.0 and 1.0,
    /// and its Z value is between 0.0 and 1.0.
    /// To get the world space coordinates with the viewport position, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        ndc_point: V,
    ) -> Option<V> {
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        let view_point = self
            .computed
            .clip_from_view
            .inverse()
            .project_point3a(ndc_point.into());
        let world_point = camera_transform.affine().transform_point3a(view_point);

        (!world_point.is_nan()).then_some(world_point.into())
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
        // [3][2]                                        [2][2]
    }
}

/// Control how this [`Camera`] outputs once rendering is completed.
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Writes the camera output to configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// Render Target's "intermediate" textures, which a camera with a higher order should write to the render target
    /// using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from being displayed, or cause
    /// them to be lost. Only use this if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the Skip mode can be used to remove
    /// unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
#[derive(Debug, Clone, Reflect, From)]
#[reflect(Clone)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}

impl RenderTarget {
    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }
}

impl RenderTarget {
    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
            RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
                width: size.x,
                height: size.y,
            }),
        }
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}

/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);

/// A render target that renders to an [`Image`].
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: FloatOrd,
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            // Scale factor 1.0: image targets have no window scaling by default.
            scale_factor: FloatOrd(1.0),
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);

impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

impl CameraMainTextureUsages {
    /// Returns a copy of `self` with the given usages added (bitwise OR).
    pub fn with(mut self, usages: TextureUsages) -> Self {
        self.0 |= usages;
        self
    }
}

#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use
bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    /// Builds a `Camera` whose viewport and render-target info both use
    /// `physical_size`, with `projection` updated for that size and its
    /// clip-from-view matrix installed on the camera.
    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        // Top-left viewport corner maps to the top-left of the ortho volume,
        // and every ortho ray points along the camera's forward direction.
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        // Bottom-right viewport corner maps to the bottom-right.
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        // Same corner checks as the 3d case; the 2d default projection places
        // ray origins at z = 1000.0 (per the asserted values below).
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        // The viewport center yields a ray straight along `forward`, with the
        // origin 0.1 units ahead of the camera (the asserted near distance).
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}