use crate::primitives::Frustum;
use super::{
    visibility::{Visibility, VisibleEntities},
    ClearColorConfig, MsaaWriteback,
};
use bevy_asset::Handle;
use bevy_derive::Deref;
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
use bevy_image::Image;
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
use bevy_reflect::prelude::*;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{NormalizedWindowRef, WindowRef};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use wgpu_types::{BlendState, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
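    ///
    /// A minimal sketch of the intended behavior (the values are illustrative,
    /// not taken from the docs above):
    ///
    /// ```no_run
    /// # use bevy_camera::Viewport;
    /// # use bevy_math::UVec2;
    /// let mut viewport = Viewport {
    ///     physical_position: UVec2::new(1900, 0),
    ///     physical_size: UVec2::new(100, 100),
    ///     ..Default::default()
    /// };
    /// // Clamp to a 1920x1080 target: the rect is cut down to the 20 pixels
    /// // that still fit horizontally.
    /// viewport.clamp_to_size(UVec2::new(1920, 1080));
    /// assert_eq!(viewport.physical_size, UVec2::new(20, 100));
    /// ```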
    pub fn clamp_to_size(&mut self, size: UVec2) {
        // If the origin of the viewport rect is outside, then adjust so that
        // it's just barely inside. Then, cut off the part that is outside.
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }

    /// Builds the effective viewport from an optional `viewport` and an optional
    /// [`MainPassResolutionOverride`]: when an override is present, its size replaces
    /// the viewport's `physical_size` (starting from [`Viewport::default`] if
    /// `viewport` is `None`); otherwise `viewport` is returned as-is.
    pub fn from_viewport_and_override(
        viewport: Option<&Self>,
        main_pass_resolution_override: Option<&MainPassResolutionOverride>,
    ) -> Option<Self> {
        if let Some(override_size) = main_pass_resolution_override {
            let mut vp = viewport.map_or_else(Self::default, Self::clone);
            vp.physical_size = **override_size;
            Some(vp)
        } else {
            viewport.cloned()
        }
    }
}

/// Override the resolution a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,1080
///
/// However, since only the ratio between the values is important, they could all
/// be divided by 120 and still produce the same image. Camera D, for example,
/// would then have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
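///
/// A minimal sketch of the camera-A configuration from the table above (the
/// other three differ only in `offset`):
///
/// ```no_run
/// # use bevy_camera::SubCameraView;
/// # use bevy_math::{UVec2, Vec2};
/// let camera_a = SubCameraView {
///     full_size: UVec2::new(3840, 2160),
///     size: UVec2::new(1920, 1080),
///     offset: Vec2::new(0.0, 0.0),
/// };
/// ```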
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Debug, Reflect, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, this is typically a value greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and the logical size of the window.
    pub scale_factor: f32,
}

impl Default for RenderTargetInfo {
    fn default() -> Self {
        Self {
            physical_size: Default::default(),
            scale_factor: 1.,
        }
    }
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Reflect, Clone)]
pub struct ComputedCameraValues {
    pub clip_from_view: Mat4,
    pub target_info: Option<RenderTargetInfo>,
    // size of the `Viewport`
    pub old_viewport_size: Option<UVec2>,
    pub old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
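///
/// A small sketch of how these parameters feed [`Exposure`], using the
/// `Default` values defined below:
///
/// ```no_run
/// # use bevy_camera::{Exposure, PhysicalCameraParameters};
/// // f/1.0, 1/125 s shutter, ISO 100.
/// let params = PhysicalCameraParameters::default();
/// // ev100 = log2(1.0 * 1.0 * 100.0 / ((1.0 / 125.0) * 100.0)) ≈ 6.97
/// let exposure = Exposure::from_physical_camera(params);
/// ```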
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// used to transform the 3D objects into a 2D image, and the render target into which that image
/// is rendered.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
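///
/// A minimal sketch of a two-camera split-screen setup using `order` and
/// `viewport` (sizes are illustrative):
///
/// ```no_run
/// # use bevy_camera::{Camera, Camera2d, Viewport};
/// # use bevy_ecs::prelude::Commands;
/// # use bevy_math::UVec2;
/// fn setup(mut commands: Commands) {
///     // Left half of a 1920x1080 target.
///     commands.spawn((
///         Camera2d,
///         Camera {
///             order: 0,
///             viewport: Some(Viewport {
///                 physical_position: UVec2::new(0, 0),
///                 physical_size: UVec2::new(960, 1080),
///                 ..Default::default()
///             }),
///             ..Default::default()
///         },
///     ));
///     // Right half; a higher order renders later.
///     commands.spawn((
///         Camera2d,
///         Camera {
///             order: 1,
///             viewport: Some(Viewport {
///                 physical_position: UVec2::new(960, 0),
///                 physical_size: UVec2::new(960, 1080),
///                 ..Default::default()
///             }),
///             ..Default::default()
///         },
///     ));
/// }
/// ```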
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    RenderTarget
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    pub computed: ComputedCameraValues,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    pub output_mode: CameraOutputMode,
    /// Controls when MSAA writeback occurs for this camera.
    /// See [`MsaaWriteback`] for available options.
    pub msaa_writeback: MsaaWriteback,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// Whether to switch culling mode so that materials that request backface
    /// culling cull front faces, and vice versa.
    ///
    /// This is typically used for cameras that mirror the world that they
    /// render across a plane, because doing that flips the winding of each
    /// polygon.
    ///
    /// This setting doesn't affect materials that disable backface culling.
    pub invert_culling: bool,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            output_mode: Default::default(),
            msaa_writeback: MsaaWriteback::default(),
            clear_color: Default::default(),
            invert_culling: false,
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
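    ///
    /// A short sketch (assumes the target size has already been computed by
    /// `camera_system`, so the result is `Some`):
    ///
    /// ```no_run
    /// # use bevy_camera::Camera;
    /// # fn print_viewport(camera: &Camera) {
    /// if let Some(rect) = camera.logical_viewport_rect() {
    ///     println!("rendering to a {}x{} area", rect.width(), rect.height());
    /// }
    /// # }
    /// ```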
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`].
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Core conversion logic to compute viewport coordinates.
    ///
    /// This function is shared by `world_to_viewport` and `world_to_viewport_with_depth`
    /// to avoid code duplication.
    ///
    /// Returns a tuple `(viewport_position, depth)`.
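    ///
    /// The mapping itself is linear: after flipping Y, an NDC x/y coordinate in
    /// `[-1, 1]` is remapped to `target_rect.min + (ndc + 1) / 2 * target_rect.size()`,
    /// so NDC `(0, 0)` lands at the center of the viewport rect.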
    fn world_to_viewport_core(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<(Vec2, f32), ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        let depth = ndc_space_coords.z;

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok((viewport_position, depth))
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
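    ///
    /// A short sketch, mirroring the cursor example on
    /// [`viewport_to_world`](Self::viewport_to_world) but in the opposite
    /// direction:
    ///
    /// ```no_run
    /// # use bevy_ecs::prelude::Single;
    /// # use bevy_transform::prelude::GlobalTransform;
    /// # use bevy_camera::Camera;
    /// # use bevy_math::Vec3;
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///     if let Ok(screen_pos) = camera.world_to_viewport(camera_transform, Vec3::ZERO) {
    ///         println!("world origin is at {screen_pos} in the viewport");
    ///     }
    /// }
    /// ```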
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        Ok(self
            .world_to_viewport_core(camera_transform, world_position)?
            .0)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let result = self.world_to_viewport_core(camera_transform, world_position)?;
        // Stretch the NDC depth to a view-space value via the near plane, then negate the
        // result so it is positive again (view z in front of the camera is negative).
        let depth = -self.depth_ndc_to_view_z(result.1);
        Ok(result.0.extend(depth))
    }

    /// Returns a ray originating from the camera that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic, the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a ray pointing from the camera into the world based on the cursor's position.
    ///         && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
    ///     {
    ///         println!("{ray:?}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
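    ///
    /// Internally, the ray is built by unprojecting two NDC points: one on the
    /// near plane (`z = 1.0`) and one just in front of the far plane
    /// (`z = f32::EPSILON`, since `z = 0.0` produces NaNs); the ray direction
    /// is the world-space vector between them.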
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let ndc_xy = self.viewport_to_ndc(viewport_position)?;

        let ndc_point_near = ndc_xy.extend(1.0).into();
        // Using EPSILON because an ndc with Z = 0 returns NaNs.
        let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
        let view_from_clip = self.computed.clip_from_view.inverse();
        let world_from_view = camera_transform.affine();
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        // Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
        let view_point_near = view_from_clip.project_point3a(ndc_point_near);
        let view_point_far = view_from_clip.project_point3a(ndc_point_far);
        let view_dir = view_point_far - view_point_near;
        let origin = world_from_view.transform_point3a(view_point_near).into();
        let direction = world_from_view.transform_vector3a(view_dir).into();

        // The fallible direction constructor ensures that direction isn't NaN.
        Dir3::new(direction)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d { origin, direction })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a world position based on the cursor's position.
    ///         && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
    ///     {
    ///         println!("World position: {world_pos:.2}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let ndc = self.viewport_to_ndc(viewport_position)?;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
    ///
    /// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
    /// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        world_point: V,
    ) -> Option<V> {
        let view_from_world = camera_transform.affine().inverse();
        let view_point = view_from_world.transform_point3a(world_point.into());
        let ndc_point = self.computed.clip_from_view.project_point3a(view_point);

        (!ndc_point.is_nan()).then_some(ndc_point.into())
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// The input is expected to be in NDC: `x` and `y` in the range `[-1.0, 1.0]`, and `z` in `[0.0, 1.0]`
    /// (with `z = 0.0` at the far plane and `z = 1.0` at the near plane).
    /// The returned value is a position in world space (your game's world units) and is not limited to `[-1.0, 1.0]`.
    /// To convert from a viewport position to world space, you should use
    /// [`viewport_to_world`](Self::viewport_to_world).
    ///
    /// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        ndc_point: V,
    ) -> Option<V> {
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        let view_point = self
            .computed
            .clip_from_view
            .inverse()
            .project_point3a(ndc_point.into());
        let world_point = camera_transform.affine().transform_point3a(view_point);

        (!world_point.is_nan()).then_some(world_point.into())
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward.
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward.
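    ///
    /// This inverts the orthographic projection's linear depth mapping
    /// `ndc_z = clip_from_view[2][2] * view_z + clip_from_view[3][2]`,
    /// solving it for `view_z`.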
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
        //                    [3][2]                                        [2][2]
    }

    /// Converts a position in viewport coordinates to NDC.
    pub fn viewport_to_ndc(
        &self,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
        let mut ndc = rect_relative * 2. - Vec2::ONE;
        // Flip the Y co-ordinate from the top to the bottom to enter NDC.
        ndc.y = -ndc.y;
        Ok(ndc)
    }
}

/// Control how this [`Camera`] outputs once rendering is completed.
#[derive(Debug, Clone, Copy, Reflect)]
pub enum CameraOutputMode {
    /// Writes the camera output to the configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// render target's "intermediate" textures, which a camera with a higher order should then write to the
    /// render target using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results
    /// from being displayed, or cause them to be lost, so only use it if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the skip mode can be used to remove
    /// unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
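///
/// A short sketch of targeting an [`Image`] instead of a window
/// (`image_handle` is a hypothetical, already-created render-texture handle):
///
/// ```no_run
/// # use bevy_asset::Handle;
/// # use bevy_camera::RenderTarget;
/// # use bevy_image::Image;
/// # fn render_target_for(image_handle: Handle<Image>) -> RenderTarget {
/// // The `From<Handle<Image>>` impl below wraps the handle in an
/// // `ImageRenderTarget` with a scale factor of 1.0.
/// RenderTarget::from(image_handle)
/// # }
/// ```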
#[derive(Component, Debug, Clone, Reflect, From)]
#[reflect(Clone, Component)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}

impl RenderTarget {
    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }
}

impl RenderTarget {
    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
            RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
                width: size.x,
                height: size.y,
            }),
        }
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}

/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
///
/// See `ManualTextureViews` in `bevy_camera` for more details.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);

/// A render target that renders to an [`Image`].
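///
/// A sketch of constructing one directly; `RenderTarget::from(image_handle)`
/// (shown above) is the more common shorthand:
///
/// ```no_run
/// # use bevy_asset::Handle;
/// # use bevy_camera::ImageRenderTarget;
/// # use bevy_image::Image;
/// # fn make(handle: Handle<Image>) -> ImageRenderTarget {
/// ImageRenderTarget {
///     handle,
///     scale_factor: 1.0,
/// }
/// # }
/// ```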
#[derive(Debug, Clone, Reflect)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: f32,
}

impl Eq for ImageRenderTarget {}

impl PartialEq for ImageRenderTarget {
    fn eq(&self, other: &Self) -> bool {
        self.handle == other.handle && FloatOrd(self.scale_factor) == FloatOrd(other.scale_factor)
    }
}

impl core::hash::Hash for ImageRenderTarget {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.handle.hash(state);
        FloatOrd(self.scale_factor).hash(state);
    }
}

impl PartialOrd for ImageRenderTarget {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ImageRenderTarget {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.handle
            .cmp(&other.handle)
            .then_with(|| FloatOrd(self.scale_factor).cmp(&FloatOrd(other.scale_factor)))
    }
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            scale_factor: 1.0,
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera.
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);

impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

impl CameraMainTextureUsages {
    /// Returns `self` with `usages` added to the current set of texture usages.
    pub fn with(mut self, usages: TextureUsages) -> Self {
        self.0 |= usages;
        self
    }
}

#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }
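    // A hypothetical companion test (not part of the original suite): the
    // world-space point one unit in front of a default perspective camera
    // should project back to the center of the viewport.
    #[test]
    fn world_to_viewport_perspective_center_roundtrip() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        let world_point = transform.translation() + transform.forward() * 1.0;
        let viewport_position = camera.world_to_viewport(&transform, world_point).unwrap();
        assert!(viewport_position.abs_diff_eq(size * 0.5, 1e-4));
    }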
    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}