Path: blob/21.2-virgl/src/gallium/winsys/svga/drm/vmwgfx_drm.h
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24

#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1
 * SM4_1 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
#define DRM_VMW_PARAM_SM5 15

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
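/*
 * Illustrative usage sketch (not part of the UAPI): querying a parameter
 * through libdrm's drmCommandWriteRead() from <xf86drm.h>. The helper name
 * vmw_has_3d() is hypothetical.
 *
 *	static int vmw_has_3d(int fd)
 *	{
 *		struct drm_vmw_getparam_arg arg = {
 *			.param = DRM_VMW_PARAM_3D,	// parameter to query
 *		};
 *
 *		// The driver fills in arg.value on success.
 *		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *					&arg, sizeof(arg)) != 0)
 *			return 0;
 *		return arg.value != 0;
 *	}
 */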
/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
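/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWriteRead() from <xf86drm.h> and memset() from <string.h>.
 * "format" stands in for a device-specific SVGA3d surface format value.
 * Note that @req and @rep share storage, so @req must be fully filled in
 * before the call and @rep is only valid afterwards.
 *
 *	static int vmw_create_legacy_surface(int fd, __u32 format)
 *	{
 *		struct drm_vmw_size size = {
 *			.width = 64, .height = 64, .depth = 1,
 *		};
 *		union drm_vmw_surface_create_arg arg;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.req.format = format;
 *		arg.req.mip_levels[0] = 1;	// one mip on face 0, unused faces stay 0
 *		arg.req.size_addr = (__u64)(unsigned long)&size;
 *
 *		if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *					&arg, sizeof(arg)) != 0)
 *			return -1;
 *		return arg.rep.sid;	// device unique surface id
 *	}
 */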
/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that, when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK, The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};
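/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWrite() from <xf86drm.h> and EFAULT from <errno.h>.
 * "commands"/"size" stand for an already-built SVGA command buffer; the
 * fence information is written back through the user-space pointer passed
 * in @fence_rep. DX contexts would additionally fill in @context_handle.
 *
 *	static int vmw_submit(int fd, void *commands, __u32 size,
 *			      struct drm_vmw_fence_rep *fence)
 *	{
 *		struct drm_vmw_execbuf_arg arg = {
 *			.commands = (__u64)(unsigned long)commands,
 *			.command_size = size,
 *			.fence_rep = (__u64)(unsigned long)fence,
 *			.version = DRM_VMW_EXECBUF_VERSION,
 *		};
 *
 *		// Pre-set the error member as documented above.
 *		fence->error = -EFAULT;
 *		return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	}
 */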
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * usable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
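/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWriteRead() from <xf86drm.h>, plus <sys/mman.h> and
 * <string.h>. On kernels predating DRM_VMW_ALLOC_BO the same request is
 * issued as DRM_VMW_ALLOC_DMABUF (same ioctl number and layout).
 *
 *	static void *vmw_alloc_and_map_bo(int fd, __u32 size, __u32 *handle)
 *	{
 *		union drm_vmw_alloc_bo_arg arg;
 *		void *map;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.req.size = size;
 *		if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO,
 *					&arg, sizeof(arg)) != 0)
 *			return NULL;
 *
 *		*handle = arg.rep.handle;
 *		// map_handle is the fake offset to pass to mmap() on the drm fd
 *		map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, (off_t)arg.rep.map_handle);
 *		return map == MAP_FAILED ? NULL : map;
 *	}
 */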
/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
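/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWrite() from <xf86drm.h>. The caller provides the capability
 * buffer; its required size is usually obtained first through
 * DRM_VMW_PARAM_3D_CAPS_SIZE (see DRM_VMW_GET_PARAM above).
 *
 *	static int vmw_read_3d_caps(int fd, void *caps, __u32 caps_size)
 *	{
 *		struct drm_vmw_get_3d_cap_arg arg = {
 *			.buffer = (__u64)(unsigned long)caps,
 *			.max_size = caps_size,	// at most this many bytes are copied
 *		};
 *
 *		return drmCommandWrite(fd, DRM_VMW_GET_3D_CAP,
 *				       &arg, sizeof(arg));
 *	}
 */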
/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the wait. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
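/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWriteRead() from <xf86drm.h> and the Linux-specific ERESTART
 * from <errno.h>. The handle comes from the drm_vmw_fence_rep returned by
 * DRM_VMW_EXECBUF; passing DRM_VMW_WAIT_OPTION_UNREF makes a successful
 * wait also drop the fence reference, so no separate DRM_VMW_FENCE_UNREF
 * call is needed.
 *
 *	static int vmw_fence_wait(int fd, __u32 handle, __u64 timeout_us)
 *	{
 *		struct drm_vmw_fence_wait_arg arg = {
 *			.handle = handle,
 *			.cookie_valid = 0,	// first call; left alone on restart
 *			.timeout_us = timeout_us,
 *			.lazy = 1,
 *			.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *			.wait_options = DRM_VMW_WAIT_OPTION_UNREF,
 *		};
 *		int ret;
 *
 *		do {
 *			ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT,
 *						  &arg, sizeof(arg));
 *		} while (ret == -ERESTART);	// restart keeps the cookie fields intact
 *		return ret;
 *	}
 */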
/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest sequence passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: User data to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};

/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
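/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWrite() from <xf86drm.h>. Layout updates are typically issued
 * by the display server / drm master rather than by ordinary render
 * clients.
 *
 *	static int vmw_set_single_output(int fd, __u32 width, __u32 height)
 *	{
 *		struct drm_vmw_rect rect = {
 *			.x = 0, .y = 0, .w = width, .h = height,
 *		};
 *		struct drm_vmw_update_layout_arg arg = {
 *			.num_outputs = 1,	// one connector, placed at the origin
 *			.rects = (__u64)(unsigned long)&rect,
 *		};
 *
 *		return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT,
 *				       &arg, sizeof(arg));
 *	}
 */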
/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};

/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};
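/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWriteRead() from <xf86drm.h>. The byte-code is expected to
 * already reside in a buffer object allocated with DRM_VMW_ALLOC_BO;
 * bo_handle/bo_offset identify where it starts.
 *
 *	static int vmw_create_vs_shader(int fd, __u32 bo_handle, __u64 bo_offset,
 *					__u32 bytecode_size, __u32 *shader_handle)
 *	{
 *		struct drm_vmw_shader_create_arg arg = {
 *			.shader_type = drm_vmw_shader_type_vs,
 *			.size = bytecode_size,
 *			.buffer_handle = bo_handle,
 *			.offset = bo_offset,
 *		};
 *		int ret;
 *
 *		ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
 *					  &arg, sizeof(arg));
 *		if (ret == 0)
 *			*shader_handle = arg.shader_handle;	// filled in by the driver
 *		return ret;
 *	}
 */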
/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2),
	drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
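/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWriteRead() from <xf86drm.h> and memset() from <string.h>.
 * "svga3d_flags" and "format" stand for device-specific SVGA3d values, and
 * ~0 stands in for SVGA3D_INVALID_ID, which is not defined in this header.
 * As with the other create unions, @req and @rep overlap, so @req is
 * consumed before @rep is written.
 *
 *	static int vmw_gb_surface_create(int fd, __u32 svga3d_flags, __u32 format,
 *					 struct drm_vmw_size base_size,
 *					 struct drm_vmw_gb_surface_create_rep *out)
 *	{
 *		union drm_vmw_gb_surface_create_arg arg;
 *		int ret;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.req.svga3d_flags = svga3d_flags;
 *		arg.req.format = format;
 *		arg.req.mip_levels = 1;
 *		arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *		arg.req.buffer_handle = (__u32)~0;	// no pre-existing backup buffer
 *		arg.req.base_size = base_size;
 *
 *		ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *					  &arg, sizeof(arg));
 *		if (ret == 0)
 *			*out = arg.rep;	// handle, backup buffer handle/size, map offset
 *		return ret;
 *	}
 */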
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
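/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWrite() from <xf86drm.h> and memcpy() from <string.h>. A
 * grab / access / release sequence for CPU writes to a buffer object
 * previously allocated with DRM_VMW_ALLOC_BO and mapped with mmap().
 *
 *	static int vmw_bo_cpu_write(int fd, __u32 bo_handle,
 *				    void *map, const void *data, size_t len)
 *	{
 *		struct drm_vmw_synccpu_arg arg = {
 *			.op = drm_vmw_synccpu_grab,
 *			.flags = drm_vmw_synccpu_write,	// block conflicting submissions
 *			.handle = bo_handle,
 *		};
 *		int ret;
 *
 *		ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *		if (ret != 0)
 *			return ret;
 *
 *		memcpy(map, data, len);		// CPU access while synced
 *
 *		arg.op = drm_vmw_synccpu_release;
 *		return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	}
 */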
/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and 64-bit svga flags.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
	struct drm_vmw_gb_surface_create_req base;
	enum drm_vmw_surface_version version;
	__u32 svga3d_flags_upper_32_bits;
	__u32 multisample_pattern;
	__u32 quality_level;
	__u32 buffer_byte_stride;
	__u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;
	struct drm_vmw_surface_arg req;
};
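/*
 * Illustrative usage sketch (not part of the UAPI), assuming libdrm's
 * drmCommandWriteRead() from <xf86drm.h> and memset() from <string.h>:
 * looking up the creation parameters of a surface received as a prime
 * handle from another client.
 *
 *	static int vmw_gb_surface_ref_ext(int fd, __s32 prime_handle,
 *					  struct drm_vmw_gb_surface_ref_ext_rep *out)
 *	{
 *		union drm_vmw_gb_surface_reference_ext_arg arg;
 *		int ret;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.req.sid = prime_handle;
 *		arg.req.handle_type = DRM_VMW_HANDLE_PRIME;
 *
 *		ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_REF_EXT,
 *					  &arg, sizeof(arg));
 *		if (ret == 0)
 *			*out = arg.rep;	// creation parameters + backup buffer info
 *		return ret;
 *	}
 */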
/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
	__u64 send;
	__u64 receive;
	__s32 send_only;
	__u32 receive_len;
};

#if defined(__cplusplus)
}
#endif

#endif