/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/**************************************************************************
 *
 * Copyright © 2009-2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_ALLOC_BO             1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT   27
#define DRM_VMW_GB_SURFACE_REF_EXT      28
#define DRM_VMW_MSG                     29
#define DRM_VMW_MKSSTAT_RESET           30
#define DRM_VMW_MKSSTAT_ADD             31
#define DRM_VMW_MKSSTAT_REMOVE          32

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1
 * SM4_1 support is enabled.
 *
 * DRM_VMW_PARAM_SM5
 * SM5 support is enabled.
 *
 * DRM_VMW_PARAM_GL43
 * SM5.1+GL4.3 support is enabled.
 *
 * DRM_VMW_PARAM_DEVICE_ID
 * PCI ID of the underlying SVGA device.
 */
#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13
#define DRM_VMW_PARAM_SM4_1            14
#define DRM_VMW_PARAM_SM5              15
#define DRM_VMW_PARAM_GL43             16
#define DRM_VMW_PARAM_DEVICE_ID        17

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
        DRM_VMW_HANDLE_LEGACY = 0,
        DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
        __u64 value;
        __u32 param;
        __u32 pad64;
};
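
/*
 * Illustrative sketch (not part of the uapi): querying a parameter with
 * libdrm's drmCommandWriteRead(). The file descriptor "fd" is assumed to be
 * an already opened vmwgfx DRM device.
 *
 *	#include <xf86drm.h>
 *
 *	static int vmw_has_3d(int fd)
 *	{
 *		struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
 *			return 0;
 *		return arg.value != 0;
 *	}
 */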

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
        __s32 cid;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
        __u32 flags;
        __u32 format;
        __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        __u64 size_addr;
        __s32 shareable;
        __s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
        __s32 sid;
        enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
        __u32 width;
        __u32 height;
        __u32 depth;
        __u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
        struct drm_vmw_surface_arg rep;
        struct drm_vmw_surface_create_req req;
};
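
/*
 * Illustrative sketch (not part of the uapi): creating a single-face,
 * single-mip legacy surface. The format value and the open DRM fd "fd" are
 * placeholders; the pointer-to-__u64 cast follows the @size_addr description
 * above.
 *
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg = {0};
 *
 *	arg.req.format = 2;			// SVGA3D format, placeholder
 *	arg.req.mip_levels[0] = 1;		// one mip level on face 0
 *	arg.req.size_addr = (__u64)(unsigned long)&size;
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg)) == 0)
 *		printf("surface id %d\n", arg.rep.sid);
 */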

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
        struct drm_vmw_surface_create_req rep;
        struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation,
 * a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
        __u64 commands;
        __u32 command_size;
        __u32 throttle_us;
        __u64 fence_rep;
        __u32 version;
        __u32 flags;
        __u32 context_handle;
        __s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
        __u32 handle;
        __u32 mask;
        __u32 seqno;
        __u32 passed_seqno;
        __s32 fd;
        __s32 error;
};
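
/*
 * Illustrative sketch (not part of the uapi): submitting a command buffer
 * and collecting the returned fence. "cmd_buf" and "cmd_len" stand for an
 * already encoded SVGA command stream; error handling is omitted.
 *
 *	struct drm_vmw_fence_rep fence = {0};
 *	struct drm_vmw_execbuf_arg arg = {0};
 *
 *	arg.commands = (__u64)(unsigned long)cmd_buf;
 *	arg.command_size = cmd_len;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.fence_rep = (__u64)(unsigned long)&fence;
 *	arg.imported_fence_fd = -1;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	// fence.handle can now be passed to DRM_VMW_FENCE_WAIT / _UNREF.
 */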

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * usable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
        __u32 size;
        __u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
        __u64 map_handle;
        __u32 handle;
        __u32 cur_gmr_id;
        __u32 cur_gmr_offset;
        __u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
        struct drm_vmw_alloc_bo_req req;
        struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
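
/*
 * Illustrative sketch (not part of the uapi): allocating a buffer object and
 * mapping it with mmap() through @map_handle, as described above. "fd" is an
 * open vmwgfx DRM fd; error handling is omitted.
 *
 *	union drm_vmw_alloc_bo_arg arg = {0};
 *	void *ptr;
 *
 *	arg.req.size = 65536;
 *	drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg, sizeof(arg));
 *	ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.rep.map_handle);
 */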

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
        __s32 x;
        __s32 y;
        __u32 w;
        __u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
        __u32 stream_id;
        __u32 enabled;

        __u32 flags;
        __u32 color_key;

        __u32 handle;
        __u32 offset;
        __s32 format;
        __u32 size;
        __u32 width;
        __u32 height;
        __u32 pitch[3];

        __u32 pad64;
        struct drm_vmw_rect src;
        struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
        __u32 flags;
        __u32 crtc_id;
        __s32 xpos;
        __s32 ypos;
        __s32 xhot;
        __s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
        __u32 stream_id;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
        __u64 buffer;
        __u32 max_size;
        __u32 pad64;
};
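
/*
 * Illustrative sketch (not part of the uapi): reading the 3D capability
 * block. The required buffer size is queried with DRM_VMW_PARAM_3D_CAPS_SIZE
 * first; "fd" is an open vmwgfx DRM fd and error handling is omitted.
 *
 *	struct drm_vmw_getparam_arg gp = { .param = DRM_VMW_PARAM_3D_CAPS_SIZE };
 *	struct drm_vmw_get_3d_cap_arg cap = {0};
 *	void *buf;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp));
 *	buf = calloc(1, gp.value);
 *	cap.buffer = (__u64)(unsigned long)buf;
 *	cap.max_size = gp.value;
 *	drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));
 */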

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream
 * have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands
 * in the buffer given to the EXECBUF ioctl returning the fence object handle
 * are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
        __u32 handle;
        __s32 cookie_valid;
        __u64 kernel_cookie;
        __u64 timeout_us;
        __s32 lazy;
        __s32 flags;
        __s32 wait_options;
        __s32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Flags signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
        __u32 handle;
        __u32 flags;
        __s32 signaled;
        __u32 passed_seqno;
        __u32 signaled_flags;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
        __u32 handle;
        __u32 pad64;
};
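
/*
 * Illustrative sketch (not part of the uapi): waiting for a fence returned by
 * DRM_VMW_EXECBUF and dropping the reference in the same call using
 * DRM_VMW_WAIT_OPTION_UNREF. "fence.handle" comes from struct
 * drm_vmw_fence_rep above; error handling is omitted.
 *
 *	struct drm_vmw_fence_wait_arg wait = {0};
 *
 *	wait.handle = fence.handle;
 *	wait.timeout_us = 10 * 1000000;		// 10 seconds
 *	wait.lazy = 1;
 *	wait.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *	wait.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *	drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &wait, sizeof(wait));
 */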


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
        struct drm_event base;
        __u64 user_data;
        __u32 tv_sec;
        __u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
        __u64 fence_rep;
        __u64 user_data;
        __u32 handle;
        __u32 flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
        __u32 fb_id;
        __u32 sid;
        __s32 dest_x;
        __s32 dest_y;
        __u64 clips_ptr;
        __u32 num_clips;
        __u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
        __u32 fb_id;
        __u32 num_clips;
        __u64 clips_ptr;
        __u64 fence_rep;
};
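
/*
 * Illustrative sketch (not part of the uapi): presenting a surface onto a
 * framebuffer with a single full-size cliprect. "fb_id" and "sid" are assumed
 * to come from a previous DRM framebuffer creation and surface creation.
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
 *	struct drm_vmw_present_arg arg = {0};
 *
 *	arg.fb_id = fb_id;
 *	arg.sid = sid;
 *	arg.clips_ptr = (__u64)(unsigned long)&clip;
 *	arg.num_clips = 1;
 *	drmCommandWrite(fd, DRM_VMW_PRESENT, &arg, sizeof(arg));
 */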

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
        __u32 num_outputs;
        __u32 pad64;
        __u64 rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
        drm_vmw_shader_type_vs = 0,
        drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
        enum drm_vmw_shader_type shader_type;
        __u32 size;
        __u32 buffer_handle;
        __u32 shader_handle;
        __u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
        __u32 handle;
        __u32 pad64;
};
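
/*
 * Illustrative sketch (not part of the uapi): creating a pixel shader from
 * byte-code previously copied into a buffer object. "bo_handle" is assumed to
 * be a handle from DRM_VMW_ALLOC_BO and "bytecode_size" the size of the
 * SVGA3D shader byte-code stored at offset 0.
 *
 *	struct drm_vmw_shader_create_arg arg = {0};
 *
 *	arg.shader_type = drm_vmw_shader_type_ps;
 *	arg.size = bytecode_size;
 *	arg.buffer_handle = bo_handle;
 *	arg.offset = 0;
 *	drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER, &arg, sizeof(arg));
 *	// arg.shader_handle now identifies the shader.
 */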

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Deprecated - all userspace surfaces are
 * shareable.
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
        drm_vmw_surface_flag_shareable = (1 << 0),
        drm_vmw_surface_flag_scanout = (1 << 1),
        drm_vmw_surface_flag_create_buffer = (1 << 2),
        drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
        __u32 svga3d_flags;
        __u32 format;
        __u32 mip_levels;
        enum drm_vmw_surface_flags drm_surface_flags;
        __u32 multisample_count;
        __u32 autogen_filter;
        __u32 buffer_handle;
        __u32 array_size;
        struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
        __u32 handle;
        __u32 backup_size;
        __u32 buffer_handle;
        __u32 buffer_size;
        __u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
        struct drm_vmw_gb_surface_create_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
        struct drm_vmw_gb_surface_ref_rep rep;
        struct drm_vmw_surface_arg req;
};
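
/*
 * Illustrative sketch (not part of the uapi): creating a guest-backed surface
 * and letting the kernel allocate the backup buffer. The svga3d_flags and
 * format values are placeholders taken from the SVGA3D device headers, which
 * are not part of this file.
 *
 *	union drm_vmw_gb_surface_create_arg arg = {0};
 *
 *	arg.req.svga3d_flags = 0;		// placeholder bind flags
 *	arg.req.format = 2;			// placeholder SVGA3D format
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = ~0u;		// SVGA3D_INVALID_ID: no buffer yet
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *	drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE, &arg, sizeof(arg));
 *	// arg.rep.handle, arg.rep.buffer_handle and arg.rep.backup_size
 *	// are now valid.
 */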


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
        drm_vmw_synccpu_read = (1 << 0),
        drm_vmw_synccpu_write = (1 << 1),
        drm_vmw_synccpu_dontblock = (1 << 2),
        drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
        drm_vmw_synccpu_grab,
        drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
        enum drm_vmw_synccpu_op op;
        enum drm_vmw_synccpu_flags flags;
        __u32 handle;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
        drm_vmw_context_legacy,
        drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
        enum drm_vmw_extended_context req;
        struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
        __u32 handle;
        __u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
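
/*
 * Illustrative sketch (not part of the uapi): grabbing a buffer object for
 * CPU writes before filling it through its mmap()ed pointer, then releasing
 * the sync. "bo_handle" is assumed to come from DRM_VMW_ALLOC_BO.
 *
 *	struct drm_vmw_synccpu_arg sync = {0};
 *
 *	sync.op = drm_vmw_synccpu_grab;
 *	sync.flags = drm_vmw_synccpu_write;
 *	sync.handle = bo_handle;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 *	// ... write the buffer contents through the mapping ...
 *	sync.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync, sizeof(sync));
 */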

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and 64-bit svga flags.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
        drm_vmw_gb_surface_v1,
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
        struct drm_vmw_gb_surface_create_req base;
        enum drm_vmw_surface_version version;
        __u32 svga3d_flags_upper_32_bits;
        __u32 multisample_pattern;
        __u32 quality_level;
        __u32 buffer_byte_stride;
        __u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
        struct drm_vmw_gb_surface_create_ext_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
        struct drm_vmw_gb_surface_ref_ext_rep rep;
        struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
        __u64 send;
        __u64 receive;
        __s32 send_only;
        __u32 receive_len;
};

/**
 * struct drm_vmw_mksstat_add_arg
 *
 * @stat: Pointer to user-space stat-counters array, page-aligned.
 * @info: Pointer to user-space counter-infos array, page-aligned.
 * @strs: Pointer to user-space stat strings, page-aligned.
 * @stat_len: Length in bytes of stat-counters array.
 * @info_len: Length in bytes of counter-infos array.
 * @strs_len: Length in bytes of the stat strings, terminators included.
 * @description: Pointer to instance descriptor string; will be truncated
 * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
 * @id: Output identifier of the produced record; -1 if error.
 *
 * Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
 */
struct drm_vmw_mksstat_add_arg {
        __u64 stat;
        __u64 info;
        __u64 strs;
        __u64 stat_len;
        __u64 info_len;
        __u64 strs_len;
        __u64 description;
        __u64 id;
};

/**
 * struct drm_vmw_mksstat_remove_arg
 *
 * @id: Identifier of the record being disposed, originally obtained through
 * DRM_VMW_MKSSTAT_ADD ioctl.
 *
 * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
 */
struct drm_vmw_mksstat_remove_arg {
        __u64 id;
};

#if defined(__cplusplus)
}
#endif

#endif