/* Source: blob/21.2-virgl/include/drm-uapi/i915_drm.h */
/*1* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.2* All Rights Reserved.3*4* Permission is hereby granted, free of charge, to any person obtaining a5* copy of this software and associated documentation files (the6* "Software"), to deal in the Software without restriction, including7* without limitation the rights to use, copy, modify, merge, publish,8* distribute, sub license, and/or sell copies of the Software, and to9* permit persons to whom the Software is furnished to do so, subject to10* the following conditions:11*12* The above copyright notice and this permission notice (including the13* next paragraph) shall be included in all copies or substantial portions14* of the Software.15*16* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS17* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF18* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.19* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR20* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,21* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE22* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.23*24*/2526#ifndef _I915_DRM_H_27#define _I915_DRM_H_2829#include "drm.h"3031#if defined(__cplusplus)32extern "C" {33#endif3435/* Please note that modifications to all structs defined here are36* subject to backwards-compatibility constraints.37*/3839/**40* DOC: uevents generated by i915 on it's device node41*42* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch43* event from the gpu l3 cache. Additional information supplied is ROW,44* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep45* track of these events and if a specific cache-line seems to have a46* persistent error remap it with the l3 remapping tool supplied in47* intel-gpu-tools. 
The value supplied with the event is always 1.48*49* I915_ERROR_UEVENT - Generated upon error detection, currently only via50* hangcheck. The error detection event is a good indicator of when things51* began to go badly. The value supplied with the event is a 1 upon error52* detection, and a 0 upon reset completion, signifying no more error53* exists. NOTE: Disabling hangcheck or reset via module parameter will54* cause the related events to not be seen.55*56* I915_RESET_UEVENT - Event is generated just before an attempt to reset the57* GPU. The value supplied with the event is always 1. NOTE: Disable58* reset via module parameter will cause this event to not be seen.59*/60#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"61#define I915_ERROR_UEVENT "ERROR"62#define I915_RESET_UEVENT "RESET"6364/**65* struct i915_user_extension - Base class for defining a chain of extensions66*67* Many interfaces need to grow over time. In most cases we can simply68* extend the struct and have userspace pass in more data. Another option,69* as demonstrated by Vulkan's approach to providing extensions for forward70* and backward compatibility, is to use a list of optional structs to71* provide those extra details.72*73* The key advantage to using an extension chain is that it allows us to74* redefine the interface more easily than an ever growing struct of75* increasing complexity, and for large parts of that interface to be76* entirely optional. The downside is more pointer chasing; chasing across77* the boundary with pointers encapsulated inside u64.78*79* Example chaining:80*81* .. 
code-block:: C82*83* struct i915_user_extension ext3 {84* .next_extension = 0, // end85* .name = ...,86* };87* struct i915_user_extension ext2 {88* .next_extension = (uintptr_t)&ext3,89* .name = ...,90* };91* struct i915_user_extension ext1 {92* .next_extension = (uintptr_t)&ext2,93* .name = ...,94* };95*96* Typically the struct i915_user_extension would be embedded in some uAPI97* struct, and in this case we would feed it the head of the chain(i.e ext1),98* which would then apply all of the above extensions.99*100*/101struct i915_user_extension {102/**103* @next_extension:104*105* Pointer to the next struct i915_user_extension, or zero if the end.106*/107__u64 next_extension;108/**109* @name: Name of the extension.110*111* Note that the name here is just some integer.112*113* Also note that the name space for this is not global for the whole114* driver, but rather its scope/meaning is limited to the specific piece115* of uAPI which has embedded the struct i915_user_extension.116*/117__u32 name;118/**119* @flags: MBZ120*121* All undefined bits must be zero.122*/123__u32 flags;124/**125* @rsvd: MBZ126*127* Reserved for future use; must be zero.128*/129__u32 rsvd[4];130};131132/*133* MOCS indexes used for GPU surfaces, defining the cacheability of the134* surface data and the coherency for this data wrt. CPU vs. 
GPU accesses.135*/136enum i915_mocs_table_index {137/*138* Not cached anywhere, coherency between CPU and GPU accesses is139* guaranteed.140*/141I915_MOCS_UNCACHED,142/*143* Cacheability and coherency controlled by the kernel automatically144* based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current145* usage of the surface (used for display scanout or not).146*/147I915_MOCS_PTE,148/*149* Cached in all GPU caches available on the platform.150* Coherency between CPU and GPU accesses to the surface is not151* guaranteed without extra synchronization.152*/153I915_MOCS_CACHED,154};155156/*157* Different engines serve different roles, and there may be more than one158* engine serving each role. enum drm_i915_gem_engine_class provides a159* classification of the role of the engine, which may be used when requesting160* operations to be performed on a certain subset of engines, or for providing161* information about that group.162*/163enum drm_i915_gem_engine_class {164I915_ENGINE_CLASS_RENDER = 0,165I915_ENGINE_CLASS_COPY = 1,166I915_ENGINE_CLASS_VIDEO = 2,167I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,168169/* should be kept compact */170171I915_ENGINE_CLASS_INVALID = -1172};173174/*175* There may be more than one engine fulfilling any role within the system.176* Each engine of a class is given a unique instance number and therefore177* any engine can be specified by its class:instance tuplet. 
APIs that allow178* access to any engine in the system will use struct i915_engine_class_instance179* for this identification.180*/181struct i915_engine_class_instance {182__u16 engine_class; /* see enum drm_i915_gem_engine_class */183__u16 engine_instance;184#define I915_ENGINE_CLASS_INVALID_NONE -1185#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2186};187188/**189* DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915190*191*/192193enum drm_i915_pmu_engine_sample {194I915_SAMPLE_BUSY = 0,195I915_SAMPLE_WAIT = 1,196I915_SAMPLE_SEMA = 2197};198199#define I915_PMU_SAMPLE_BITS (4)200#define I915_PMU_SAMPLE_MASK (0xf)201#define I915_PMU_SAMPLE_INSTANCE_BITS (8)202#define I915_PMU_CLASS_SHIFT \203(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)204205#define __I915_PMU_ENGINE(class, instance, sample) \206((class) << I915_PMU_CLASS_SHIFT | \207(instance) << I915_PMU_SAMPLE_BITS | \208(sample))209210#define I915_PMU_ENGINE_BUSY(class, instance) \211__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)212213#define I915_PMU_ENGINE_WAIT(class, instance) \214__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)215216#define I915_PMU_ENGINE_SEMA(class, instance) \217__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)218219#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))220221#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)222#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)223#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)224#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)225#define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4)226227#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY228229/* Each region is a minimum of 16k, and there are at most 255 of them.230*/231#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use232* of chars for next/prev indices */233#define I915_LOG_MIN_TEX_REGION_SIZE 14234235typedef struct _drm_i915_init {236enum {237I915_INIT_DMA = 
0x01,238I915_CLEANUP_DMA = 0x02,239I915_RESUME_DMA = 0x03240} func;241unsigned int mmio_offset;242int sarea_priv_offset;243unsigned int ring_start;244unsigned int ring_end;245unsigned int ring_size;246unsigned int front_offset;247unsigned int back_offset;248unsigned int depth_offset;249unsigned int w;250unsigned int h;251unsigned int pitch;252unsigned int pitch_bits;253unsigned int back_pitch;254unsigned int depth_pitch;255unsigned int cpp;256unsigned int chipset;257} drm_i915_init_t;258259typedef struct _drm_i915_sarea {260struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];261int last_upload; /* last time texture was uploaded */262int last_enqueue; /* last time a buffer was enqueued */263int last_dispatch; /* age of the most recently dispatched buffer */264int ctxOwner; /* last context to upload state */265int texAge;266int pf_enabled; /* is pageflipping allowed? */267int pf_active;268int pf_current_page; /* which buffer is being displayed? */269int perf_boxes; /* performance boxes to be displayed */270int width, height; /* screen size in pixels */271272drm_handle_t front_handle;273int front_offset;274int front_size;275276drm_handle_t back_handle;277int back_offset;278int back_size;279280drm_handle_t depth_handle;281int depth_offset;282int depth_size;283284drm_handle_t tex_handle;285int tex_offset;286int tex_size;287int log_tex_granularity;288int pitch;289int rotation; /* 0, 90, 180 or 270 */290int rotated_offset;291int rotated_size;292int rotated_pitch;293int virtualX, virtualY;294295unsigned int front_tiled;296unsigned int back_tiled;297unsigned int depth_tiled;298unsigned int rotated_tiled;299unsigned int rotated2_tiled;300301int pipeA_x;302int pipeA_y;303int pipeA_w;304int pipeA_h;305int pipeB_x;306int pipeB_y;307int pipeB_w;308int pipeB_h;309310/* fill out some space for old userspace triple buffer */311drm_handle_t unused_handle;312__u32 unused1, unused2, unused3;313314/* buffer object handles for static buffers. 
May change315* over the lifetime of the client.316*/317__u32 front_bo_handle;318__u32 back_bo_handle;319__u32 unused_bo_handle;320__u32 depth_bo_handle;321322} drm_i915_sarea_t;323324/* due to userspace building against these headers we need some compat here */325#define planeA_x pipeA_x326#define planeA_y pipeA_y327#define planeA_w pipeA_w328#define planeA_h pipeA_h329#define planeB_x pipeB_x330#define planeB_y pipeB_y331#define planeB_w pipeB_w332#define planeB_h pipeB_h333334/* Flags for perf_boxes335*/336#define I915_BOX_RING_EMPTY 0x1337#define I915_BOX_FLIP 0x2338#define I915_BOX_WAIT 0x4339#define I915_BOX_TEXTURE_LOAD 0x8340#define I915_BOX_LOST_CONTEXT 0x10341342/*343* i915 specific ioctls.344*345* The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie346* [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset347* against DRM_COMMAND_BASE and should be between [0x0, 0x60).348*/349#define DRM_I915_INIT 0x00350#define DRM_I915_FLUSH 0x01351#define DRM_I915_FLIP 0x02352#define DRM_I915_BATCHBUFFER 0x03353#define DRM_I915_IRQ_EMIT 0x04354#define DRM_I915_IRQ_WAIT 0x05355#define DRM_I915_GETPARAM 0x06356#define DRM_I915_SETPARAM 0x07357#define DRM_I915_ALLOC 0x08358#define DRM_I915_FREE 0x09359#define DRM_I915_INIT_HEAP 0x0a360#define DRM_I915_CMDBUFFER 0x0b361#define DRM_I915_DESTROY_HEAP 0x0c362#define DRM_I915_SET_VBLANK_PIPE 0x0d363#define DRM_I915_GET_VBLANK_PIPE 0x0e364#define DRM_I915_VBLANK_SWAP 0x0f365#define DRM_I915_HWS_ADDR 0x11366#define DRM_I915_GEM_INIT 0x13367#define DRM_I915_GEM_EXECBUFFER 0x14368#define DRM_I915_GEM_PIN 0x15369#define DRM_I915_GEM_UNPIN 0x16370#define DRM_I915_GEM_BUSY 0x17371#define DRM_I915_GEM_THROTTLE 0x18372#define DRM_I915_GEM_ENTERVT 0x19373#define DRM_I915_GEM_LEAVEVT 0x1a374#define DRM_I915_GEM_CREATE 0x1b375#define DRM_I915_GEM_PREAD 0x1c376#define DRM_I915_GEM_PWRITE 0x1d377#define DRM_I915_GEM_MMAP 0x1e378#define DRM_I915_GEM_SET_DOMAIN 0x1f379#define DRM_I915_GEM_SW_FINISH 
0x20380#define DRM_I915_GEM_SET_TILING 0x21381#define DRM_I915_GEM_GET_TILING 0x22382#define DRM_I915_GEM_GET_APERTURE 0x23383#define DRM_I915_GEM_MMAP_GTT 0x24384#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25385#define DRM_I915_GEM_MADVISE 0x26386#define DRM_I915_OVERLAY_PUT_IMAGE 0x27387#define DRM_I915_OVERLAY_ATTRS 0x28388#define DRM_I915_GEM_EXECBUFFER2 0x29389#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2390#define DRM_I915_GET_SPRITE_COLORKEY 0x2a391#define DRM_I915_SET_SPRITE_COLORKEY 0x2b392#define DRM_I915_GEM_WAIT 0x2c393#define DRM_I915_GEM_CONTEXT_CREATE 0x2d394#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e395#define DRM_I915_GEM_SET_CACHING 0x2f396#define DRM_I915_GEM_GET_CACHING 0x30397#define DRM_I915_REG_READ 0x31398#define DRM_I915_GET_RESET_STATS 0x32399#define DRM_I915_GEM_USERPTR 0x33400#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34401#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35402#define DRM_I915_PERF_OPEN 0x36403#define DRM_I915_PERF_ADD_CONFIG 0x37404#define DRM_I915_PERF_REMOVE_CONFIG 0x38405#define DRM_I915_QUERY 0x39406#define DRM_I915_GEM_VM_CREATE 0x3a407#define DRM_I915_GEM_VM_DESTROY 0x3b408#define DRM_I915_GEM_CREATE_EXT 0x3c409/* Must be kept compact -- no holes */410411#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)412#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)413#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)414#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)415#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)416#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)417#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)418#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)419#define 
DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)420#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)421#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)422#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)423#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)424#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)425#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)426#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)427#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)428#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)429#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)430#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)431#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)432#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)433#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)434#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)435#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)436#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, 
struct drm_i915_gem_caching)437#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)438#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)439#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)440#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)441#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)442#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)443#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)444#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)445#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)446#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)447#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)448#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)449#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)450#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)451#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)452#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)453#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct 
drm_i915_gem_madvise)454#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)455#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)456#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)457#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)458#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)459#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)460#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)461#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)462#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)463#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)464#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)465#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)466#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)467#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)468#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)469#define 
DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)470#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)471#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)472#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)473474/* Allow drivers to submit batchbuffers directly to hardware, relying475* on the security mechanisms provided by hardware.476*/477typedef struct drm_i915_batchbuffer {478int start; /* agp offset */479int used; /* nr bytes in use */480int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */481int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */482int num_cliprects; /* mulitpass with multiple cliprects? */483struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */484} drm_i915_batchbuffer_t;485486/* As above, but pass a pointer to userspace buffer which can be487* validated by the kernel prior to sending to hardware.488*/489typedef struct _drm_i915_cmdbuffer {490char *buf; /* pointer to userspace command buffer */491int sz; /* nr bytes in buf */492int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */493int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */494int num_cliprects; /* mulitpass with multiple cliprects? 
*/495struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */496} drm_i915_cmdbuffer_t;497498/* Userspace can request & wait on irq's:499*/500typedef struct drm_i915_irq_emit {501int *irq_seq;502} drm_i915_irq_emit_t;503504typedef struct drm_i915_irq_wait {505int irq_seq;506} drm_i915_irq_wait_t;507508/*509* Different modes of per-process Graphics Translation Table,510* see I915_PARAM_HAS_ALIASING_PPGTT511*/512#define I915_GEM_PPGTT_NONE 0513#define I915_GEM_PPGTT_ALIASING 1514#define I915_GEM_PPGTT_FULL 2515516/* Ioctl to query kernel params:517*/518#define I915_PARAM_IRQ_ACTIVE 1519#define I915_PARAM_ALLOW_BATCHBUFFER 2520#define I915_PARAM_LAST_DISPATCH 3521#define I915_PARAM_CHIPSET_ID 4522#define I915_PARAM_HAS_GEM 5523#define I915_PARAM_NUM_FENCES_AVAIL 6524#define I915_PARAM_HAS_OVERLAY 7525#define I915_PARAM_HAS_PAGEFLIPPING 8526#define I915_PARAM_HAS_EXECBUF2 9527#define I915_PARAM_HAS_BSD 10528#define I915_PARAM_HAS_BLT 11529#define I915_PARAM_HAS_RELAXED_FENCING 12530#define I915_PARAM_HAS_COHERENT_RINGS 13531#define I915_PARAM_HAS_EXEC_CONSTANTS 14532#define I915_PARAM_HAS_RELAXED_DELTA 15533#define I915_PARAM_HAS_GEN7_SOL_RESET 16534#define I915_PARAM_HAS_LLC 17535#define I915_PARAM_HAS_ALIASING_PPGTT 18536#define I915_PARAM_HAS_WAIT_TIMEOUT 19537#define I915_PARAM_HAS_SEMAPHORES 20538#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21539#define I915_PARAM_HAS_VEBOX 22540#define I915_PARAM_HAS_SECURE_BATCHES 23541#define I915_PARAM_HAS_PINNED_BATCHES 24542#define I915_PARAM_HAS_EXEC_NO_RELOC 25543#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26544#define I915_PARAM_HAS_WT 27545#define I915_PARAM_CMD_PARSER_VERSION 28546#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29547#define I915_PARAM_MMAP_VERSION 30548#define I915_PARAM_HAS_BSD2 31549#define I915_PARAM_REVISION 32550#define I915_PARAM_SUBSLICE_TOTAL 33551#define I915_PARAM_EU_TOTAL 34552#define I915_PARAM_HAS_GPU_RESET 35553#define I915_PARAM_HAS_RESOURCE_STREAMER 36554#define I915_PARAM_HAS_EXEC_SOFTPIN 
37555#define I915_PARAM_HAS_POOLED_EU 38556#define I915_PARAM_MIN_EU_IN_POOL 39557#define I915_PARAM_MMAP_GTT_VERSION 40558559/*560* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution561* priorities and the driver will attempt to execute batches in priority order.562* The param returns a capability bitmask, nonzero implies that the scheduler563* is enabled, with different features present according to the mask.564*565* The initial priority for each batch is supplied by the context and is566* controlled via I915_CONTEXT_PARAM_PRIORITY.567*/568#define I915_PARAM_HAS_SCHEDULER 41569#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)570#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)571#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)572#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)573#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)574575#define I915_PARAM_HUC_STATUS 42576577/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of578* synchronisation with implicit fencing on individual objects.579* See EXEC_OBJECT_ASYNC.580*/581#define I915_PARAM_HAS_EXEC_ASYNC 43582583/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -584* both being able to pass in a sync_file fd to wait upon before executing,585* and being able to return a new sync_file fd that is signaled when the586* current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.587*/588#define I915_PARAM_HAS_EXEC_FENCE 44589590/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture591* user specified bufffers for post-mortem debugging of GPU hangs. 
See592* EXEC_OBJECT_CAPTURE.593*/594#define I915_PARAM_HAS_EXEC_CAPTURE 45595596#define I915_PARAM_SLICE_MASK 46597598/* Assuming it's uniform for each slice, this queries the mask of subslices599* per-slice for this system.600*/601#define I915_PARAM_SUBSLICE_MASK 47602603/*604* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer605* as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.606*/607#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48608609/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of610* drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.611*/612#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49613614/*615* Query whether every context (both per-file default and user created) is616* isolated (insofar as HW supports). If this parameter is not true, then617* freshly created contexts may inherit values from an existing context,618* rather than default HW values. If true, it also ensures (insofar as HW619* supports) that all state set by this context will not leak to any other620* context.621*622* As not every engine across every gen support contexts, the returned623* value reports the support of context isolation for individual engines by624* returning a bitmask of each engine class set to true if that class supports625* isolation.626*/627#define I915_PARAM_HAS_CONTEXT_ISOLATION 50628629/* Frequency of the command streamer timestamps given by the *_TIMESTAMP630* registers. This used to be fixed per platform but from CNL onwards, this631* might vary depending on the parts.632*/633#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51634635/*636* Once upon a time we supposed that writes through the GGTT would be637* immediately in physical memory (once flushed out of the CPU path). However,638* on a few different processors and chipsets, this is not necessarily the case639* as the writes appear to be buffered internally. 
Thus a read of the backing640* storage (physical memory) via a different path (with different physical tags641* to the indirect write via the GGTT) will see stale values from before642* the GGTT write. Inside the kernel, we can for the most part keep track of643* the different read/write domains in use (e.g. set-domain), but the assumption644* of coherency is baked into the ABI, hence reporting its true state in this645* parameter.646*647* Reports true when writes via mmap_gtt are immediately visible following an648* lfence to flush the WCB.649*650* Reports false when writes via mmap_gtt are indeterminately delayed in an in651* internal buffer and are _not_ immediately visible to third parties accessing652* directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC653* communications channel when reporting false is strongly disadvised.654*/655#define I915_PARAM_MMAP_GTT_COHERENT 52656657/*658* Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel659* execution through use of explicit fence support.660* See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.661*/662#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53663664/*665* Revision of the i915-perf uAPI. The value returned helps determine what666* i915-perf features are available. See drm_i915_perf_property_id.667*/668#define I915_PARAM_PERF_REVISION 54669670/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of671* timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See672* I915_EXEC_USE_EXTENSIONS.673*/674#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55675676/* Must be kept compact -- no holes and well documented */677678typedef struct drm_i915_getparam {679__s32 param;680/*681* WARNING: Using pointers instead of fixed-size u64 means we need to write682* compat32 code. 
Don't repeat this mistake.683*/684int *value;685} drm_i915_getparam_t;686687/* Ioctl to set kernel params:688*/689#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1690#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2691#define I915_SETPARAM_ALLOW_BATCHBUFFER 3692#define I915_SETPARAM_NUM_USED_FENCES 4693/* Must be kept compact -- no holes */694695typedef struct drm_i915_setparam {696int param;697int value;698} drm_i915_setparam_t;699700/* A memory manager for regions of shared memory:701*/702#define I915_MEM_REGION_AGP 1703704typedef struct drm_i915_mem_alloc {705int region;706int alignment;707int size;708int *region_offset; /* offset from start of fb or agp */709} drm_i915_mem_alloc_t;710711typedef struct drm_i915_mem_free {712int region;713int region_offset;714} drm_i915_mem_free_t;715716typedef struct drm_i915_mem_init_heap {717int region;718int size;719int start;720} drm_i915_mem_init_heap_t;721722/* Allow memory manager to be torn down and re-initialized (eg on723* rotate):724*/725typedef struct drm_i915_mem_destroy_heap {726int region;727} drm_i915_mem_destroy_heap_t;728729/* Allow X server to configure which pipes to monitor for vblank signals730*/731#define DRM_I915_VBLANK_PIPE_A 1732#define DRM_I915_VBLANK_PIPE_B 2733734typedef struct drm_i915_vblank_pipe {735int pipe;736} drm_i915_vblank_pipe_t;737738/* Schedule buffer swap at given vertical blank:739*/740typedef struct drm_i915_vblank_swap {741drm_drawable_t drawable;742enum drm_vblank_seq_type seqtype;743unsigned int sequence;744} drm_i915_vblank_swap_t;745746typedef struct drm_i915_hws_addr {747__u64 addr;748} drm_i915_hws_addr_t;749750struct drm_i915_gem_init {751/**752* Beginning offset in the GTT to be managed by the DRM memory753* manager.754*/755__u64 gtt_start;756/**757* Ending offset in the GTT to be managed by the DRM memory758* manager.759*/760__u64 gtt_end;761};762763struct drm_i915_gem_create {764/**765* Requested size for the object.766*767* The (page-aligned) allocated size for the object will 
be returned.768*/769__u64 size;770/**771* Returned handle for the object.772*773* Object handles are nonzero.774*/775__u32 handle;776__u32 pad;777};778779struct drm_i915_gem_pread {780/** Handle for the object being read. */781__u32 handle;782__u32 pad;783/** Offset into the object to read from */784__u64 offset;785/** Length of data to read */786__u64 size;787/**788* Pointer to write the data into.789*790* This is a fixed-size type for 32/64 compatibility.791*/792__u64 data_ptr;793};794795struct drm_i915_gem_pwrite {796/** Handle for the object being written to. */797__u32 handle;798__u32 pad;799/** Offset into the object to write to */800__u64 offset;801/** Length of data to write */802__u64 size;803/**804* Pointer to read the data from.805*806* This is a fixed-size type for 32/64 compatibility.807*/808__u64 data_ptr;809};810811struct drm_i915_gem_mmap {812/** Handle for the object being mapped. */813__u32 handle;814__u32 pad;815/** Offset in the object to map. */816__u64 offset;817/**818* Length of data to map.819*820* The value will be page-aligned.821*/822__u64 size;823/**824* Returned pointer the data was mapped at.825*826* This is a fixed-size type for 32/64 compatibility.827*/828__u64 addr_ptr;829830/**831* Flags for extended behaviour.832*833* Added in version 2.834*/835__u64 flags;836#define I915_MMAP_WC 0x1837};838839struct drm_i915_gem_mmap_gtt {840/** Handle for the object being mapped. */841__u32 handle;842__u32 pad;843/**844* Fake offset to use for subsequent mmap call845*846* This is a fixed-size type for 32/64 compatibility.847*/848__u64 offset;849};850851struct drm_i915_gem_mmap_offset {852/** Handle for the object being mapped. 
*/853__u32 handle;854__u32 pad;855/**856* Fake offset to use for subsequent mmap call857*858* This is a fixed-size type for 32/64 compatibility.859*/860__u64 offset;861862/**863* Flags for extended behaviour.864*865* It is mandatory that one of the MMAP_OFFSET types866* (GTT, WC, WB, UC, etc) should be included.867*/868__u64 flags;869#define I915_MMAP_OFFSET_GTT 0870#define I915_MMAP_OFFSET_WC 1871#define I915_MMAP_OFFSET_WB 2872#define I915_MMAP_OFFSET_UC 3873874/*875* Zero-terminated chain of extensions.876*877* No current extensions defined; mbz.878*/879__u64 extensions;880};881882struct drm_i915_gem_set_domain {883/** Handle for the object */884__u32 handle;885886/** New read domains */887__u32 read_domains;888889/** New write domain */890__u32 write_domain;891};892893struct drm_i915_gem_sw_finish {894/** Handle for the object */895__u32 handle;896};897898struct drm_i915_gem_relocation_entry {899/**900* Handle of the buffer being pointed to by this relocation entry.901*902* It's appealing to make this be an index into the mm_validate_entry903* list to refer to the buffer, but this allows the driver to create904* a relocation list for state buffers and not re-write it per905* exec using the buffer.906*/907__u32 target_handle;908909/**910* Value to be added to the offset of the target buffer to make up911* the relocation entry.912*/913__u32 delta;914915/** Offset in the buffer the relocation entry will be written into */916__u64 offset;917918/**919* Offset value of the target buffer that the relocation entry was last920* written as.921*922* If the buffer has the same offset as last time, we can skip syncing923* and writing the relocation. 
This value is written back out by924* the execbuffer ioctl when the relocation is written.925*/926__u64 presumed_offset;927928/**929* Target memory domains read by this operation.930*/931__u32 read_domains;932933/**934* Target memory domains written by this operation.935*936* Note that only one domain may be written by the whole937* execbuffer operation, so that where there are conflicts,938* the application will get -EINVAL back.939*/940__u32 write_domain;941};942943/** @{944* Intel memory domains945*946* Most of these just align with the various caches in947* the system and are used to flush and invalidate as948* objects end up cached in different domains.949*/950/** CPU cache */951#define I915_GEM_DOMAIN_CPU 0x00000001952/** Render cache, used by 2D and 3D drawing */953#define I915_GEM_DOMAIN_RENDER 0x00000002954/** Sampler cache, used by texture engine */955#define I915_GEM_DOMAIN_SAMPLER 0x00000004956/** Command queue, used to load batch buffers */957#define I915_GEM_DOMAIN_COMMAND 0x00000008958/** Instruction cache, used by shader programs */959#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010960/** Vertex address cache */961#define I915_GEM_DOMAIN_VERTEX 0x00000020962/** GTT domain - aperture and scanout */963#define I915_GEM_DOMAIN_GTT 0x00000040964/** WC domain - uncached access */965#define I915_GEM_DOMAIN_WC 0x00000080966/** @} */967968struct drm_i915_gem_exec_object {969/**970* User's handle for a buffer to be bound into the GTT for this971* operation.972*/973__u32 handle;974975/** Number of relocations to be performed on this buffer */976__u32 relocation_count;977/**978* Pointer to array of struct drm_i915_gem_relocation_entry containing979* the relocations to be performed in this buffer.980*/981__u64 relocs_ptr;982983/** Required alignment in graphics aperture */984__u64 alignment;985986/**987* Returned value of the updated offset of the object, for future988* presumed_offset writes.989*/990__u64 offset;991};992993/* DRM_IOCTL_I915_GEM_EXECBUFFER was 
removed in Linux 5.13 */994struct drm_i915_gem_execbuffer {995/**996* List of buffers to be validated with their relocations to be997* performend on them.998*999* This is a pointer to an array of struct drm_i915_gem_validate_entry.1000*1001* These buffers must be listed in an order such that all relocations1002* a buffer is performing refer to buffers that have already appeared1003* in the validate list.1004*/1005__u64 buffers_ptr;1006__u32 buffer_count;10071008/** Offset in the batchbuffer to start execution from. */1009__u32 batch_start_offset;1010/** Bytes used in batchbuffer from batch_start_offset */1011__u32 batch_len;1012__u32 DR1;1013__u32 DR4;1014__u32 num_cliprects;1015/** This is a struct drm_clip_rect *cliprects */1016__u64 cliprects_ptr;1017};10181019struct drm_i915_gem_exec_object2 {1020/**1021* User's handle for a buffer to be bound into the GTT for this1022* operation.1023*/1024__u32 handle;10251026/** Number of relocations to be performed on this buffer */1027__u32 relocation_count;1028/**1029* Pointer to array of struct drm_i915_gem_relocation_entry containing1030* the relocations to be performed in this buffer.1031*/1032__u64 relocs_ptr;10331034/** Required alignment in graphics aperture */1035__u64 alignment;10361037/**1038* When the EXEC_OBJECT_PINNED flag is specified this is populated by1039* the user with the GTT offset at which this object will be pinned.1040* When the I915_EXEC_NO_RELOC flag is specified this must contain the1041* presumed_offset of the object.1042* During execbuffer2 the kernel populates it with the value of the1043* current GTT offset of the object, for future presumed_offset writes.1044*/1045__u64 offset;10461047#define EXEC_OBJECT_NEEDS_FENCE (1<<0)1048#define EXEC_OBJECT_NEEDS_GTT (1<<1)1049#define EXEC_OBJECT_WRITE (1<<2)1050#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)1051#define EXEC_OBJECT_PINNED (1<<4)1052#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)1053/* The kernel implicitly tracks GPU activity on all GEM objects, 
and1054* synchronises operations with outstanding rendering. This includes1055* rendering on other devices if exported via dma-buf. However, sometimes1056* this tracking is too coarse and the user knows better. For example,1057* if the object is split into non-overlapping ranges shared between different1058* clients or engines (i.e. suballocating objects), the implicit tracking1059* by kernel assumes that each operation affects the whole object rather1060* than an individual range, causing needless synchronisation between clients.1061* The kernel will also forgo any CPU cache flushes prior to rendering from1062* the object as the client is expected to be also handling such domain1063* tracking.1064*1065* The kernel maintains the implicit tracking in order to manage resources1066* used by the GPU - this flag only disables the synchronisation prior to1067* rendering with this object in this execbuf.1068*1069* Opting out of implicit synhronisation requires the user to do its own1070* explicit tracking to avoid rendering corruption. See, for example,1071* I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.1072*/1073#define EXEC_OBJECT_ASYNC (1<<6)1074/* Request that the contents of this execobject be copied into the error1075* state upon a GPU hang involving this batch for post-mortem debugging.1076* These buffers are recorded in no particular order as "user" in1077* /sys/class/drm/cardN/error. 
Query I915_PARAM_HAS_EXEC_CAPTURE to see1078* if the kernel supports this flag.1079*/1080#define EXEC_OBJECT_CAPTURE (1<<7)1081/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */1082#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)1083__u64 flags;10841085union {1086__u64 rsvd1;1087__u64 pad_to_size;1088};1089__u64 rsvd2;1090};10911092struct drm_i915_gem_exec_fence {1093/**1094* User's handle for a drm_syncobj to wait on or signal.1095*/1096__u32 handle;10971098#define I915_EXEC_FENCE_WAIT (1<<0)1099#define I915_EXEC_FENCE_SIGNAL (1<<1)1100#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))1101__u32 flags;1102};11031104/*1105* See drm_i915_gem_execbuffer_ext_timeline_fences.1106*/1107#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 011081109/*1110* This structure describes an array of drm_syncobj and associated points for1111* timeline variants of drm_syncobj. It is invalid to append this structure to1112* the execbuf if I915_EXEC_FENCE_ARRAY is set.1113*/1114struct drm_i915_gem_execbuffer_ext_timeline_fences {1115struct i915_user_extension base;11161117/**1118* Number of element in the handles_ptr & value_ptr arrays.1119*/1120__u64 fence_count;11211122/**1123* Pointer to an array of struct drm_i915_gem_exec_fence of length1124* fence_count.1125*/1126__u64 handles_ptr;11271128/**1129* Pointer to an array of u64 values of length fence_count. Values1130* must be 0 for a binary drm_syncobj. A Value of 0 for a timeline1131* drm_syncobj is invalid as it turns a drm_syncobj into a binary one.1132*/1133__u64 values_ptr;1134};11351136struct drm_i915_gem_execbuffer2 {1137/**1138* List of gem_exec_object2 structs1139*/1140__u64 buffers_ptr;1141__u32 buffer_count;11421143/** Offset in the batchbuffer to start execution from. 
*/1144__u32 batch_start_offset;1145/** Bytes used in batchbuffer from batch_start_offset */1146__u32 batch_len;1147__u32 DR1;1148__u32 DR4;1149__u32 num_cliprects;1150/**1151* This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY1152* & I915_EXEC_USE_EXTENSIONS are not set.1153*1154* If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array1155* of struct drm_i915_gem_exec_fence and num_cliprects is the length1156* of the array.1157*1158* If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a1159* single struct i915_user_extension and num_cliprects is 0.1160*/1161__u64 cliprects_ptr;1162#define I915_EXEC_RING_MASK (0x3f)1163#define I915_EXEC_DEFAULT (0<<0)1164#define I915_EXEC_RENDER (1<<0)1165#define I915_EXEC_BSD (2<<0)1166#define I915_EXEC_BLT (3<<0)1167#define I915_EXEC_VEBOX (4<<0)11681169/* Used for switching the constants addressing mode on gen4+ RENDER ring.1170* Gen6+ only supports relative addressing to dynamic state (default) and1171* absolute addressing.1172*1173* These flags are ignored for the BSD and BLT rings.1174*/1175#define I915_EXEC_CONSTANTS_MASK (3<<6)1176#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */1177#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)1178#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */1179__u64 flags;1180__u64 rsvd1; /* now used for context info */1181__u64 rsvd2;1182};11831184/** Resets the SO write offset registers for transform feedback on gen7. */1185#define I915_EXEC_GEN7_SOL_RESET (1<<8)11861187/** Request a privileged ("secure") batch buffer. Note only available for1188* DRM_ROOT_ONLY | DRM_MASTER processes.1189*/1190#define I915_EXEC_SECURE (1<<9)11911192/** Inform the kernel that the batch is and will always be pinned. This1193* negates the requirement for a workaround to be performed to avoid1194* an incoherent CS (such as can be found on 830/845). 
If this flag is1195* not passed, the kernel will endeavour to make sure the batch is1196* coherent with the CS before execution. If this flag is passed,1197* userspace assumes the responsibility for ensuring the same.1198*/1199#define I915_EXEC_IS_PINNED (1<<10)12001201/** Provide a hint to the kernel that the command stream and auxiliary1202* state buffers already holds the correct presumed addresses and so the1203* relocation process may be skipped if no buffers need to be moved in1204* preparation for the execbuffer.1205*/1206#define I915_EXEC_NO_RELOC (1<<11)12071208/** Use the reloc.handle as an index into the exec object array rather1209* than as the per-file handle.1210*/1211#define I915_EXEC_HANDLE_LUT (1<<12)12121213/** Used for switching BSD rings on the platforms with two BSD rings */1214#define I915_EXEC_BSD_SHIFT (13)1215#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)1216/* default ping-pong mode */1217#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)1218#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)1219#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)12201221/** Tell the kernel that the batchbuffer is processed by1222* the resource streamer.1223*/1224#define I915_EXEC_RESOURCE_STREAMER (1<<15)12251226/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent1227* a sync_file fd to wait upon (in a nonblocking manner) prior to executing1228* the batch.1229*1230* Returns -EINVAL if the sync_file fd cannot be found.1231*/1232#define I915_EXEC_FENCE_IN (1<<16)12331234/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd1235* in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given1236* to the caller, and it should be close() after use. (The fd is a regular1237* file descriptor and will be cleaned up on process termination. 
It holds1238* a reference to the request, but nothing else.)1239*1240* The sync_file fd can be combined with other sync_file and passed either1241* to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip1242* will only occur after this request completes), or to other devices.1243*1244* Using I915_EXEC_FENCE_OUT requires use of1245* DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written1246* back to userspace. Failure to do so will cause the out-fence to always1247* be reported as zero, and the real fence fd to be leaked.1248*/1249#define I915_EXEC_FENCE_OUT (1<<17)12501251/*1252* Traditionally the execbuf ioctl has only considered the final element in1253* the execobject[] to be the executable batch. Often though, the client1254* will known the batch object prior to construction and being able to place1255* it into the execobject[] array first can simplify the relocation tracking.1256* Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the1257* execobject[] as the * batch instead (the default is to use the last1258* element).1259*/1260#define I915_EXEC_BATCH_FIRST (1<<18)12611262/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr1263* define an array of i915_gem_exec_fence structures which specify a set of1264* dma fences to wait upon or signal.1265*/1266#define I915_EXEC_FENCE_ARRAY (1<<19)12671268/*1269* Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent1270* a sync_file fd to wait upon (in a nonblocking manner) prior to executing1271* the batch.1272*1273* Returns -EINVAL if the sync_file fd cannot be found.1274*/1275#define I915_EXEC_FENCE_SUBMIT (1 << 20)12761277/*1278* Setting I915_EXEC_USE_EXTENSIONS implies that1279* drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked1280* list of i915_user_extension. Each i915_user_extension node is the base of a1281* larger structure. 
The list of supported structures are listed in the1282* drm_i915_gem_execbuffer_ext enum.1283*/1284#define I915_EXEC_USE_EXTENSIONS (1 << 21)12851286#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))12871288#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)1289#define i915_execbuffer2_set_context_id(eb2, context) \1290(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK1291#define i915_execbuffer2_get_context_id(eb2) \1292((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)12931294struct drm_i915_gem_pin {1295/** Handle of the buffer to be pinned. */1296__u32 handle;1297__u32 pad;12981299/** alignment required within the aperture */1300__u64 alignment;13011302/** Returned GTT offset of the buffer. */1303__u64 offset;1304};13051306struct drm_i915_gem_unpin {1307/** Handle of the buffer to be unpinned. */1308__u32 handle;1309__u32 pad;1310};13111312struct drm_i915_gem_busy {1313/** Handle of the buffer to check for busy */1314__u32 handle;13151316/** Return busy status1317*1318* A return of 0 implies that the object is idle (after1319* having flushed any pending activity), and a non-zero return that1320* the object is still in-flight on the GPU. (The GPU has not yet1321* signaled completion for all pending requests that reference the1322* object.) An object is guaranteed to become idle eventually (so1323* long as no new GPU commands are executed upon it). Due to the1324* asynchronous nature of the hardware, an object reported1325* as busy may become idle before the ioctl is completed.1326*1327* Furthermore, if the object is busy, which engine is busy is only1328* provided as a guide and only indirectly by reporting its class1329* (there may be more than one engine in each class). There are race1330* conditions which prevent the report of which engines are busy from1331* being always accurate. However, the converse is not true. 
If the1332* object is idle, the result of the ioctl, that all engines are idle,1333* is accurate.1334*1335* The returned dword is split into two fields to indicate both1336* the engine classess on which the object is being read, and the1337* engine class on which it is currently being written (if any).1338*1339* The low word (bits 0:15) indicate if the object is being written1340* to by any engine (there can only be one, as the GEM implicit1341* synchronisation rules force writes to be serialised). Only the1342* engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as1343* 1 not 0 etc) for the last write is reported.1344*1345* The high word (bits 16:31) are a bitmask of which engines classes1346* are currently reading from the object. Multiple engines may be1347* reading from the object simultaneously.1348*1349* The value of each engine class is the same as specified in the1350* I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.1351* I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.1352* reported as active itself. Some hardware may have parallel1353* execution engines, e.g. multiple media engines, which are1354* mapped to the same class identifier and so are not separately1355* reported for busyness.1356*1357* Caveat emptor:1358* Only the boolean result of this query is reliable; that is whether1359* the object is idle or busy. The report of which engines are busy1360* should be only used as a heuristic.1361*/1362__u32 busy;1363};13641365/**1366* I915_CACHING_NONE1367*1368* GPU access is not coherent with cpu caches. Default for machines without an1369* LLC.1370*/1371#define I915_CACHING_NONE 01372/**1373* I915_CACHING_CACHED1374*1375* GPU access is coherent with cpu caches and furthermore the data is cached in1376* last-level caches shared between cpu cores and the gpu GT. 
Default on1377* machines with HAS_LLC.1378*/1379#define I915_CACHING_CACHED 11380/**1381* I915_CACHING_DISPLAY1382*1383* Special GPU caching mode which is coherent with the scanout engines.1384* Transparently falls back to I915_CACHING_NONE on platforms where no special1385* cache mode (like write-through or gfdt flushing) is available. The kernel1386* automatically sets this mode when using a buffer as a scanout target.1387* Userspace can manually set this mode to avoid a costly stall and clflush in1388* the hotpath of drawing the first frame.1389*/1390#define I915_CACHING_DISPLAY 213911392struct drm_i915_gem_caching {1393/**1394* Handle of the buffer to set/get the caching level of. */1395__u32 handle;13961397/**1398* Cacheing level to apply or return value1399*1400* bits0-15 are for generic caching control (i.e. the above defined1401* values). bits16-31 are reserved for platform-specific variations1402* (e.g. l3$ caching on gen7). */1403__u32 caching;1404};14051406#define I915_TILING_NONE 01407#define I915_TILING_X 11408#define I915_TILING_Y 21409#define I915_TILING_LAST I915_TILING_Y14101411#define I915_BIT_6_SWIZZLE_NONE 01412#define I915_BIT_6_SWIZZLE_9 11413#define I915_BIT_6_SWIZZLE_9_10 21414#define I915_BIT_6_SWIZZLE_9_11 31415#define I915_BIT_6_SWIZZLE_9_10_11 41416/* Not seen by userland */1417#define I915_BIT_6_SWIZZLE_UNKNOWN 51418/* Seen by userland. 
*/1419#define I915_BIT_6_SWIZZLE_9_17 61420#define I915_BIT_6_SWIZZLE_9_10_17 714211422struct drm_i915_gem_set_tiling {1423/** Handle of the buffer to have its tiling state updated */1424__u32 handle;14251426/**1427* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,1428* I915_TILING_Y).1429*1430* This value is to be set on request, and will be updated by the1431* kernel on successful return with the actual chosen tiling layout.1432*1433* The tiling mode may be demoted to I915_TILING_NONE when the system1434* has bit 6 swizzling that can't be managed correctly by GEM.1435*1436* Buffer contents become undefined when changing tiling_mode.1437*/1438__u32 tiling_mode;14391440/**1441* Stride in bytes for the object when in I915_TILING_X or1442* I915_TILING_Y.1443*/1444__u32 stride;14451446/**1447* Returned address bit 6 swizzling required for CPU access through1448* mmap mapping.1449*/1450__u32 swizzle_mode;1451};14521453struct drm_i915_gem_get_tiling {1454/** Handle of the buffer to get tiling state for. 
*/1455__u32 handle;14561457/**1458* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,1459* I915_TILING_Y).1460*/1461__u32 tiling_mode;14621463/**1464* Returned address bit 6 swizzling required for CPU access through1465* mmap mapping.1466*/1467__u32 swizzle_mode;14681469/**1470* Returned address bit 6 swizzling required for CPU access through1471* mmap mapping whilst bound.1472*/1473__u32 phys_swizzle_mode;1474};14751476struct drm_i915_gem_get_aperture {1477/** Total size of the aperture used by i915_gem_execbuffer, in bytes */1478__u64 aper_size;14791480/**1481* Available space in the aperture used by i915_gem_execbuffer, in1482* bytes1483*/1484__u64 aper_available_size;1485};14861487struct drm_i915_get_pipe_from_crtc_id {1488/** ID of CRTC being requested **/1489__u32 crtc_id;14901491/** pipe of requested CRTC **/1492__u32 pipe;1493};14941495#define I915_MADV_WILLNEED 01496#define I915_MADV_DONTNEED 11497#define __I915_MADV_PURGED 2 /* internal state */14981499struct drm_i915_gem_madvise {1500/** Handle of the buffer to change the backing store advice */1501__u32 handle;15021503/* Advice: either the buffer will be needed again in the near future,1504* or wont be and could be discarded under memory pressure.1505*/1506__u32 madv;15071508/** Whether the backing store still exists. 
*/1509__u32 retained;1510};15111512/* flags */1513#define I915_OVERLAY_TYPE_MASK 0xff1514#define I915_OVERLAY_YUV_PLANAR 0x011515#define I915_OVERLAY_YUV_PACKED 0x021516#define I915_OVERLAY_RGB 0x0315171518#define I915_OVERLAY_DEPTH_MASK 0xff001519#define I915_OVERLAY_RGB24 0x10001520#define I915_OVERLAY_RGB16 0x20001521#define I915_OVERLAY_RGB15 0x30001522#define I915_OVERLAY_YUV422 0x01001523#define I915_OVERLAY_YUV411 0x02001524#define I915_OVERLAY_YUV420 0x03001525#define I915_OVERLAY_YUV410 0x040015261527#define I915_OVERLAY_SWAP_MASK 0xff00001528#define I915_OVERLAY_NO_SWAP 0x0000001529#define I915_OVERLAY_UV_SWAP 0x0100001530#define I915_OVERLAY_Y_SWAP 0x0200001531#define I915_OVERLAY_Y_AND_UV_SWAP 0x03000015321533#define I915_OVERLAY_FLAGS_MASK 0xff0000001534#define I915_OVERLAY_ENABLE 0x0100000015351536struct drm_intel_overlay_put_image {1537/* various flags and src format description */1538__u32 flags;1539/* source picture description */1540__u32 bo_handle;1541/* stride values and offsets are in bytes, buffer relative */1542__u16 stride_Y; /* stride for packed formats */1543__u16 stride_UV;1544__u32 offset_Y; /* offset for packet formats */1545__u32 offset_U;1546__u32 offset_V;1547/* in pixels */1548__u16 src_width;1549__u16 src_height;1550/* to compensate the scaling factors for partially covered surfaces */1551__u16 src_scan_width;1552__u16 src_scan_height;1553/* output crtc description */1554__u32 crtc_id;1555__u16 dst_x;1556__u16 dst_y;1557__u16 dst_width;1558__u16 dst_height;1559};15601561/* flags */1562#define I915_OVERLAY_UPDATE_ATTRS (1<<0)1563#define I915_OVERLAY_UPDATE_GAMMA (1<<1)1564#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)1565struct drm_intel_overlay_attrs {1566__u32 flags;1567__u32 color_key;1568__s32 brightness;1569__u32 contrast;1570__u32 saturation;1571__u32 gamma0;1572__u32 gamma1;1573__u32 gamma2;1574__u32 gamma3;1575__u32 gamma4;1576__u32 gamma5;1577};15781579/*1580* Intel sprite handling1581*1582* Color keying works with a 
min/mask/max tuple. Both source and destination1583* color keying is allowed.1584*1585* Source keying:1586* Sprite pixels within the min & max values, masked against the color channels1587* specified in the mask field, will be transparent. All other pixels will1588* be displayed on top of the primary plane. For RGB surfaces, only the min1589* and mask fields will be used; ranged compares are not allowed.1590*1591* Destination keying:1592* Primary plane pixels that match the min value, masked against the color1593* channels specified in the mask field, will be replaced by corresponding1594* pixels from the sprite plane.1595*1596* Note that source & destination keying are exclusive; only one can be1597* active on a given plane.1598*/15991600#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set1601* flags==0 to disable colorkeying.1602*/1603#define I915_SET_COLORKEY_DESTINATION (1<<1)1604#define I915_SET_COLORKEY_SOURCE (1<<2)1605struct drm_intel_sprite_colorkey {1606__u32 plane_id;1607__u32 min_value;1608__u32 channel_mask;1609__u32 max_value;1610__u32 flags;1611};16121613struct drm_i915_gem_wait {1614/** Handle of BO we shall wait on */1615__u32 bo_handle;1616__u32 flags;1617/** Number of nanoseconds to wait, Returns time remaining. 
*/1618__s64 timeout_ns;1619};16201621struct drm_i915_gem_context_create {1622__u32 ctx_id; /* output: id of new context*/1623__u32 pad;1624};16251626struct drm_i915_gem_context_create_ext {1627__u32 ctx_id; /* output: id of new context*/1628__u32 flags;1629#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)1630#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)1631#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \1632(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))1633__u64 extensions;1634};16351636struct drm_i915_gem_context_param {1637__u32 ctx_id;1638__u32 size;1639__u64 param;1640#define I915_CONTEXT_PARAM_BAN_PERIOD 0x11641#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x21642#define I915_CONTEXT_PARAM_GTT_SIZE 0x31643#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x41644#define I915_CONTEXT_PARAM_BANNABLE 0x51645#define I915_CONTEXT_PARAM_PRIORITY 0x61646#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */1647#define I915_CONTEXT_DEFAULT_PRIORITY 01648#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */1649/*1650* When using the following param, value should be a pointer to1651* drm_i915_gem_context_param_sseu.1652*/1653#define I915_CONTEXT_PARAM_SSEU 0x716541655/*1656* Not all clients may want to attempt automatic recover of a context after1657* a hang (for example, some clients may only submit very small incremental1658* batches relying on known logical state of previous batches which will never1659* recover correctly and each attempt will hang), and so would prefer that1660* the context is forever banned instead.1661*1662* If set to false (0), after a reset, subsequent (and in flight) rendering1663* from this context is discarded, and the client will need to create a new1664* context to use instead.1665*1666* If set to true (1), the kernel will automatically attempt to recover the1667* context by skipping the hanging batch and executing the next batch starting1668* from the default context state (discarding the incomplete logical context1669* 
state lost due to the reset).1670*1671* On creation, all new contexts are marked as recoverable.1672*/1673#define I915_CONTEXT_PARAM_RECOVERABLE 0x816741675/*1676* The id of the associated virtual memory address space (ppGTT) of1677* this context. Can be retrieved and passed to another context1678* (on the same fd) for both to use the same ppGTT and so share1679* address layouts, and avoid reloading the page tables on context1680* switches between themselves.1681*1682* See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.1683*/1684#define I915_CONTEXT_PARAM_VM 0x916851686/*1687* I915_CONTEXT_PARAM_ENGINES:1688*1689* Bind this context to operate on this subset of available engines. Henceforth,1690* the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as1691* an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]1692* and upwards. Slots 0...N are filled in using the specified (class, instance).1693* Use1694* engine_class: I915_ENGINE_CLASS_INVALID,1695* engine_instance: I915_ENGINE_CLASS_INVALID_NONE1696* to specify a gap in the array that can be filled in later, e.g. by a1697* virtual engine used for load balancing.1698*1699* Setting the number of engines bound to the context to 0, by passing a zero1700* sized argument, will revert back to default settings.1701*1702* See struct i915_context_param_engines.1703*1704* Extensions:1705* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)1706* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)1707*/1708#define I915_CONTEXT_PARAM_ENGINES 0xa17091710/*1711* I915_CONTEXT_PARAM_PERSISTENCE:1712*1713* Allow the context and active rendering to survive the process until1714* completion. 
Persistence allows fire-and-forget clients to queue up a1715* bunch of work, hand the output over to a display server and then quit.1716* If the context is marked as not persistent, upon closing (either via1717* an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure1718* or process termination), the context and any outstanding requests will be1719* cancelled (and exported fences for cancelled requests marked as -EIO).1720*1721* By default, new contexts allow persistence.1722*/1723#define I915_CONTEXT_PARAM_PERSISTENCE 0xb17241725/*1726* I915_CONTEXT_PARAM_RINGSIZE:1727*1728* Sets the size of the CS ringbuffer to use for logical ring contexts. This1729* applies a limit of how many batches can be queued to HW before the caller1730* is blocked due to lack of space for more commands.1731*1732* Only reliably possible to be set prior to first use, i.e. during1733* construction. At any later point, the current execution must be flushed as1734* the ring can only be changed while the context is idle. 
Note, the ringsize1735* can be specified as a constructor property, see1736* I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.1737*1738* Only applies to the current set of engine and lost when those engines1739* are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).1740*1741* Must be between 4 - 512 KiB, in intervals of page size [4 KiB].1742* Default is 16 KiB.1743*/1744#define I915_CONTEXT_PARAM_RINGSIZE 0xc1745/* Must be kept compact -- no holes and well documented */17461747__u64 value;1748};17491750/*1751* Context SSEU programming1752*1753* It may be necessary for either functional or performance reason to configure1754* a context to run with a reduced number of SSEU (where SSEU stands for Slice/1755* Sub-slice/EU).1756*1757* This is done by configuring SSEU configuration using the below1758* @struct drm_i915_gem_context_param_sseu for every supported engine which1759* userspace intends to use.1760*1761* Not all GPUs or engines support this functionality in which case an error1762* code -ENODEV will be returned.1763*1764* Also, flexibility of possible SSEU configuration permutations varies between1765* GPU generations and software imposed limitations. Requesting such a1766* combination will return an error code of -EINVAL.1767*1768* NOTE: When perf/OA is active the context's SSEU configuration is ignored in1769* favour of a single global setting.1770*/1771struct drm_i915_gem_context_param_sseu {1772/*1773* Engine class & instance to be configured or queried.1774*/1775struct i915_engine_class_instance engine;17761777/*1778* Unknown flags must be cleared to zero.1779*/1780__u32 flags;1781#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)17821783/*1784* Mask of slices to enable for the context. Valid values are a subset1785* of the bitmask value returned for I915_PARAM_SLICE_MASK.1786*/1787__u64 slice_mask;17881789/*1790* Mask of subslices to enable for the context. 
Valid values are a1791* subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.1792*/1793__u64 subslice_mask;17941795/*1796* Minimum/Maximum number of EUs to enable per subslice for the1797* context. min_eus_per_subslice must be inferior or equal to1798* max_eus_per_subslice.1799*/1800__u16 min_eus_per_subslice;1801__u16 max_eus_per_subslice;18021803/*1804* Unused for now. Must be cleared to zero.1805*/1806__u32 rsvd;1807};18081809/*1810* i915_context_engines_load_balance:1811*1812* Enable load balancing across this set of engines.1813*1814* Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when1815* used will proxy the execbuffer request onto one of the set of engines1816* in such a way as to distribute the load evenly across the set.1817*1818* The set of engines must be compatible (e.g. the same HW class) as they1819* will share the same logical GPU context and ring.1820*1821* To intermix rendering with the virtual engine and direct rendering onto1822* the backing engines (bypassing the load balancing proxy), the context must1823* be defined to use a single timeline for all engines.1824*/1825struct i915_context_engines_load_balance {1826struct i915_user_extension base;18271828__u16 engine_index;1829__u16 num_siblings;1830__u32 flags; /* all undefined flags must be zero */18311832__u64 mbz64; /* reserved for future use; must be zero */18331834struct i915_engine_class_instance engines[0];1835} __attribute__((packed));18361837#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \1838struct i915_user_extension base; \1839__u16 engine_index; \1840__u16 num_siblings; \1841__u32 flags; \1842__u64 mbz64; \1843struct i915_engine_class_instance engines[N__]; \1844} __attribute__((packed)) name__18451846/*1847* i915_context_engines_bond:1848*1849* Constructed bonded pairs for execution within a virtual engine.1850*1851* All engines are equal, but some are more equal than others. 
Given1852* the distribution of resources in the HW, it may be preferable to run1853* a request on a given subset of engines in parallel to a request on a1854* specific engine. We enable this selection of engines within a virtual1855* engine by specifying bonding pairs, for any given master engine we will1856* only execute on one of the corresponding siblings within the virtual engine.1857*1858* To execute a request in parallel on the master engine and a sibling requires1859* coordination with a I915_EXEC_FENCE_SUBMIT.1860*/1861struct i915_context_engines_bond {1862struct i915_user_extension base;18631864struct i915_engine_class_instance master;18651866__u16 virtual_index; /* index of virtual engine in ctx->engines[] */1867__u16 num_bonds;18681869__u64 flags; /* all undefined flags must be zero */1870__u64 mbz64[4]; /* reserved for future use; must be zero */18711872struct i915_engine_class_instance engines[0];1873} __attribute__((packed));18741875#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \1876struct i915_user_extension base; \1877struct i915_engine_class_instance master; \1878__u16 virtual_index; \1879__u16 num_bonds; \1880__u64 flags; \1881__u64 mbz64[4]; \1882struct i915_engine_class_instance engines[N__]; \1883} __attribute__((packed)) name__18841885struct i915_context_param_engines {1886__u64 extensions; /* linked chain of extension blocks, 0 terminates */1887#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */1888#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */1889struct i915_engine_class_instance engines[0];1890} __attribute__((packed));18911892#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \1893__u64 extensions; \1894struct i915_engine_class_instance engines[N__]; \1895} __attribute__((packed)) name__18961897struct drm_i915_gem_context_create_ext_setparam {1898#define I915_CONTEXT_CREATE_EXT_SETPARAM 01899struct i915_user_extension base;1900struct 
drm_i915_gem_context_param param;1901};19021903struct drm_i915_gem_context_create_ext_clone {1904#define I915_CONTEXT_CREATE_EXT_CLONE 11905struct i915_user_extension base;1906__u32 clone_id;1907__u32 flags;1908#define I915_CONTEXT_CLONE_ENGINES (1u << 0)1909#define I915_CONTEXT_CLONE_FLAGS (1u << 1)1910#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)1911#define I915_CONTEXT_CLONE_SSEU (1u << 3)1912#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)1913#define I915_CONTEXT_CLONE_VM (1u << 5)1914#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)1915__u64 rsvd;1916};19171918struct drm_i915_gem_context_destroy {1919__u32 ctx_id;1920__u32 pad;1921};19221923/*1924* DRM_I915_GEM_VM_CREATE -1925*1926* Create a new virtual memory address space (ppGTT) for use within a context1927* on the same file. Extensions can be provided to configure exactly how the1928* address space is setup upon creation.1929*1930* The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is1931* returned in the outparam @id.1932*1933* No flags are defined, with all bits reserved and must be zero.1934*1935* An extension chain maybe provided, starting with @extensions, and terminated1936* by the @next_extension being 0. 
Currently, no extensions are defined.1937*1938* DRM_I915_GEM_VM_DESTROY -1939*1940* Destroys a previously created VM id, specified in @id.1941*1942* No extensions or flags are allowed currently, and so must be zero.1943*/1944struct drm_i915_gem_vm_control {1945__u64 extensions;1946__u32 flags;1947__u32 vm_id;1948};19491950struct drm_i915_reg_read {1951/*1952* Register offset.1953* For 64bit wide registers where the upper 32bits don't immediately1954* follow the lower 32bits, the offset of the lower 32bits must1955* be specified1956*/1957__u64 offset;1958#define I915_REG_READ_8B_WA (1ul << 0)19591960__u64 val; /* Return value */1961};19621963/* Known registers:1964*1965* Render engine timestamp - 0x2358 + 64bit - gen7+1966* - Note this register returns an invalid value if using the default1967* single instruction 8byte read, in order to workaround that pass1968* flag I915_REG_READ_8B_WA in offset field.1969*1970*/19711972struct drm_i915_reset_stats {1973__u32 ctx_id;1974__u32 flags;19751976/* All resets since boot/module reload, for all contexts */1977__u32 reset_count;19781979/* Number of batches lost when active in GPU, for this context */1980__u32 batch_active;19811982/* Number of batches lost pending for execution, for this context */1983__u32 batch_pending;19841985__u32 pad;1986};19871988struct drm_i915_gem_userptr {1989__u64 user_ptr;1990__u64 user_size;1991__u32 flags;1992#define I915_USERPTR_READ_ONLY 0x11993#define I915_USERPTR_UNSYNCHRONIZED 0x800000001994/**1995* Returned handle for the object.1996*1997* Object handles are nonzero.1998*/1999__u32 handle;2000};20012002enum drm_i915_oa_format {2003I915_OA_FORMAT_A13 = 1, /* HSW only */2004I915_OA_FORMAT_A29, /* HSW only */2005I915_OA_FORMAT_A13_B8_C8, /* HSW only */2006I915_OA_FORMAT_B4_C8, /* HSW only */2007I915_OA_FORMAT_A45_B8_C8, /* HSW only */2008I915_OA_FORMAT_B4_C8_A16, /* HSW only */2009I915_OA_FORMAT_C4_B8, /* HSW+ */20102011/* Gen8+ 
*/2012I915_OA_FORMAT_A12,2013I915_OA_FORMAT_A12_B8_C8,2014I915_OA_FORMAT_A32u40_A4u32_B8_C8,20152016I915_OA_FORMAT_MAX /* non-ABI */2017};20182019enum drm_i915_perf_property_id {2020/**2021* Open the stream for a specific context handle (as used with2022* execbuffer2). A stream opened for a specific context this way2023* won't typically require root privileges.2024*2025* This property is available in perf revision 1.2026*/2027DRM_I915_PERF_PROP_CTX_HANDLE = 1,20282029/**2030* A value of 1 requests the inclusion of raw OA unit reports as2031* part of stream samples.2032*2033* This property is available in perf revision 1.2034*/2035DRM_I915_PERF_PROP_SAMPLE_OA,20362037/**2038* The value specifies which set of OA unit metrics should be2039* configured, defining the contents of any OA unit reports.2040*2041* This property is available in perf revision 1.2042*/2043DRM_I915_PERF_PROP_OA_METRICS_SET,20442045/**2046* The value specifies the size and layout of OA unit reports.2047*2048* This property is available in perf revision 1.2049*/2050DRM_I915_PERF_PROP_OA_FORMAT,20512052/**2053* Specifying this property implicitly requests periodic OA unit2054* sampling and (at least on Haswell) the sampling frequency is derived2055* from this exponent as follows:2056*2057* 80ns * 2^(period_exponent + 1)2058*2059* This property is available in perf revision 1.2060*/2061DRM_I915_PERF_PROP_OA_EXPONENT,20622063/**2064* Specifying this property is only valid when specify a context to2065* filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property2066* will hold preemption of the particular context we want to gather2067* performance data about. 
The execbuf2 submissions must include a2068* drm_i915_gem_execbuffer_ext_perf parameter for this to apply.2069*2070* This property is available in perf revision 3.2071*/2072DRM_I915_PERF_PROP_HOLD_PREEMPTION,20732074/**2075* Specifying this pins all contexts to the specified SSEU power2076* configuration for the duration of the recording.2077*2078* This parameter's value is a pointer to a struct2079* drm_i915_gem_context_param_sseu.2080*2081* This property is available in perf revision 4.2082*/2083DRM_I915_PERF_PROP_GLOBAL_SSEU,20842085/**2086* This optional parameter specifies the timer interval in nanoseconds2087* at which the i915 driver will check the OA buffer for available data.2088* Minimum allowed value is 100 microseconds. A default value is used by2089* the driver if this parameter is not specified. Note that larger timer2090* values will reduce cpu consumption during OA perf captures. However,2091* excessively large values would potentially result in OA buffer2092* overwrites as captures reach end of the OA buffer.2093*2094* This property is available in perf revision 5.2095*/2096DRM_I915_PERF_PROP_POLL_OA_PERIOD,20972098DRM_I915_PERF_PROP_MAX /* non-ABI */2099};21002101struct drm_i915_perf_open_param {2102__u32 flags;2103#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)2104#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)2105#define I915_PERF_FLAG_DISABLED (1<<2)21062107/** The number of u64 (id, value) pairs */2108__u32 num_properties;21092110/**2111* Pointer to array of u64 (id, value) pairs configuring the stream2112* to open.2113*/2114__u64 properties_ptr;2115};21162117/*2118* Enable data capture for a stream that was either opened in a disabled state2119* via I915_PERF_FLAG_DISABLED or was later disabled via2120* I915_PERF_IOCTL_DISABLE.2121*2122* It is intended to be cheaper to disable and enable a stream than it may be2123* to close and re-open a stream with the same configuration.2124*2125* It's undefined whether any pending data for the stream will be 
lost.2126*2127* This ioctl is available in perf revision 1.2128*/2129#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)21302131/*2132* Disable data capture for a stream.2133*2134* It is an error to try and read a stream that is disabled.2135*2136* This ioctl is available in perf revision 1.2137*/2138#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)21392140/*2141* Change metrics_set captured by a stream.2142*2143* If the stream is bound to a specific context, the configuration change2144* will performed __inline__ with that context such that it takes effect before2145* the next execbuf submission.2146*2147* Returns the previously bound metrics set id, or a negative error code.2148*2149* This ioctl is available in perf revision 2.2150*/2151#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)21522153/*2154* Common to all i915 perf records2155*/2156struct drm_i915_perf_record_header {2157__u32 type;2158__u16 pad;2159__u16 size;2160};21612162enum drm_i915_perf_record_type {21632164/**2165* Samples are the work horse record type whose contents are extensible2166* and defined when opening an i915 perf stream based on the given2167* properties.2168*2169* Boolean properties following the naming convention2170* DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in2171* every sample.2172*2173* The order of these sample properties given by userspace has no2174* affect on the ordering of data within a sample. The order is2175* documented here.2176*2177* struct {2178* struct drm_i915_perf_record_header header;2179*2180* { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA2181* };2182*/2183DRM_I915_PERF_RECORD_SAMPLE = 1,21842185/*2186* Indicates that one or more OA reports were not written by the2187* hardware. 
This can happen for example if an MI_REPORT_PERF_COUNT2188* command collides with periodic sampling - which would be more likely2189* at higher sampling frequencies.2190*/2191DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,21922193/**2194* An error occurred that resulted in all pending OA reports being lost.2195*/2196DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,21972198DRM_I915_PERF_RECORD_MAX /* non-ABI */2199};22002201/*2202* Structure to upload perf dynamic configuration into the kernel.2203*/2204struct drm_i915_perf_oa_config {2205/** String formatted like "%08x-%04x-%04x-%04x-%012x" */2206char uuid[36];22072208__u32 n_mux_regs;2209__u32 n_boolean_regs;2210__u32 n_flex_regs;22112212/*2213* These fields are pointers to tuples of u32 values (register address,2214* value). For example the expected length of the buffer pointed by2215* mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).2216*/2217__u64 mux_regs_ptr;2218__u64 boolean_regs_ptr;2219__u64 flex_regs_ptr;2220};22212222/**2223* struct drm_i915_query_item - An individual query for the kernel to process.2224*2225* The behaviour is determined by the @query_id. Note that exactly what2226* @data_ptr is also depends on the specific @query_id.2227*/2228struct drm_i915_query_item {2229/** @query_id: The id for this query */2230__u64 query_id;2231#define DRM_I915_QUERY_TOPOLOGY_INFO 12232#define DRM_I915_QUERY_ENGINE_INFO 22233#define DRM_I915_QUERY_PERF_CONFIG 32234#define DRM_I915_QUERY_MEMORY_REGIONS 42235/* Must be kept compact -- no holes and well documented */22362237/**2238* @length:2239*2240* When set to zero by userspace, this is filled with the size of the2241* data to be written at the @data_ptr pointer. 
The kernel sets this2242* value to a negative value to signal an error on a particular query2243* item.2244*/2245__s32 length;22462247/**2248* @flags:2249*2250* When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.2251*2252* When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the2253* following:2254*2255* - DRM_I915_QUERY_PERF_CONFIG_LIST2256* - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID2257* - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID2258*/2259__u32 flags;2260#define DRM_I915_QUERY_PERF_CONFIG_LIST 12261#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 22262#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 322632264/**2265* @data_ptr:2266*2267* Data will be written at the location pointed by @data_ptr when the2268* value of @length matches the length of the data to be written by the2269* kernel.2270*/2271__u64 data_ptr;2272};22732274/**2275* struct drm_i915_query - Supply an array of struct drm_i915_query_item for the2276* kernel to fill out.2277*2278* Note that this is generally a two step process for each struct2279* drm_i915_query_item in the array:2280*2281* 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct2282* drm_i915_query_item, with &drm_i915_query_item.length set to zero. The2283* kernel will then fill in the size, in bytes, which tells userspace how2284* memory it needs to allocate for the blob(say for an array of properties).2285*2286* 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the2287* &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that2288* the &drm_i915_query_item.length should still be the same as what the2289* kernel previously set. At this point the kernel can fill in the blob.2290*2291* Note that for some query items it can make sense for userspace to just pass2292* in a buffer/blob equal to or larger than the required size. In this case only2293* a single ioctl call is needed. 
For some smaller query items this can work2294* quite well.2295*2296*/2297struct drm_i915_query {2298/** @num_items: The number of elements in the @items_ptr array */2299__u32 num_items;23002301/**2302* @flags: Unused for now. Must be cleared to zero.2303*/2304__u32 flags;23052306/**2307* @items_ptr:2308*2309* Pointer to an array of struct drm_i915_query_item. The number of2310* array elements is @num_items.2311*/2312__u64 items_ptr;2313};23142315/*2316* Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :2317*2318* data: contains the 3 pieces of information :2319*2320* - the slice mask with one bit per slice telling whether a slice is2321* available. The availability of slice X can be queried with the following2322* formula :2323*2324* (data[X / 8] >> (X % 8)) & 12325*2326* - the subslice mask for each slice with one bit per subslice telling2327* whether a subslice is available. Gen12 has dual-subslices, which are2328* similar to two gen11 subslices. For gen12, this array represents dual-2329* subslices. The availability of subslice Y in slice X can be queried2330* with the following formula :2331*2332* (data[subslice_offset +2333* X * subslice_stride +2334* Y / 8] >> (Y % 8)) & 12335*2336* - the EU mask for each subslice in each slice with one bit per EU telling2337* whether an EU is available. The availability of EU Z in subslice Y in2338* slice X can be queried with the following formula :2339*2340* (data[eu_offset +2341* (X * max_subslices + Y) * eu_stride +2342* Z / 8] >> (Z % 8)) & 12343*/2344struct drm_i915_query_topology_info {2345/*2346* Unused for now. 
Must be cleared to zero.2347*/2348__u16 flags;23492350__u16 max_slices;2351__u16 max_subslices;2352__u16 max_eus_per_subslice;23532354/*2355* Offset in data[] at which the subslice masks are stored.2356*/2357__u16 subslice_offset;23582359/*2360* Stride at which each of the subslice masks for each slice are2361* stored.2362*/2363__u16 subslice_stride;23642365/*2366* Offset in data[] at which the EU masks are stored.2367*/2368__u16 eu_offset;23692370/*2371* Stride at which each of the EU masks for each subslice are stored.2372*/2373__u16 eu_stride;23742375__u8 data[];2376};23772378/**2379* struct drm_i915_engine_info2380*2381* Describes one engine and it's capabilities as known to the driver.2382*/2383struct drm_i915_engine_info {2384/** @engine: Engine class and instance. */2385struct i915_engine_class_instance engine;23862387/** @rsvd0: Reserved field. */2388__u32 rsvd0;23892390/** @flags: Engine flags. */2391__u64 flags;23922393/** @capabilities: Capabilities of this engine. */2394__u64 capabilities;2395#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)2396#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)23972398/** @rsvd1: Reserved fields. */2399__u64 rsvd1[4];2400};24012402/**2403* struct drm_i915_query_engine_info2404*2405* Engine info query enumerates all engines known to the driver by filling in2406* an array of struct drm_i915_engine_info structures.2407*/2408struct drm_i915_query_engine_info {2409/** @num_engines: Number of struct drm_i915_engine_info structs following. */2410__u32 num_engines;24112412/** @rsvd: MBZ */2413__u32 rsvd[3];24142415/** @engines: Marker for drm_i915_engine_info structures. 
*/2416struct drm_i915_engine_info engines[];2417};24182419/*2420* Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.2421*/2422struct drm_i915_query_perf_config {2423union {2424/*2425* When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets2426* this fields to the number of configurations available.2427*/2428__u64 n_configs;24292430/*2431* When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,2432* i915 will use the value in this field as configuration2433* identifier to decide what data to write into config_ptr.2434*/2435__u64 config;24362437/*2438* When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,2439* i915 will use the value in this field as configuration2440* identifier to decide what data to write into config_ptr.2441*2442* String formatted like "%08x-%04x-%04x-%04x-%012x"2443*/2444char uuid[36];2445};24462447/*2448* Unused for now. Must be cleared to zero.2449*/2450__u32 flags;24512452/*2453* When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will2454* write an array of __u64 of configuration identifiers.2455*2456* When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will2457* write a struct drm_i915_perf_oa_config. 
If the following fields of2458* drm_i915_perf_oa_config are set not set to 0, i915 will write into2459* the associated pointers the values of submitted when the2460* configuration was created :2461*2462* - n_mux_regs2463* - n_boolean_regs2464* - n_flex_regs2465*/2466__u8 data[];2467};24682469/**2470* enum drm_i915_gem_memory_class - Supported memory classes2471*/2472enum drm_i915_gem_memory_class {2473/** @I915_MEMORY_CLASS_SYSTEM: System memory */2474I915_MEMORY_CLASS_SYSTEM = 0,2475/** @I915_MEMORY_CLASS_DEVICE: Device local-memory */2476I915_MEMORY_CLASS_DEVICE,2477};24782479/**2480* struct drm_i915_gem_memory_class_instance - Identify particular memory region2481*/2482struct drm_i915_gem_memory_class_instance {2483/** @memory_class: See enum drm_i915_gem_memory_class */2484__u16 memory_class;24852486/** @memory_instance: Which instance */2487__u16 memory_instance;2488};24892490/**2491* struct drm_i915_memory_region_info - Describes one region as known to the2492* driver.2493*2494* Note that we reserve some stuff here for potential future work. As an example2495* we might want expose the capabilities for a given region, which could include2496* things like if the region is CPU mappable/accessible, what are the supported2497* mapping types etc.2498*2499* Note that to extend struct drm_i915_memory_region_info and struct2500* drm_i915_query_memory_regions in the future the plan is to do the following:2501*2502* .. 
code-block:: C2503*2504* struct drm_i915_memory_region_info {2505* struct drm_i915_gem_memory_class_instance region;2506* union {2507* __u32 rsvd0;2508* __u32 new_thing1;2509* };2510* ...2511* union {2512* __u64 rsvd1[8];2513* struct {2514* __u64 new_thing2;2515* __u64 new_thing3;2516* ...2517* };2518* };2519* };2520*2521* With this things should remain source compatible between versions for2522* userspace, even as we add new fields.2523*2524* Note this is using both struct drm_i915_query_item and struct drm_i915_query.2525* For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS2526* at &drm_i915_query_item.query_id.2527*/2528struct drm_i915_memory_region_info {2529/** @region: The class:instance pair encoding */2530struct drm_i915_gem_memory_class_instance region;25312532/** @rsvd0: MBZ */2533__u32 rsvd0;25342535/** @probed_size: Memory probed by the driver (-1 = unknown) */2536__u64 probed_size;25372538/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */2539__u64 unallocated_size;25402541/** @rsvd1: MBZ */2542__u64 rsvd1[8];2543};25442545/**2546* struct drm_i915_query_memory_regions2547*2548* The region info query enumerates all regions known to the driver by filling2549* in an array of struct drm_i915_memory_region_info structures.2550*2551* Example for getting the list of supported regions:2552*2553* .. code-block:: C2554*2555* struct drm_i915_query_memory_regions *info;2556* struct drm_i915_query_item item = {2557* .query_id = DRM_I915_QUERY_MEMORY_REGIONS;2558* };2559* struct drm_i915_query query = {2560* .num_items = 1,2561* .items_ptr = (uintptr_t)&item,2562* };2563* int err, i;2564*2565* // First query the size of the blob we need, this needs to be large2566* // enough to hold our array of regions. 
The kernel will fill out the2567* // item.length for us, which is the number of bytes we need.2568* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);2569* if (err) ...2570*2571* info = calloc(1, item.length);2572* // Now that we allocated the required number of bytes, we call the ioctl2573* // again, this time with the data_ptr pointing to our newly allocated2574* // blob, which the kernel can then populate with the all the region info.2575* item.data_ptr = (uintptr_t)&info,2576*2577* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);2578* if (err) ...2579*2580* // We can now access each region in the array2581* for (i = 0; i < info->num_regions; i++) {2582* struct drm_i915_memory_region_info mr = info->regions[i];2583* u16 class = mr.region.class;2584* u16 instance = mr.region.instance;2585*2586* ....2587* }2588*2589* free(info);2590*/2591struct drm_i915_query_memory_regions {2592/** @num_regions: Number of supported regions */2593__u32 num_regions;25942595/** @rsvd: MBZ */2596__u32 rsvd[3];25972598/** @regions: Info about each supported region */2599struct drm_i915_memory_region_info regions[];2600};26012602/**2603* struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added2604* extension support using struct i915_user_extension.2605*2606* Note that in the future we want to have our buffer flags here, at least for2607* the stuff that is immutable. Previously we would have two ioctls, one to2608* create the object with gem_create, and another to apply various parameters,2609* however this creates some ambiguity for the params which are considered2610* immutable. 
Also in general we're phasing out the various SET/GET ioctls.2611*/2612struct drm_i915_gem_create_ext {2613/**2614* @size: Requested size for the object.2615*2616* The (page-aligned) allocated size for the object will be returned.2617*2618* Note that for some devices we have might have further minimum2619* page-size restrictions(larger than 4K), like for device local-memory.2620* However in general the final size here should always reflect any2621* rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS2622* extension to place the object in device local-memory.2623*/2624__u64 size;2625/**2626* @handle: Returned handle for the object.2627*2628* Object handles are nonzero.2629*/2630__u32 handle;2631/** @flags: MBZ */2632__u32 flags;2633/**2634* @extensions: The chain of extensions to apply to this object.2635*2636* This will be useful in the future when we need to support several2637* different extensions, and we need to apply more than one when2638* creating the object. See struct i915_user_extension.2639*2640* If we don't supply any extensions then we get the same old gem_create2641* behaviour.2642*2643* For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see2644* struct drm_i915_gem_create_ext_memory_regions.2645*/2646#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 02647__u64 extensions;2648};26492650/**2651* struct drm_i915_gem_create_ext_memory_regions - The2652* I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.2653*2654* Set the object with the desired set of placements/regions in priority2655* order. Each entry must be unique and supported by the device.2656*2657* This is provided as an array of struct drm_i915_gem_memory_class_instance, or2658* an equivalent layout of class:instance pair encodings. 
See struct2659* drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to2660* query the supported regions for a device.2661*2662* As an example, on discrete devices, if we wish to set the placement as2663* device local-memory we can do something like:2664*2665* .. code-block:: C2666*2667* struct drm_i915_gem_memory_class_instance region_lmem = {2668* .memory_class = I915_MEMORY_CLASS_DEVICE,2669* .memory_instance = 0,2670* };2671* struct drm_i915_gem_create_ext_memory_regions regions = {2672* .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },2673* .regions = (uintptr_t)®ion_lmem,2674* .num_regions = 1,2675* };2676* struct drm_i915_gem_create_ext create_ext = {2677* .size = 16 * PAGE_SIZE,2678* .extensions = (uintptr_t)®ions,2679* };2680*2681* int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);2682* if (err) ...2683*2684* At which point we get the object handle in &drm_i915_gem_create_ext.handle,2685* along with the final object size in &drm_i915_gem_create_ext.size, which2686* should account for any rounding up, if required.2687*/2688struct drm_i915_gem_create_ext_memory_regions {2689/** @base: Extension link. See struct i915_user_extension. */2690struct i915_user_extension base;26912692/** @pad: MBZ */2693__u32 pad;2694/** @num_regions: Number of elements in the @regions array. */2695__u32 num_regions;2696/**2697* @regions: The regions/placements array.2698*2699* An array of struct drm_i915_gem_memory_class_instance.2700*/2701__u64 regions;2702};27032704#if defined(__cplusplus)2705}2706#endif27072708#endif /* _I915_DRM_H_ */270927102711