/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/**
 * DOC: Introduction
 *
 * This documentation describes the Panthor IOCTLs.
 *
 * Just a few generic rules about the data passed to the Panthor IOCTLs:
 *
 * - Structures must be aligned on 64-bit/8-byte. If the object is not
 *   naturally aligned, a padding field must be added.
 * - Fields must be explicitly aligned to their natural type alignment with
 *   pad[0..N] fields.
 * - All padding fields will be checked by the driver to make sure they are
 *   zeroed.
 * - Flags can be added, but not removed/replaced.
 * - New fields can be added to the main structures (the structures
 *   directly passed to the ioctl). Those fields can be added at the end of
 *   the structure, or replace existing padding fields. Any new field being
 *   added must preserve the behavior that existed before those fields were
 *   added when a value of zero is passed.
 * - New fields can be added to indirect objects (objects pointed by the
 *   main structure), iff those objects are passed a size to reflect the
 *   size known by the userspace driver (see drm_panthor_obj_array::stride
 *   or drm_panthor_dev_query::size).
 * - If the kernel driver is too old to know some fields, those will be
 *   ignored if zero, and otherwise rejected (and so will be zero on output).
 * - If userspace is too old to know some fields, those will be zeroed
 *   (input) before the structure is parsed by the kernel driver.
 * - Each new flag/field addition must come with a driver version update so
 *   the userspace driver doesn't have to rely on trial and error to know
 *   which flags are supported.
 * - Structures should not contain unions, as this would defeat the
 *   extensibility of such structures.
 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
 *   at the end of the drm_panthor_ioctl_id enum.
 */

/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * The pgoffset passed to mmap2() is an unsigned long, which forces us to
 * use a different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)
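
/*
 * Example (illustrative sketch, not part of the UAPI): mapping the
 * LATEST_FLUSH_ID register page described above. `fd` is assumed to be an
 * open Panthor DRM node, and the register is assumed to sit at the start
 * of the mapped page; error handling is elided.
 *
 *    #include <sys/mman.h>
 *    #include <unistd.h>
 *
 *    long page_size = sysconf(_SC_PAGESIZE);
 *    volatile __u32 *flush_id_page =
 *            mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *                 DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *    __u32 latest_flush = flush_id_page[0];
 */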

/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,

	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,

	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,

	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,

	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,

	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,

	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,

	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,

	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,

	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,

	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,

	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,

	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,

	/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
	DRM_PANTHOR_BO_SET_LABEL,

	/**
	 * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
	 *
	 * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
	 * type seen by the process that manipulates the FD, such that a 32-bit process can
	 * always map the user MMIO ranges. But this approach doesn't work well for emulators
	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
	 * code. In that case, the kernel sees a 64-bit process and assumes
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
	 * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
	 * pgoff_t size.
	 */
	DRM_PANTHOR_SET_USER_MMIO_OFFSET,
};

/**
 * DOC: IOCTL arguments
 */

/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to changes in
 * future versions of the driver. In order to support this mutability, we pass a stride
 * describing the size of the object as known by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
 * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
 * the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/** @array: User pointer to an array of objects. */
	__u64 array;
};

/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
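
/*
 * Example (illustrative sketch, not part of the UAPI): wrapping a local
 * array of synchronization operations (see struct drm_panthor_sync_op
 * below) so it can be passed to the kernel. The macro derives the stride
 * from the element type known to this copy of the header.
 *
 *    struct drm_panthor_sync_op syncs[2] = {0};
 *    struct drm_panthor_obj_array arr = DRM_PANTHOR_OBJ_ARRAY(2, syncs);
 *    // arr.stride == sizeof(struct drm_panthor_sync_op), arr.count == 2
 */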

/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,

	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,

	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,

	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,

	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};

/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;

	/** @handle: Sync handle. */
	__u32 handle;

	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};

/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,

	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,

	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,

	/**
	 * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information.
	 */
	DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
};

/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id: GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;

	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;

	/** @tiler_features: Tiler features. */
	__u32 tiler_features;

	/** @mem_features: Memory features. */
	__u32 mem_features;

	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;

	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;

	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;

	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;

	/** @coherency_features: Coherency features. */
	__u32 coherency_features;

	/** @texture_features: Texture features. */
	__u32 texture_features[4];

	/** @as_present: Bitmask encoding the number of address spaces exposed by the MMU. */
	__u32 as_present;

	/** @pad0: MBZ. */
	__u32 pad0;

	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;

	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;

	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;

	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;

	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;

	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;

	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;

	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};

/**
 * struct drm_panthor_timestamp_info - Timestamp information
 *
 * Structure grouping all queryable information relating to the GPU timestamp.
 */
struct drm_panthor_timestamp_info {
	/**
	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
	 * unknown.
	 */
	__u64 timestamp_frequency;

	/** @current_timestamp: The current timestamp. */
	__u64 current_timestamp;

	/** @timestamp_offset: The offset of the timestamp timer. */
	__u64 timestamp_offset;
};

/**
 * struct drm_panthor_group_priorities_info - Group priorities information
 *
 * Structure grouping all queryable information relating to the allowed group priorities.
 */
struct drm_panthor_group_priorities_info {
	/**
	 * @allowed_mask: Bitmask of the allowed group priorities.
	 *
	 * Each bit represents a variant of the enum drm_panthor_group_priority.
	 */
	__u8 allowed_mask;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];
};

/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;

	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;

	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};
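
/*
 * Example (illustrative sketch, not part of the UAPI): querying GPU
 * information with the size negotiation described above. `fd` is assumed
 * to be an open Panthor DRM node; error handling is elided.
 *
 *    struct drm_panthor_gpu_info gpu_info = {0};
 *    struct drm_panthor_dev_query query = {
 *            .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *            .size = sizeof(gpu_info),
 *            .pointer = (__u64)(uintptr_t)&gpu_info,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *    // query.size now holds min(sizeof(gpu_info), size known to the kernel)
 */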

/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;

	/** @id: Returned VM ID. */
	__u32 id;

	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space to the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};

/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;

	/** @pad: MBZ. */
	__u32 pad;
};
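
/*
 * Example (illustrative sketch, not part of the UAPI): creating a VM and
 * letting the kernel pick the user VA range, as described above. A
 * non-zero user_va_range could instead be derived from
 * DRM_PANTHOR_MMU_VA_BITS(gpu_info.mmu_features).
 *
 *    struct drm_panthor_vm_create vm_args = {0};
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_args);
 *    // On success, vm_args.id holds the new VM ID, and
 *    // vm_args.user_va_range holds the range picked by the kernel.
 */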

/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};

/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;

	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;

	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;

	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;

	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;

	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};

/**
 * enum drm_panthor_vm_bind_flags - VM bind flags
 */
enum drm_panthor_vm_bind_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
	 * queue instead of being executed synchronously.
	 */
	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};

/**
 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
 */
struct drm_panthor_vm_bind {
	/** @vm_id: VM targeted by the bind request. */
	__u32 vm_id;

	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
	__u32 flags;

	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
	struct drm_panthor_obj_array ops;
};
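
/*
 * Example (illustrative sketch, not part of the UAPI): synchronously
 * mapping a BO at a fixed GPU VA. `vm_id` and `bo_handle` are assumed to
 * come from prior VM_CREATE/BO_CREATE calls; the VA and size are
 * arbitrary. The syncs array is left empty (zeroed), as required when
 * DRM_PANTHOR_VM_BIND_ASYNC is not set.
 *
 *    struct drm_panthor_vm_bind_op op = {
 *            .flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
 *            .bo_handle = bo_handle,
 *            .bo_offset = 0,
 *            .va = 0x400000,
 *            .size = 0x10000,
 *    };
 *    struct drm_panthor_vm_bind bind = {
 *            .vm_id = vm_id,
 *            .flags = 0,
 *            .ops = DRM_PANTHOR_OBJ_ARRAY(1, &op),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);
 */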

/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,

	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};

/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;

	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};

/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};

/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;

	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - it cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};

/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];

	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};

/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,

	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,

	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,

	/**
	 * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_REALTIME,
};

/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;

	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;

	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;

	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;

	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;

	/** @pad: Padding field, MBZ. */
	__u32 pad;

	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;

	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;

	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;

	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submission to queues bound to this group will use this VM.
	 */
	__u32 vm_id;

	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};
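
/*
 * Example (illustrative sketch, not part of the UAPI): creating a group
 * with a single queue, using core masks taken from a gpu_info previously
 * obtained through DEV_QUERY. `vm_id` is assumed to come from VM_CREATE.
 *
 *    struct drm_panthor_queue_create queue = {
 *            .priority = 0,
 *            .ringbuf_size = 65536,
 *    };
 *    struct drm_panthor_group_create group_args = {
 *            .queues = DRM_PANTHOR_OBJ_ARRAY(1, &queue),
 *            .max_compute_cores = __builtin_popcountll(gpu_info.shader_present),
 *            .max_fragment_cores = __builtin_popcountll(gpu_info.shader_present),
 *            .max_tiler_cores = __builtin_popcountll(gpu_info.tiler_present),
 *            .priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *            .compute_core_mask = gpu_info.shader_present,
 *            .fragment_core_mask = gpu_info.shader_present,
 *            .tiler_core_mask = gpu_info.tiler_present,
 *            .vm_id = vm_id,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &group_args);
 *    // group_args.group_handle is passed to GROUP_SUBMIT/GROUP_DESTROY
 */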

/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy. */
	__u32 group_handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This describes the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;

	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction).
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;

	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be 64-byte aligned.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;

	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;

	/** @pad: MBZ. */
	__u32 pad;

	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};

/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};
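
/*
 * Example (illustrative sketch, not part of the UAPI): submitting one job
 * that signals a syncobj on completion. `stream_gpu_va`, `stream_size`,
 * `latest_flush`, `group_handle` and `syncobj_handle` are assumptions
 * standing in for a command stream built by the UMD, a group created
 * through GROUP_CREATE, and a syncobj created beforehand.
 *
 *    struct drm_panthor_sync_op signal_op = {
 *            .flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *                     DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *            .handle = syncobj_handle,
 *    };
 *    struct drm_panthor_queue_submit qsubmit = {
 *            .queue_index = 0,
 *            .stream_size = stream_size,
 *            .stream_addr = stream_gpu_va,
 *            .latest_flush = latest_flush,
 *            .syncs = DRM_PANTHOR_OBJ_ARRAY(1, &signal_op),
 *    };
 *    struct drm_panthor_group_submit submit = {
 *            .group_handle = group_handle,
 *            .queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &submit);
 */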

/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other
	 * groups.
	 *
	 * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and
	 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not.
	 */
	DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2,
};

/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on. */
	__u32 group_handle;

	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;

	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to. */
	__u32 vm_id;

	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;

	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;

	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;

	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight render passes in flight,
	 * the FW will wait for render passes to finish before queuing new
	 * tiler jobs.
	 */
	__u32 target_in_flight;

	/** @handle: Returned heap handle. Passed back to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY. */
	__u32 handle;

	/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the tiler heap context. */
	__u64 tiler_heap_ctx_gpu_va;

	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is formed of heap chunks forming a singly-linked list. This
	 * is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};

/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};
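
/*
 * Example (illustrative sketch, not part of the UAPI): creating a tiler
 * heap with 2 MiB chunks. `vm_id` is assumed to come from VM_CREATE, and
 * the chunk counts are arbitrary.
 *
 *    struct drm_panthor_tiler_heap_create heap_args = {
 *            .vm_id = vm_id,
 *            .initial_chunk_count = 1,
 *            .chunk_size = 2 * 1024 * 1024,
 *            .max_chunks = 16,
 *            .target_in_flight = 64,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap_args);
 *    // heap_args.handle, heap_args.tiler_heap_ctx_gpu_va and
 *    // heap_args.first_heap_chunk_gpu_va are returned by the kernel.
 */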

/**
 * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
 */
struct drm_panthor_bo_set_label {
	/** @handle: Handle of the buffer object to label. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/**
	 * @label: User pointer to a NUL-terminated string.
	 *
	 * Length cannot be greater than 4096.
	 */
	__u64 label;
};

/**
 * struct drm_panthor_set_user_mmio_offset - Arguments passed to
 * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
 *
 * This ioctl is only really useful if you want to support userspace
 * CPU emulation environments where the size of an unsigned long differs
 * between the host and the guest architectures.
 */
struct drm_panthor_set_user_mmio_offset {
	/**
	 * @offset: User MMIO offset to use.
	 *
	 * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
	 *
	 * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
	 * OFFSET_64BIT based on the size of an unsigned long) unless you
	 * have a very good reason to overrule this decision.
	 */
	__u64 offset;
};

/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx IDs.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)

enum {
	DRM_IOCTL_PANTHOR_DEV_QUERY =
		DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
	DRM_IOCTL_PANTHOR_VM_CREATE =
		DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
	DRM_IOCTL_PANTHOR_VM_DESTROY =
		DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
	DRM_IOCTL_PANTHOR_VM_BIND =
		DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
	DRM_IOCTL_PANTHOR_VM_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
	DRM_IOCTL_PANTHOR_BO_CREATE =
		DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
	DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
		DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
	DRM_IOCTL_PANTHOR_GROUP_CREATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
	DRM_IOCTL_PANTHOR_GROUP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
	DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
		DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
	DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
	DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
	DRM_IOCTL_PANTHOR_BO_SET_LABEL =
		DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
	DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
		DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
};

#if defined(__cplusplus)
}
#endif

#endif /* _PANTHOR_DRM_H_ */