/*
 * Header for the Direct Rendering Manager
 *
 * Author: Rickard E. (Rik) Faith <[email protected]>
 *
 * Acknowledgments:
 * Dec 1999, Richard Henderson <[email protected]>, move to generic cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_H_
#define _DRM_H_

#if defined(__KERNEL__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#elif defined(__linux__)

#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;

#else /* One of the BSDs */

#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
typedef uint8_t __u8;
typedef int16_t __s16;
typedef uint16_t __u16;
typedef int32_t __s32;
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;
typedef size_t __kernel_size_t;
typedef unsigned long drm_handle_t;

#endif

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;

/*
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/*
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects;
	struct drm_clip_rect *rects;
};

/*
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/*
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer. To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock; /**< lock variable */
	char padding[60]; /**< Pad to cache line */
};

/*
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major; /**< Major version */
	int version_minor; /**< Minor version */
	int version_patchlevel; /**< Patch level */
	__kernel_size_t name_len; /**< Length of name buffer */
	char __user *name; /**< Name of driver */
	__kernel_size_t date_len; /**< Length of date buffer */
	char __user *date; /**< User-space buffer to hold date */
	__kernel_size_t desc_len; /**< Length of desc buffer */
	char __user *desc; /**< User-space buffer to hold desc */
};
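
/*
 * Illustrative usage sketch (not part of the uAPI): DRM_IOCTL_VERSION
 * (defined below) is typically issued twice -- once with zeroed length
 * fields to learn the string lengths, then again with user-allocated
 * buffers, which is what drmGetVersion() does internally.  Assumes "fd" is
 * an open DRM device node (e.g. /dev/dri/card0) and that <sys/ioctl.h>,
 * <stdlib.h> and <string.h> are included; error handling is omitted.
 *
 *	struct drm_version ver;
 *	memset(&ver, 0, sizeof(ver));
 *	ioctl(fd, DRM_IOCTL_VERSION, &ver);
 *	ver.name = malloc(ver.name_len + 1);
 *	ver.date = malloc(ver.date_len + 1);
 *	ver.desc = malloc(ver.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &ver);
 *	ver.name[ver.name_len] = '\0';
 *
 * The first call fills only the *_len fields; the second fills the buffers.
 */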

/*
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	__kernel_size_t unique_len; /**< Length of unique */
	char __user *unique; /**< Unique name for driver instantiation */
};

struct drm_list {
	int count; /**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/*
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/*
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1, /**< no caching, no core dump */
	_DRM_SHM = 2, /**< shared, cached */
	_DRM_AGP = 3, /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};

/*
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04, /**< shared, cached, locked */
	_DRM_KERNEL = 0x08, /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40, /**< Removable mapping */
	_DRM_DRIVER = 0x80 /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id; /**< Context requesting private mapping */
	void *handle; /**< Handle of map */
};

/*
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset; /**< Requested physical address (0 for SAREA)*/
	unsigned long size; /**< Requested physical size (bytes) */
	enum drm_map_type type; /**< Type of memory to map */
	enum drm_map_flags flags; /**< Flags */
	void *handle; /**< User-space: "Handle" to pass to mmap() */
			/**< Kernel-space: kernel-virtual address */
	int mtrr; /**< MTRR slot used */
	/* Private data */
};

/*
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx; /**< Which client desired? */
	int auth; /**< Is client authenticated? */
	unsigned long pid; /**< Process ID */
	unsigned long uid; /**< User ID */
	unsigned long magic; /**< Magic */
	unsigned long iocs; /**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE, /**< Generic value */
	_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ, /**< IRQ */
	_DRM_STAT_PRIMARY, /**< Primary DMA bytes */
	_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
	_DRM_STAT_DMA, /**< DMA */
	_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED /**< Missed DMA opportunity */
	/* Add to the *END* of the list */
};

/*
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/*
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};

/*
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};

/*
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01, /**<
			       * Block until buffer dispatched.
			       *
			       * \note The buffer may not yet have
			       * been processed by the hardware --
			       * getting a hardware lock with the
			       * hardware quiescent will ensure
			       * that the buffer has been
			       * processed.
			       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};

/*
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count; /**< Number of buffers of this size */
	int size; /**< Size in bytes */
	int low_mark; /**< Low water mark */
	int high_mark; /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/*
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count; /**< Entries in list */
	struct drm_buf_desc __user *list;
};

/*
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/*
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx; /**< Index into the master buffer list */
	int total; /**< Buffer size */
	int used; /**< Amount of buffer in use (for DMA) */
	void __user *address; /**< Address of buffer */
};

/*
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count; /**< Length of the buffer list */
#ifdef __cplusplus
	void __user *virt;
#else
	void __user *virtual; /**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list; /**< Buffer information */
};

/*
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context; /**< Context handle */
	int send_count; /**< Number of buffers to send */
	int __user *send_indices; /**< List of handles to buffers */
	int __user *send_sizes; /**< Lengths of data to send */
	enum drm_dma_flags flags; /**< Flags */
	int request_count; /**< Number of buffers requested */
	int request_size; /**< Desired size for buffers */
	int __user *request_indices; /**< Buffer information */
	int __user *request_sizes;
	int granted_count; /**< Number of buffers granted */
};

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/*
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/*
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/*
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/*
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/*
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};

/*
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq; /**< IRQ number */
	int busnum; /**< bus number */
	int devnum; /**< device number */
	int funcnum; /**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
	/* bits 1-6 are reserved for high crtcs */
	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
	_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
	_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/*
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
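
/*
 * Illustrative sketch (not part of the uAPI): waiting for the next vertical
 * blank on the first CRTC.  Request and reply overlay each other, so the
 * same union is passed in and read back.  Assumes "fd" is an open DRM device
 * node and uses DRM_IOCTL_WAIT_VBLANK (defined below); error handling is
 * omitted.
 *
 *	union drm_wait_vblank vbl;
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *
 * On return, vbl.reply.sequence holds the vblank count and tval_sec/tval_usec
 * the timestamp (CLOCK_MONOTONIC when DRM_CAP_TIMESTAMP_MONOTONIC is 1).
 */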

#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/*
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	__u32 crtc;
	__u32 cmd;
};

/*
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode; /**< AGP mode */
};

/*
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size; /**< In bytes -- will round to page boundary */
	unsigned long handle; /**< Used for binding / unbinding */
	unsigned long type; /**< Type of memory to allocate */
	unsigned long physical; /**< Physical used by i810 */
};

/*
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle; /**< From drm_agp_buffer */
	unsigned long offset; /**< In bytes -- will round to page boundary */
};

/*
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base; /* physical address */
	unsigned long aperture_size; /* bytes */
	unsigned long memory_allowed; /* bytes */
	unsigned long memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};

/*
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size; /**< In bytes -- will round to page boundary */
	unsigned long handle; /**< Used for mapping / unmapping */
};

/*
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};

/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
	/** Handle of the object to be closed. */
	__u32 handle;
	__u32 pad;
};

/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
	/** Handle for the object being named */
	__u32 handle;

	/** Returned global name */
	__u32 name;
};

/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
	/** Name of object being opened */
	__u32 name;

	/** Returned handle for the object */
	__u32 handle;

	/** Returned size of the object */
	__u64 size;
};

/**
 * DRM_CAP_DUMB_BUFFER
 *
 * If set to 1, the driver supports creating dumb buffers via the
 * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
 */
#define DRM_CAP_DUMB_BUFFER 0x1
/**
 * DRM_CAP_VBLANK_HIGH_CRTC
 *
 * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
 * in the high bits of &drm_wait_vblank_request.type.
 *
 * Starting kernel version 2.6.39, this capability is always set to 1.
 */
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
/**
 * DRM_CAP_DUMB_PREFERRED_DEPTH
 *
 * The preferred bit depth for dumb buffers.
 *
 * The bit depth is the number of bits used to indicate the color of a single
 * pixel excluding any padding. This is different from the number of bits per
 * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
 * pixel.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
/**
 * DRM_CAP_DUMB_PREFER_SHADOW
 *
 * If set to 1, the driver prefers userspace to render to a shadow buffer
 * instead of directly rendering to a dumb buffer. For best speed, userspace
 * should do streaming ordered memory copies into the dumb buffer and never
 * read from it.
 *
 * Note that this preference only applies to dumb buffers, it's irrelevant for
 * other types of buffers.
 */
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
/**
 * DRM_CAP_PRIME
 *
 * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
 * and &DRM_PRIME_CAP_EXPORT.
 *
 * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
 * &DRM_PRIME_CAP_EXPORT are always advertised.
 *
 * PRIME buffers are exposed as dma-buf file descriptors.
 * See :ref:`prime_buffer_sharing`.
 */
#define DRM_CAP_PRIME 0x5
/**
 * DRM_PRIME_CAP_IMPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
 * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
 *
 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 */
#define DRM_PRIME_CAP_IMPORT 0x1
/**
 * DRM_PRIME_CAP_EXPORT
 *
 * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
 * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
 *
 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
 */
#define DRM_PRIME_CAP_EXPORT 0x2
/**
 * DRM_CAP_TIMESTAMP_MONOTONIC
 *
 * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
 * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
 * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
 * clocks.
 *
 * Starting from kernel version 2.6.39, the default value for this capability
 * is 1. Starting kernel version 4.15, this capability is always set to 1.
 */
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
/**
 * DRM_CAP_ASYNC_PAGE_FLIP
 *
 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
 * page-flips.
 */
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
 * DRM_CAP_CURSOR_WIDTH
 *
 * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
 * width x height combination for the hardware cursor. The intention is that a
 * hardware-agnostic userspace can query a cursor plane size to use.
 *
 * Note that the cross-driver contract is to merely return a valid size;
 * drivers are free to attach another meaning on top, e.g. i915 returns the
 * maximum plane size.
 */
#define DRM_CAP_CURSOR_WIDTH 0x8
/**
 * DRM_CAP_CURSOR_HEIGHT
 *
 * See &DRM_CAP_CURSOR_WIDTH.
 */
#define DRM_CAP_CURSOR_HEIGHT 0x9
/**
 * DRM_CAP_ADDFB2_MODIFIERS
 *
 * If set to 1, the driver supports supplying modifiers in the
 * &DRM_IOCTL_MODE_ADDFB2 ioctl.
 */
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
/**
 * DRM_CAP_PAGE_FLIP_TARGET
 *
 * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
 * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
 * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
 * ioctl.
 */
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
/**
 * DRM_CAP_CRTC_IN_VBLANK_EVENT
 *
 * If set to 1, the kernel supports reporting the CRTC ID in
 * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
 * &DRM_EVENT_FLIP_COMPLETE events.
 *
 * Starting kernel version 4.12, this capability is always set to 1.
 */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
/**
 * DRM_CAP_SYNCOBJ
 *
 * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
 */
#define DRM_CAP_SYNCOBJ 0x13
/**
 * DRM_CAP_SYNCOBJ_TIMELINE
 *
 * If set to 1, the driver supports timeline operations on sync objects. See
 * :ref:`drm_sync_objects`.
 */
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/**
 * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
 *
 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
 * commits.
 */
#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15

/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;
	__u64 value;
};
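
/*
 * Illustrative sketch (not part of the uAPI): querying a capability.
 * User-space sets "capability" and reads back "value".  Assumes an open DRM
 * fd and uses DRM_IOCTL_GET_CAP (defined below); error handling omitted.
 *
 *	struct drm_get_cap cap;
 *	memset(&cap, 0, sizeof(cap));
 *	cap.capability = DRM_CAP_DUMB_BUFFER;
 *	ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
 *
 * A non-zero cap.value then indicates dumb-buffer support.
 */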

/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 3.13.
 */
#define DRM_CLIENT_CAP_STEREO_3D 1

/**
 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
 *
 * If set to 1, the DRM core will expose all planes (overlay, primary, and
 * cursor) to userspace.
 *
 * This capability has been introduced in kernel version 3.15. Starting from
 * kernel version 3.17, this capability is always supported for all drivers.
 */
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2

/**
 * DRM_CLIENT_CAP_ATOMIC
 *
 * If set to 1, the DRM core will expose atomic properties to userspace. This
 * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
 * &DRM_CLIENT_CAP_ASPECT_RATIO.
 *
 * If the driver doesn't support atomic mode-setting, enabling this capability
 * will fail with -EOPNOTSUPP.
 *
 * This capability has been introduced in kernel version 4.0. Starting from
 * kernel version 4.2, this capability is always supported for atomic-capable
 * drivers.
 */
#define DRM_CLIENT_CAP_ATOMIC 3

/**
 * DRM_CLIENT_CAP_ASPECT_RATIO
 *
 * If set to 1, the DRM core will provide aspect ratio information in modes.
 * See ``DRM_MODE_FLAG_PIC_AR_*``.
 *
 * This capability is always supported for all drivers starting from kernel
 * version 4.18.
 */
#define DRM_CLIENT_CAP_ASPECT_RATIO 4

/**
 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
 *
 * If set to 1, the DRM core will expose special connectors to be used for
 * writing back to memory the scene setup in the commit. The client must enable
 * &DRM_CLIENT_CAP_ATOMIC first.
 *
 * This capability is always supported for atomic-capable drivers starting from
 * kernel version 4.19.
 */
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5

/**
 * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
 *
 * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
 * virtualbox) have additional restrictions for cursor planes (thus
 * making cursor planes on those drivers not truly universal), e.g.
 * they need cursor planes to act like one would expect from a mouse
 * cursor and have correctly set hotspot properties.
 * If this client cap is not set, the DRM core will hide the cursor plane on
 * those virtualized drivers because not setting it implies that the
 * client is not capable of dealing with those extra restrictions.
 * Clients which do set cursor hotspot and treat the cursor plane
 * like a mouse cursor should set this property.
 * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
 *
 * Setting this property on drivers which do not special-case
 * cursor planes (i.e. non-virtualized drivers) will return
 * EOPNOTSUPP, which can be used by userspace to gauge
 * requirements of the hardware/drivers they're running on.
 *
 * This capability is always supported for atomic-capable virtualized
 * drivers starting from kernel version 6.6.
 */
#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6

/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
	__u64 capability;
	__u64 value;
};
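
/*
 * Illustrative sketch (not part of the uAPI): opting in to atomic
 * mode-setting.  As documented above, enabling DRM_CLIENT_CAP_ATOMIC also
 * exposes universal planes and aspect-ratio information, and fails with
 * EOPNOTSUPP on drivers without atomic support.  Assumes an open DRM fd and
 * uses DRM_IOCTL_SET_CLIENT_CAP (defined below).
 *
 *	struct drm_set_client_cap cc;
 *	memset(&cc, 0, sizeof(cc));
 *	cc.capability = DRM_CLIENT_CAP_ATOMIC;
 *	cc.value = 1;
 *	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cc))
 *		fall back to the legacy (non-atomic) KMS interfaces;
 */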

#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
	__u32 handle;

	/** Flags; only applicable for handle->fd */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};

struct drm_syncobj_create {
	__u32 handle;
#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
	__u32 flags;
};

struct drm_syncobj_destroy {
	__u32 handle;
	__u32 pad;
};

#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
	__u32 handle;
	__u32 flags;

	__s32 fd;
	__u32 pad;

	__u64 point;
};

struct drm_syncobj_transfer {
	__u32 src_handle;
	__u32 dst_handle;
	__u64 src_point;
	__u64 dst_point;
	__u32 flags;
	__u32 pad;
};

#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
	__u64 handles;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
	/**
	 * @deadline_nsec - fence deadline hint
	 *
	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
	 * set.
	 */
	__u64 deadline_nsec;
};
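
/*
 * Illustrative sketch (not part of the uAPI): waiting for two binary
 * syncobjs to signal.  "handles" carries a user-space pointer cast to 64
 * bits, and "timeout_nsec" is an absolute timeout, here computed from
 * CLOCK_MONOTONIC (an assumption of this sketch).  Assumes valid syncobj
 * handles h0/h1, an open DRM fd, <time.h>, and DRM_IOCTL_SYNCOBJ_WAIT
 * (defined below); error handling omitted.
 *
 *	__u32 objs[2] = { h0, h1 };
 *	struct timespec now;
 *	struct drm_syncobj_wait wait;
 *	clock_gettime(CLOCK_MONOTONIC, &now);
 *	memset(&wait, 0, sizeof(wait));
 *	wait.handles = (__u64)(uintptr_t)objs;
 *	wait.count_handles = 2;
 *	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
 *	wait.timeout_nsec = (now.tv_sec + 1) * 1000000000LL + now.tv_nsec;
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */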

struct drm_syncobj_timeline_wait {
	__u64 handles;
	/* wait on a specific timeline point for every handle */
	__u64 points;
	/* absolute timeout */
	__s64 timeout_nsec;
	__u32 count_handles;
	__u32 flags;
	__u32 first_signaled; /* only valid when not waiting all */
	__u32 pad;
	/**
	 * @deadline_nsec - fence deadline hint
	 *
	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
	 * set.
	 */
	__u64 deadline_nsec;
};

/**
 * struct drm_syncobj_eventfd
 * @handle: syncobj handle.
 * @flags: Zero to wait for the point to be signalled, or
 * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
 * available for the point.
 * @point: syncobj timeline point (set to zero for binary syncobjs).
 * @fd: Existing eventfd to send events to.
 * @pad: Must be zero.
 *
 * Register an eventfd to be signalled by a syncobj. The eventfd counter will
 * be incremented by one.
 */
struct drm_syncobj_eventfd {
	__u32 handle;
	__u32 flags;
	__u64 point;
	__s32 fd;
	__u32 pad;
};


struct drm_syncobj_array {
	__u64 handles;
	__u32 count_handles;
	__u32 pad;
};

#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
	__u64 handles;
	__u64 points;
	__u32 count_handles;
	__u32 flags;
};


/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
	__u32 crtc_id; /* requested crtc_id */
	__u32 active; /* return: crtc output is active */
	__u64 sequence; /* return: most recent vblank sequence */
	__s64 sequence_ns; /* return: most recent time of first pixel out */
};

/* Queue event to be delivered at specified sequence. Time stamp marks
 * when the first pixel of the refresh cycle leaves the display engine
 * for the display
 */
#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */

struct drm_crtc_queue_sequence {
	__u32 crtc_id;
	__u32 flags;
	__u64 sequence; /* on input, target sequence. on output, actual sequence */
	__u64 user_data; /* user data passed to event */
};

#define DRM_CLIENT_NAME_MAX_LEN 64
struct drm_set_client_name {
	__u64 name_len;
	__u64 name;
};


#if defined(__cplusplus)
}
#endif

#include "drm_mode.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
/**
 * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
 *
 * GEM handles are not reference-counted by the kernel. User-space is
 * responsible for managing their lifetime. For example, if user-space imports
 * the same memory object twice on the same DRM file description, the same GEM
 * handle is returned by both imports, and user-space needs to ensure
 * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
 * when a memory object is allocated, then exported and imported again on the
 * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
 * and always returns fresh new GEM handles even if an existing GEM handle
 * already refers to the same memory object before the IOCTL is performed.
 */
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
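
/*
 * Illustrative sketch (not part of the uAPI): releasing a GEM handle.
 * Because handles are not reference-counted by the kernel (see above),
 * user-space that can reach the same handle through several paths must make
 * sure the close is issued exactly once.  Assumes an open DRM fd and a valid
 * "handle"; error handling omitted.
 *
 *	struct drm_gem_close close_args;
 *	memset(&close_args, 0, sizeof(close_args));
 *	close_args.handle = handle;
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 */
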
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)

#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)

/**
 * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
 *
 * User-space sets &drm_prime_handle.handle with the GEM handle to export and
 * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
 * &drm_prime_handle.fd.
 *
 * The export can fail for any driver-specific reason, e.g. because export is
 * not supported for this specific GEM handle (but might be for others).
 *
 * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
 */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
/**
 * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
 *
 * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
 * import, and gets back a GEM handle in &drm_prime_handle.handle.
 * &drm_prime_handle.flags is unused.
 *
 * If an existing GEM handle refers to the memory object backing the DMA-BUF,
 * that GEM handle is returned. Therefore user-space which needs to handle
 * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
 * reference-count duplicated GEM handles. For more information see
 * &DRM_IOCTL_GEM_CLOSE.
 *
 * The import can fail for any driver-specific reason, e.g. because import is
 * only supported for DMA-BUFs allocated on this DRM device.
 *
 * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
 */
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
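
/*
 * Illustrative sketch of the PRIME flows described above (not part of the
 * uAPI): exporting a GEM handle as a DMA-BUF fd, then importing a DMA-BUF fd
 * as a GEM handle.  Assumes an open DRM fd, a valid GEM "handle", a DMA-BUF
 * "dmabuf_fd", and that the relevant DRM_PRIME_CAP_* bits are set; error
 * handling omitted.
 *
 *	struct drm_prime_handle args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.handle = handle;
 *	args.flags = DRM_CLOEXEC;
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	args.fd now holds the exported DMA-BUF file descriptor.
 *
 *	memset(&args, 0, sizeof(args));
 *	args.fd = dmabuf_fd;
 *	ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	args.handle now holds the (possibly pre-existing) GEM handle; see
 *	DRM_IOCTL_GEM_CLOSE above for the duplicate-handle caveat.
 */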

#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)

#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
/**
 * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
 *
 * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
 * argument is a framebuffer object ID.
 *
 * Warning: removing a framebuffer currently in-use on an enabled plane will
 * disable that plane. The CRTC the plane is linked to may also be disabled
 * (depending on driver capabilities).
 */
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

/**
 * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
 *
 * KMS dumb buffers provide a very primitive way to allocate a buffer object
 * suitable for scanout and map it for software rendering. KMS dumb buffers are
 * not suitable for hardware-accelerated rendering or video decoding, and
 * cannot be displayed on any device other than the KMS device they were
 * allocated from. Also see
 * :ref:`kms_dumb_buffer_objects`.
 *
 * The IOCTL argument is a struct drm_mode_create_dumb.
 *
 * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
 * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
 * &DRM_IOCTL_MODE_MAP_DUMB.
 *
 * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
 * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
 * driver preferences for dumb buffers.
 */
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
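
/*
 * Illustrative sketch of the dumb-buffer flow described above (not part of
 * the uAPI): allocate the buffer, wrap it in a legacy framebuffer, and map
 * it for CPU rendering.  struct drm_mode_create_dumb, drm_mode_fb_cmd and
 * drm_mode_map_dumb come from drm_mode.h.  Assumes a DRM master fd, a
 * 640x480 XRGB8888 target and <sys/mman.h>; error handling omitted.
 *
 *	struct drm_mode_create_dumb create = { .width = 640, .height = 480, .bpp = 32 };
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_fb_cmd fb = { .width = 640, .height = 480,
 *				      .pitch = create.pitch, .bpp = 32,
 *				      .depth = 24, .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_ADDFB, &fb);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */
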
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)

#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create)
#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)

#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)

#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)

/**
 * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
 *
 * This queries metadata about a framebuffer. User-space fills
 * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
 * struct as the output.
 *
 * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
 * will be filled with GEM buffer handles. Fresh new GEM handles are always
 * returned, even if another GEM handle referring to the same memory object
 * already exists on the DRM file description. The caller is responsible for
 * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
 * new handle will be returned for multiple planes in case they use the same
 * memory object. Planes are valid until one has a zero handle -- this can be
 * used to compute the number of planes.
 *
 * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
 * until one has a zero &drm_mode_fb_cmd2.pitches.
 *
 * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
 * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
 * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
 *
 * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
 * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
 * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
 * double-close handles which are specified multiple times in the array.
 */
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
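
/*
 * Illustrative sketch of the export flow described above (not part of the
 * uAPI): query the framebuffer, export each plane's handle as a DMA-BUF,
 * then close the GEM handles.  Assumes DRM master (or CAP_SYS_ADMIN), a
 * valid "fb_id", and that duplicate handles across planes are tracked by the
 * caller so none is closed twice; error handling omitted.
 *
 *	struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
 *	ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb);
 *	for (int i = 0; i < 4 && fb.handles[i]; i++) {
 *		struct drm_prime_handle p = { .handle = fb.handles[i],
 *					      .flags = DRM_CLOEXEC };
 *		ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &p);
 *		p.fd is the DMA-BUF for plane i
 *	}
 *	Then close each distinct fb.handles[i] once via DRM_IOCTL_GEM_CLOSE.
 */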
1279
1280
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
1281
1282
/**
1283
* DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
1284
*
1285
* This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
1286
* argument is a framebuffer object ID.
1287
*
1288
* This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
1289
* planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
1290
* alive. When the plane no longer uses the framebuffer (because the
1291
* framebuffer is replaced with another one, or the plane is disabled), the
1292
* framebuffer is cleaned up.
1293
*
1294
* This is useful to implement flicker-free transitions between two processes.
1295
*
1296
* Depending on the threat model, user-space may want to ensure that the
1297
* framebuffer doesn't expose any sensitive user information: closed
1298
* framebuffers attached to a plane can be read back by the next DRM master.
1299
*/
1300
#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
1301
1302
/**
1303
* DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file
1304
*
1305
* Having a name allows for easier tracking and debugging.
1306
* The length of the name (without null ending char) must be
1307
* <= DRM_CLIENT_NAME_MAX_LEN.
1308
* The call will fail if the name contains whitespaces or non-printable chars.
1309
*/
1310
#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name)
1311
1312
/*
1313
* Device specific ioctls should only be in their respective headers
1314
* The device specific ioctl range is from 0x40 to 0x9f.
1315
* Generic IOCTLS restart at 0xA0.
1316
*
1317
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
1318
* drmCommandReadWrite().
1319
*/
1320
#define DRM_COMMAND_BASE 0x40
1321
#define DRM_COMMAND_END 0xA0
1322
1323
/**
1324
* struct drm_event - Header for DRM events
1325
* @type: event type.
1326
* @length: total number of payload bytes (including header).
1327
*
1328
* This struct is a header for events written back to user-space on the DRM FD.
1329
* A read on the DRM FD will always only return complete events: e.g. if the
1330
* read buffer is 100 bytes large and there are two 64 byte events pending,
1331
* only one will be returned.
1332
*
1333
* Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
1334
* up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
1335
* &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
1336
*/
1337
struct drm_event {
1338
__u32 type;
1339
__u32 length;
1340
};
1341
1342
/**
1343
* DRM_EVENT_VBLANK - vertical blanking event
1344
*
1345
* This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
1346
* &_DRM_VBLANK_EVENT flag set.
1347
*
1348
* The event payload is a struct drm_event_vblank.
1349
*/
1350
#define DRM_EVENT_VBLANK 0x01
1351
/**
1352
* DRM_EVENT_FLIP_COMPLETE - page-flip completion event
1353
*
1354
* This event is sent in response to an atomic commit or legacy page-flip with
1355
* the &DRM_MODE_PAGE_FLIP_EVENT flag set.
1356
*
1357
* The event payload is a struct drm_event_vblank.
1358
*/
1359
#define DRM_EVENT_FLIP_COMPLETE 0x02
1360
/**
1361
* DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
1362
*
1363
* This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
1364
*
1365
* The event payload is a struct drm_event_crtc_sequence.
1366
*/
1367
#define DRM_EVENT_CRTC_SEQUENCE 0x03
1368
1369
struct drm_event_vblank {
1370
struct drm_event base;
1371
__u64 user_data;
1372
__u32 tv_sec;
1373
__u32 tv_usec;
1374
__u32 sequence;
1375
__u32 crtc_id; /* 0 on older kernels that do not support this */
1376
};
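
/*
 * Illustrative sketch (not part of the uAPI): draining events from the DRM
 * fd.  As documented for struct drm_event above, a read() only ever returns
 * whole events, each beginning with the generic header.  "handle_flip" is a
 * hypothetical user-space callback; assumes <unistd.h> and a pending event
 * (e.g. a page-flip issued with DRM_MODE_PAGE_FLIP_EVENT); error handling
 * omitted.
 *
 *	char buf[1024];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	for (ssize_t off = 0; off < len; ) {
 *		struct drm_event *e = (struct drm_event *)&buf[off];
 *		if (e->type == DRM_EVENT_FLIP_COMPLETE)
 *			handle_flip((struct drm_event_vblank *)e);
 *		off += e->length;
 *	}
 */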

/* Event delivered at sequence. Time stamp marks when the first pixel
 * of the refresh cycle leaves the display engine for the display
 */
struct drm_event_crtc_sequence {
	struct drm_event base;
	__u64 user_data;
	__s64 time_ns;
	__u64 sequence;
};

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif

#if defined(__cplusplus)
}
#endif

#endif