GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/include/drm-uapi/amdgpu_drm.h
/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
 *
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kevin E. Martin <[email protected]>
 *    Gareth Hughes <[email protected]>
 *    Keith Whitwell <[email protected]>
 */

#ifndef __AMDGPU_DRM_H__
#define __AMDGPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_AMDGPU_GEM_CREATE 0x00
#define DRM_AMDGPU_GEM_MMAP 0x01
#define DRM_AMDGPU_CTX 0x02
#define DRM_AMDGPU_BO_LIST 0x03
#define DRM_AMDGPU_CS 0x04
#define DRM_AMDGPU_INFO 0x05
#define DRM_AMDGPU_GEM_METADATA 0x06
#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07
#define DRM_AMDGPU_GEM_VA 0x08
#define DRM_AMDGPU_WAIT_CS 0x09
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
#define DRM_AMDGPU_WAIT_FENCES 0x12
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15

#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)

/**
 * DOC: memory domains
 *
 * %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
 * Memory in this pool could be swapped out to disk if there is pressure.
 *
 * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
 * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
 * pages of system memory, allowing the GPU to access system memory in a
 * linearized fashion.
 *
 * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
 * carved out by the BIOS.
 *
 * %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
 * across shader threads.
 *
 * %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
 * execution of all the waves on a device.
 *
 * %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
 * for appending data.
 */
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
                                AMDGPU_GEM_DOMAIN_GTT | \
                                AMDGPU_GEM_DOMAIN_VRAM | \
                                AMDGPU_GEM_DOMAIN_GDS | \
                                AMDGPU_GEM_DOMAIN_GWS | \
                                AMDGPU_GEM_DOMAIN_OA)

/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
/* Flag that CPU access will not work, this VRAM domain is invisible */
#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
/* Flag that USWC attributes should be used for GTT */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
 * for the second page onward should be set to NC. It should never
 * be used by user space applications.
 */
#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
 * releasing the memory
 */
#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9)
/* Flag that BO will be encrypted and that the TMZ bit should be
 * set in the PTEs when mapping this buffer via GPUVM or
 * accessing it with various hw blocks
 */
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
/* Flag that BO will be used only in preemptible context, which does
 * not require GTT memory accounting
 */
#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)

struct drm_amdgpu_gem_create_in {
        /** the requested memory size */
        __u64 bo_size;
        /** physical start_addr alignment in bytes for some HW requirements */
        __u64 alignment;
        /** the requested memory domains */
        __u64 domains;
        /** allocation flags */
        __u64 domain_flags;
};

struct drm_amdgpu_gem_create_out {
        /** returned GEM object handle */
        __u32 handle;
        __u32 _pad;
};

union drm_amdgpu_gem_create {
        struct drm_amdgpu_gem_create_in in;
        struct drm_amdgpu_gem_create_out out;
};
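
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * allocating a 1 MiB GTT buffer with the GEM_CREATE ioctl via plain ioctl(2).
 * `fd` is assumed to be an already-open amdgpu DRM file descriptor;
 * <sys/ioctl.h>, <string.h>, <stdint.h> includes and error handling elided.
 *
 *   union drm_amdgpu_gem_create req;
 *   __u32 bo_handle = 0;
 *
 *   memset(&req, 0, sizeof(req));
 *   req.in.bo_size = 1 << 20;
 *   req.in.alignment = 4096;
 *   req.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *   req.in.domain_flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *   if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req) == 0)
 *           bo_handle = req.out.handle;
 *
 * The returned handle is what the GEM_MMAP, GEM_VA and BO_LIST interfaces
 * below operate on.
 */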

/** Opcode to create new residency list. */
#define AMDGPU_BO_LIST_OP_CREATE 0
/** Opcode to destroy previously created residency list */
#define AMDGPU_BO_LIST_OP_DESTROY 1
/** Opcode to update resource information in the list */
#define AMDGPU_BO_LIST_OP_UPDATE 2

struct drm_amdgpu_bo_list_in {
        /** Type of operation */
        __u32 operation;
        /** Handle of list or 0 if we want to create one */
        __u32 list_handle;
        /** Number of BOs in list */
        __u32 bo_number;
        /** Size of each element describing BO */
        __u32 bo_info_size;
        /** Pointer to array describing BOs */
        __u64 bo_info_ptr;
};

struct drm_amdgpu_bo_list_entry {
        /** Handle of BO */
        __u32 bo_handle;
        /** New (if specified) BO priority to be used during migration */
        __u32 bo_priority;
};

struct drm_amdgpu_bo_list_out {
        /** Handle of resource list */
        __u32 list_handle;
        __u32 _pad;
};

union drm_amdgpu_bo_list {
        struct drm_amdgpu_bo_list_in in;
        struct drm_amdgpu_bo_list_out out;
};

/* context related */
#define AMDGPU_CTX_OP_ALLOC_CTX 1
#define AMDGPU_CTX_OP_FREE_CTX 2
#define AMDGPU_CTX_OP_QUERY_STATE 3
#define AMDGPU_CTX_OP_QUERY_STATE2 4

/* GPU reset status */
#define AMDGPU_CTX_NO_RESET 0
/* this context caused it */
#define AMDGPU_CTX_GUILTY_RESET 1
/* some other context caused it */
#define AMDGPU_CTX_INNOCENT_RESET 2
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3

/* indicates a GPU reset occurred after the ctx was created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
/* indicates VRAM was lost after the ctx was created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicates a job from this context once caused a GPU hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicates errors were detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)

/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
#define AMDGPU_CTX_PRIORITY_LOW -512
#define AMDGPU_CTX_PRIORITY_NORMAL 0
/*
 * When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
 * CAP_SYS_NICE or DRM_MASTER
 */
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023

struct drm_amdgpu_ctx_in {
        /** AMDGPU_CTX_OP_* */
        __u32 op;
        /** For future use, no flags defined so far */
        __u32 flags;
        __u32 ctx_id;
        /** AMDGPU_CTX_PRIORITY_* */
        __s32 priority;
};

union drm_amdgpu_ctx_out {
        struct {
                __u32 ctx_id;
                __u32 _pad;
        } alloc;

        struct {
                /** For future use, no flags defined so far */
                __u64 flags;
                /** Number of resets caused by this context so far. */
                __u32 hangs;
                /** Reset status since the last call of the ioctl. */
                __u32 reset_status;
        } state;
};

union drm_amdgpu_ctx {
        struct drm_amdgpu_ctx_in in;
        union drm_amdgpu_ctx_out out;
};
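
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * allocating a context and later checking, via QUERY_STATE2, whether a GPU
 * reset affected it. `fd` is an assumed open device fd; error handling and
 * includes elided. `recreate_context()` is a hypothetical recovery helper.
 *
 *   union drm_amdgpu_ctx ctx;
 *   __u32 ctx_id;
 *
 *   memset(&ctx, 0, sizeof(ctx));
 *   ctx.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *   ctx.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
 *   ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx);
 *   ctx_id = ctx.out.alloc.ctx_id;
 *
 *   memset(&ctx, 0, sizeof(ctx));
 *   ctx.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
 *   ctx.in.ctx_id = ctx_id;
 *   ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx);
 *   if (ctx.out.state.flags & AMDGPU_CTX_QUERY2_FLAGS_RESET)
 *           recreate_context();
 */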

/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2

struct drm_amdgpu_vm_in {
        /** AMDGPU_VM_OP_* */
        __u32 op;
        __u32 flags;
};

struct drm_amdgpu_vm_out {
        /** For future use, no flags defined so far */
        __u64 flags;
};

union drm_amdgpu_vm {
        struct drm_amdgpu_vm_in in;
        struct drm_amdgpu_vm_out out;
};

/* sched ioctl */
#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE 2

struct drm_amdgpu_sched_in {
        /* AMDGPU_SCHED_OP_* */
        __u32 op;
        __u32 fd;
        /** AMDGPU_CTX_PRIORITY_* */
        __s32 priority;
        __u32 ctx_id;
};

union drm_amdgpu_sched {
        struct drm_amdgpu_sched_in in;
};

/*
 * This is not a reliable API and you should expect it to fail for any
 * number of reasons, and have a fallback path that does not use userptr to
 * perform any operation.
 */
#define AMDGPU_GEM_USERPTR_READONLY (1 << 0)
#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1)
#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2)
#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)

struct drm_amdgpu_gem_userptr {
        __u64 addr;
        __u64 size;
        /* AMDGPU_GEM_USERPTR_* */
        __u32 flags;
        /* Resulting GEM handle */
        __u32 handle;
};
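
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * turning an existing, page-aligned anonymous allocation into a GEM handle.
 * As the comment above warns, callers should keep a fallback (for example a
 * staging GTT BO plus a copy) for when this fails. `fd`, `cpu_ptr` and `size`
 * are assumed to exist; `size` must be a multiple of the page size, and
 * `use_staging_fallback()` is a hypothetical fallback path.
 *
 *   struct drm_amdgpu_gem_userptr up;
 *
 *   memset(&up, 0, sizeof(up));
 *   up.addr = (__u64)(uintptr_t)cpu_ptr;
 *   up.size = size;
 *   up.flags = AMDGPU_GEM_USERPTR_ANONONLY |
 *              AMDGPU_GEM_USERPTR_REGISTER |
 *              AMDGPU_GEM_USERPTR_VALIDATE;
 *   if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &up) != 0)
 *           use_staging_fallback();
 */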

/* SI-CI-VI: */
/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
#define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
#define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
#define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
#define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
#define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
#define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
#define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
#define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
#define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
#define AMDGPU_TILING_NUM_BANKS_MASK 0x3

/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
#define AMDGPU_TILING_SCANOUT_SHIFT 63
#define AMDGPU_TILING_SCANOUT_MASK 0x1

/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
        (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
#define AMDGPU_TILING_GET(value, field) \
        (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
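
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * packing and unpacking GFX9+ tiling metadata with the helpers above.
 * `swizzle_mode` is a placeholder for a value produced by the UMD's
 * addressing library.
 *
 *   __u64 tiling_info = AMDGPU_TILING_SET(SWIZZLE_MODE, swizzle_mode) |
 *                       AMDGPU_TILING_SET(SCANOUT, 1);
 *   __u64 scanout = AMDGPU_TILING_GET(tiling_info, SCANOUT);
 *
 * The packed value is what drm_amdgpu_gem_metadata.data.tiling_info (below)
 * carries between user-space drivers.
 */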

#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2

/** The same structure is shared for input/output */
struct drm_amdgpu_gem_metadata {
        /** GEM Object handle */
        __u32 handle;
        /** Whether to get or set the metadata (AMDGPU_GEM_METADATA_OP_*) */
        __u32 op;
        struct {
                /** For future use, no flags defined so far */
                __u64 flags;
                /** family specific tiling info */
                __u64 tiling_info;
                __u32 data_size_bytes;
                __u32 data[64];
        } data;
};

struct drm_amdgpu_gem_mmap_in {
        /** the GEM object handle */
        __u32 handle;
        __u32 _pad;
};

struct drm_amdgpu_gem_mmap_out {
        /** mmap offset from the vma offset manager */
        __u64 addr_ptr;
};

union drm_amdgpu_gem_mmap {
        struct drm_amdgpu_gem_mmap_in in;
        struct drm_amdgpu_gem_mmap_out out;
};
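
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * CPU-mapping a BO. The ioctl only returns a fake offset into the DRM fd's
 * mmap space; the actual mapping is done with mmap(2). `fd`, `bo_handle` and
 * `bo_size` are assumed from earlier steps; <sys/mman.h> include elided.
 *
 *   union drm_amdgpu_gem_mmap args;
 *   void *cpu_ptr = NULL;
 *
 *   memset(&args, 0, sizeof(args));
 *   args.in.handle = bo_handle;
 *   if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args) == 0)
 *           cpu_ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *                          MAP_SHARED, fd, args.out.addr_ptr);
 */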

struct drm_amdgpu_gem_wait_idle_in {
        /** GEM object handle */
        __u32 handle;
        /** For future use, no flags defined so far */
        __u32 flags;
        /** Absolute timeout to wait */
        __u64 timeout;
};

struct drm_amdgpu_gem_wait_idle_out {
        /** BO status: 0 - BO is idle, 1 - BO is busy */
        __u32 status;
        /** Returned current memory domain */
        __u32 domain;
};

union drm_amdgpu_gem_wait_idle {
        struct drm_amdgpu_gem_wait_idle_in in;
        struct drm_amdgpu_gem_wait_idle_out out;
};

struct drm_amdgpu_wait_cs_in {
        /* Command submission handle
         * handle equals 0 means none to wait for
         * handle equals ~0ull means wait for the latest sequence number
         */
        __u64 handle;
        /** Absolute timeout to wait */
        __u64 timeout;
        __u32 ip_type;
        __u32 ip_instance;
        __u32 ring;
        __u32 ctx_id;
};

struct drm_amdgpu_wait_cs_out {
        /** CS status: 0 - CS completed, 1 - CS still busy */
        __u64 status;
};

union drm_amdgpu_wait_cs {
        struct drm_amdgpu_wait_cs_in in;
        struct drm_amdgpu_wait_cs_out out;
};

struct drm_amdgpu_fence {
        __u32 ctx_id;
        __u32 ip_type;
        __u32 ip_instance;
        __u32 ring;
        __u64 seq_no;
};

struct drm_amdgpu_wait_fences_in {
        /** This points to uint64_t * which points to fences */
        __u64 fences;
        __u32 fence_count;
        __u32 wait_all;
        __u64 timeout_ns;
};

struct drm_amdgpu_wait_fences_out {
        __u32 status;
        __u32 first_signaled;
};

union drm_amdgpu_wait_fences {
        struct drm_amdgpu_wait_fences_in in;
        struct drm_amdgpu_wait_fences_out out;
};

#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1

/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
        /** GEM object handle */
        __u32 handle;
        /** AMDGPU_GEM_OP_* */
        __u32 op;
        /** Input or return value */
        __u64 value;
};

#define AMDGPU_VA_OP_MAP 1
#define AMDGPU_VA_OP_UNMAP 2
#define AMDGPU_VA_OP_CLEAR 3
#define AMDGPU_VA_OP_REPLACE 4

/* Delay the page table update till the next CS */
#define AMDGPU_VM_DELAY_UPDATE (1 << 0)

/* Mapping flags */
/* readable mapping */
#define AMDGPU_VM_PAGE_READABLE (1 << 1)
/* writable mapping */
#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
/* partially resident texture */
#define AMDGPU_VM_PAGE_PRT (1 << 4)
/* MTYPE flags use bit 5 to 8 */
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
/* Use Non Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC (1 << 5)
/* Use Write Combine MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC (2 << 5)
/* Use Cache Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC (3 << 5)
/* Use UnCached MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)

struct drm_amdgpu_gem_va {
        /** GEM object handle */
        __u32 handle;
        __u32 _pad;
        /** AMDGPU_VA_OP_* */
        __u32 operation;
        /** AMDGPU_VM_PAGE_* */
        __u32 flags;
        /** va address to assign. Must be correctly aligned. */
        __u64 va_address;
        /** Specify offset inside of BO to assign. Must be correctly aligned. */
        __u64 offset_in_bo;
        /** Specify mapping size. Must be correctly aligned. */
        __u64 map_size;
};
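
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * mapping a BO into the GPU virtual address space. `gpu_va` is assumed to be
 * a page-aligned address chosen by the UMD inside the range reported by
 * AMDGPU_INFO_DEV_INFO (virtual_address_offset..virtual_address_max).
 *
 *   struct drm_amdgpu_gem_va va;
 *
 *   memset(&va, 0, sizeof(va));
 *   va.handle = bo_handle;
 *   va.operation = AMDGPU_VA_OP_MAP;
 *   va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
 *   va.va_address = gpu_va;
 *   va.offset_in_bo = 0;
 *   va.map_size = bo_size;
 *   ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &va);
 */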

#define AMDGPU_HW_IP_GFX 0
#define AMDGPU_HW_IP_COMPUTE 1
#define AMDGPU_HW_IP_DMA 2
#define AMDGPU_HW_IP_UVD 3
#define AMDGPU_HW_IP_VCE 4
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
#define AMDGPU_HW_IP_NUM 9

#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1

#define AMDGPU_CHUNK_ID_IB 0x01
#define AMDGPU_CHUNK_ID_FENCE 0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09

struct drm_amdgpu_cs_chunk {
        __u32 chunk_id;
        __u32 length_dw;
        __u64 chunk_data;
};

struct drm_amdgpu_cs_in {
        /** Rendering context id */
        __u32 ctx_id;
        /** Handle of resource list associated with CS */
        __u32 bo_list_handle;
        __u32 num_chunks;
        __u32 flags;
        /** this points to __u64 * which point to cs chunks */
        __u64 chunks;
};

struct drm_amdgpu_cs_out {
        __u64 handle;
};

union drm_amdgpu_cs {
        struct drm_amdgpu_cs_in in;
        struct drm_amdgpu_cs_out out;
};

/* Specify flags to be used for IB */

/* This IB should be submitted to CE */
#define AMDGPU_IB_FLAG_CE (1<<0)

/* Preamble flag, which means the IB could be dropped if no context switch */
#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)

/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)

/* The IB fence should do the L2 writeback but not invalidate any shader
 * caches (L2/vL1/sL1/I$). */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)

/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
 * This will reset wave ID counters for the IB.
 */
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)

/* Flag the IB as secure (TMZ) */
#define AMDGPU_IB_FLAGS_SECURE (1 << 5)

/* Tell KMD to flush and invalidate caches */
#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC (1 << 6)

struct drm_amdgpu_cs_chunk_ib {
        __u32 _pad;
        /** AMDGPU_IB_FLAG_* */
        __u32 flags;
        /** Virtual address to begin IB execution */
        __u64 va_start;
        /** Size of submission */
        __u32 ib_bytes;
        /** HW IP to submit to */
        __u32 ip_type;
        /** HW IP index of the same type to submit to */
        __u32 ip_instance;
        /** Ring index to submit to */
        __u32 ring;
};
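
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * submitting a single GFX IB. cs.in.chunks points to an array of __u64
 * values, each holding the user-space address of one struct
 * drm_amdgpu_cs_chunk. `ctx_id`, `bo_list_handle`, `ib_gpu_va` and
 * `ib_size_bytes` are assumed from earlier steps.
 *
 *   struct drm_amdgpu_cs_chunk_ib ib;
 *   struct drm_amdgpu_cs_chunk chunk;
 *   union drm_amdgpu_cs cs;
 *   __u64 chunk_ptr;
 *   __u64 seq_no = 0;
 *
 *   memset(&ib, 0, sizeof(ib));
 *   ib.va_start = ib_gpu_va;
 *   ib.ib_bytes = ib_size_bytes;
 *   ib.ip_type = AMDGPU_HW_IP_GFX;
 *
 *   chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
 *   chunk.length_dw = sizeof(ib) / 4;
 *   chunk.chunk_data = (__u64)(uintptr_t)&ib;
 *   chunk_ptr = (__u64)(uintptr_t)&chunk;
 *
 *   memset(&cs, 0, sizeof(cs));
 *   cs.in.ctx_id = ctx_id;
 *   cs.in.bo_list_handle = bo_list_handle;
 *   cs.in.num_chunks = 1;
 *   cs.in.chunks = (__u64)(uintptr_t)&chunk_ptr;
 *   if (ioctl(fd, DRM_IOCTL_AMDGPU_CS, &cs) == 0)
 *           seq_no = cs.out.handle;
 *
 * The returned sequence number feeds WAIT_CS and FENCE_TO_HANDLE below.
 */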

struct drm_amdgpu_cs_chunk_dep {
        __u32 ip_type;
        __u32 ip_instance;
        __u32 ring;
        __u32 ctx_id;
        __u64 handle;
};

struct drm_amdgpu_cs_chunk_fence {
        __u32 handle;
        __u32 offset;
};

struct drm_amdgpu_cs_chunk_sem {
        __u32 handle;
};

struct drm_amdgpu_cs_chunk_syncobj {
        __u32 handle;
        __u32 flags;
        __u64 point;
};

#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2

union drm_amdgpu_fence_to_handle {
        struct {
                struct drm_amdgpu_fence fence;
                __u32 what;
                __u32 pad;
        } in;
        struct {
                __u32 handle;
        } out;
};
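
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * exporting the fence of a previous submission as a sync_file fd that other
 * APIs can wait on. `ctx_id` and `seq_no` identify the submission (seq_no is
 * the cs.out.handle returned by the CS ioctl above).
 *
 *   union drm_amdgpu_fence_to_handle fth;
 *   int sync_file_fd = -1;
 *
 *   memset(&fth, 0, sizeof(fth));
 *   fth.in.fence.ctx_id = ctx_id;
 *   fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *   fth.in.fence.seq_no = seq_no;
 *   fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *   if (ioctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth) == 0)
 *           sync_file_fd = (int)fth.out.handle;
 */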

struct drm_amdgpu_cs_chunk_data {
        union {
                struct drm_amdgpu_cs_chunk_ib ib_data;
                struct drm_amdgpu_cs_chunk_fence fence_data;
        };
};

/*
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
 */
#define AMDGPU_IDS_FLAGS_FUSION 0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
#define AMDGPU_IDS_FLAGS_TMZ 0x4

/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
/* get the crtc_id from the mode object id */
#define AMDGPU_INFO_CRTC_FROM_ID 0x01
/* query hw IP info */
#define AMDGPU_INFO_HW_IP_INFO 0x02
/* query hw IP instance count for the specified type */
#define AMDGPU_INFO_HW_IP_COUNT 0x03
/* timestamp for GL_ARB_timer_query */
#define AMDGPU_INFO_TIMESTAMP 0x05
/* Query the firmware version */
#define AMDGPU_INFO_FW_VERSION 0x0e
/* Subquery id: Query VCE firmware version */
#define AMDGPU_INFO_FW_VCE 0x1
/* Subquery id: Query UVD firmware version */
#define AMDGPU_INFO_FW_UVD 0x2
/* Subquery id: Query GMC firmware version */
#define AMDGPU_INFO_FW_GMC 0x03
/* Subquery id: Query GFX ME firmware version */
#define AMDGPU_INFO_FW_GFX_ME 0x04
/* Subquery id: Query GFX PFP firmware version */
#define AMDGPU_INFO_FW_GFX_PFP 0x05
/* Subquery id: Query GFX CE firmware version */
#define AMDGPU_INFO_FW_GFX_CE 0x06
/* Subquery id: Query GFX RLC firmware version */
#define AMDGPU_INFO_FW_GFX_RLC 0x07
/* Subquery id: Query GFX MEC firmware version */
#define AMDGPU_INFO_FW_GFX_MEC 0x08
/* Subquery id: Query SMC firmware version */
#define AMDGPU_INFO_FW_SMC 0x0a
/* Subquery id: Query SDMA firmware version */
#define AMDGPU_INFO_FW_SDMA 0x0b
/* Subquery id: Query PSP SOS firmware version */
#define AMDGPU_INFO_FW_SOS 0x0c
/* Subquery id: Query PSP ASD firmware version */
#define AMDGPU_INFO_FW_ASD 0x0d
/* Subquery id: Query VCN firmware version */
#define AMDGPU_INFO_FW_VCN 0x0e
/* Subquery id: Query GFX RLC SRLC firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
/* Subquery id: Query GFX RLC SRLG firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
/* Subquery id: Query GFX RLC SRLS firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
/* Subquery id: Query DMCU firmware version */
#define AMDGPU_INFO_FW_DMCU 0x12
#define AMDGPU_INFO_FW_TA 0x13
/* Subquery id: Query DMCUB firmware version */
#define AMDGPU_INFO_FW_DMCUB 0x14
/* Subquery id: Query TOC firmware version */
#define AMDGPU_INFO_FW_TOC 0x15

/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
#define AMDGPU_INFO_VRAM_USAGE 0x10
/* the used GTT size */
#define AMDGPU_INFO_GTT_USAGE 0x11
/* Information about GDS, etc. resource configuration */
#define AMDGPU_INFO_GDS_CONFIG 0x13
/* Query information about VRAM and GTT domains */
#define AMDGPU_INFO_VRAM_GTT 0x14
/* Query information about registers in MMR address space */
#define AMDGPU_INFO_READ_MMR_REG 0x15
/* Query information about device: rev id, family, etc. */
#define AMDGPU_INFO_DEV_INFO 0x16
/* visible vram usage */
#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS 0x18
/* Query memory about VRAM and GTT domains */
#define AMDGPU_INFO_MEMORY 0x19
/* Query vce clock table */
#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
/* Query vbios related information */
#define AMDGPU_INFO_VBIOS 0x1B
/* Subquery id: Query vbios size */
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
/* Subquery id: Query vbios info */
#define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
#define AMDGPU_INFO_SENSOR 0x1D
/* Subquery id: Query GPU shader clock */
#define AMDGPU_INFO_SENSOR_GFX_SCLK 0x1
/* Subquery id: Query GPU memory clock */
#define AMDGPU_INFO_SENSOR_GFX_MCLK 0x2
/* Subquery id: Query GPU temperature */
#define AMDGPU_INFO_SENSOR_GPU_TEMP 0x3
/* Subquery id: Query GPU load */
#define AMDGPU_INFO_SENSOR_GPU_LOAD 0x4
/* Subquery id: Query average GPU power */
#define AMDGPU_INFO_SENSOR_GPU_AVG_POWER 0x5
/* Subquery id: Query northbridge voltage */
#define AMDGPU_INFO_SENSOR_VDDNB 0x6
/* Subquery id: Query graphics voltage */
#define AMDGPU_INFO_SENSOR_VDDGFX 0x7
/* Subquery id: Query GPU stable pstate shader clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
/* Subquery id: Query GPU stable pstate memory clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features */
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
/* query video encode/decode caps */
#define AMDGPU_INFO_VIDEO_CAPS 0x21
/* Subquery id: Decode */
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1

/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
/* RAS MASK: SDMA */
#define AMDGPU_INFO_RAS_ENABLED_SDMA (1 << 1)
/* RAS MASK: GFX */
#define AMDGPU_INFO_RAS_ENABLED_GFX (1 << 2)
/* RAS MASK: MMHUB */
#define AMDGPU_INFO_RAS_ENABLED_MMHUB (1 << 3)
/* RAS MASK: ATHUB */
#define AMDGPU_INFO_RAS_ENABLED_ATHUB (1 << 4)
/* RAS MASK: PCIE */
#define AMDGPU_INFO_RAS_ENABLED_PCIE (1 << 5)
/* RAS MASK: HDP */
#define AMDGPU_INFO_RAS_ENABLED_HDP (1 << 6)
/* RAS MASK: XGMI */
#define AMDGPU_INFO_RAS_ENABLED_XGMI (1 << 7)
/* RAS MASK: DF */
#define AMDGPU_INFO_RAS_ENABLED_DF (1 << 8)
/* RAS MASK: SMN */
#define AMDGPU_INFO_RAS_ENABLED_SMN (1 << 9)
/* RAS MASK: SEM */
#define AMDGPU_INFO_RAS_ENABLED_SEM (1 << 10)
/* RAS MASK: MP0 */
#define AMDGPU_INFO_RAS_ENABLED_MP0 (1 << 11)
/* RAS MASK: MP1 */
#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)

#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff

struct drm_amdgpu_query_fw {
        /** AMDGPU_INFO_FW_* */
        __u32 fw_type;
        /**
         * Index of the IP if there are more IPs of
         * the same type.
         */
        __u32 ip_instance;
        /**
         * Index of the engine. Whether this is used depends
         * on the firmware type. (e.g. MEC, SDMA)
         */
        __u32 index;
        __u32 _pad;
};

/* Input structure for the INFO ioctl */
struct drm_amdgpu_info {
        /* Where the return value will be stored */
        __u64 return_pointer;
        /* The size of the return value. Just like "size" in "snprintf",
         * it limits how many bytes the kernel can write. */
        __u32 return_size;
        /* The query request id. */
        __u32 query;

        union {
                struct {
                        __u32 id;
                        __u32 _pad;
                } mode_crtc;

                struct {
                        /** AMDGPU_HW_IP_* */
                        __u32 type;
                        /**
                         * Index of the IP if there are more IPs of the same
                         * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
                         */
                        __u32 ip_instance;
                } query_hw_ip;

                struct {
                        __u32 dword_offset;
                        /** number of registers to read */
                        __u32 count;
                        __u32 instance;
                        /** For future use, no flags defined so far */
                        __u32 flags;
                } read_mmr_reg;

                struct drm_amdgpu_query_fw query_fw;

                struct {
                        __u32 type;
                        __u32 offset;
                } vbios_info;

                struct {
                        __u32 type;
                } sensor_info;

                struct {
                        __u32 type;
                } video_cap;
        };
};
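
/*
 * Example (illustrative sketch added by the editor, not part of the UAPI):
 * an INFO query. The kernel writes at most return_size bytes to the buffer
 * at return_pointer; here the device-info structure defined below is used.
 * `fd` is an assumed open device fd; <stdio.h> include elided.
 *
 *   struct drm_amdgpu_info_device dev_info;
 *   struct drm_amdgpu_info request;
 *
 *   memset(&request, 0, sizeof(request));
 *   request.return_pointer = (__u64)(uintptr_t)&dev_info;
 *   request.return_size = sizeof(dev_info);
 *   request.query = AMDGPU_INFO_DEV_INFO;
 *   if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
 *           printf("family %u, vram type %u\n",
 *                  dev_info.family, dev_info.vram_type);
 */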

struct drm_amdgpu_info_gds {
        /** GDS GFX partition size */
        __u32 gds_gfx_partition_size;
        /** GDS compute partition size */
        __u32 compute_partition_size;
        /** total GDS memory size */
        __u32 gds_total_size;
        /** GWS size per GFX partition */
        __u32 gws_per_gfx_partition;
        /** GWS size per compute partition */
        __u32 gws_per_compute_partition;
        /** OA size per GFX partition */
        __u32 oa_per_gfx_partition;
        /** OA size per compute partition */
        __u32 oa_per_compute_partition;
        __u32 _pad;
};

struct drm_amdgpu_info_vram_gtt {
        __u64 vram_size;
        __u64 vram_cpu_accessible_size;
        __u64 gtt_size;
};

struct drm_amdgpu_heap_info {
        /** max. physical memory */
        __u64 total_heap_size;

        /** Theoretical max. available memory in the given heap */
        __u64 usable_heap_size;

        /**
         * Number of bytes allocated in the heap. This includes all processes
         * and private allocations in the kernel. It changes when new buffers
         * are allocated, freed, and moved. It cannot be larger than
         * heap_size.
         */
        __u64 heap_usage;

        /**
         * Theoretical possible max. size of buffer which
         * could be allocated in the given heap
         */
        __u64 max_allocation;
};

struct drm_amdgpu_memory_info {
        struct drm_amdgpu_heap_info vram;
        struct drm_amdgpu_heap_info cpu_accessible_vram;
        struct drm_amdgpu_heap_info gtt;
};

struct drm_amdgpu_info_firmware {
        __u32 ver;
        __u32 feature;
};

struct drm_amdgpu_info_vbios {
        __u8 name[64];
        __u8 vbios_pn[64];
        __u32 version;
        __u32 pad;
        __u8 vbios_ver_str[32];
        __u8 date[32];
};

#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
#define AMDGPU_VRAM_TYPE_GDDR3 3
#define AMDGPU_VRAM_TYPE_GDDR4 4
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM 6
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
#define AMDGPU_VRAM_TYPE_GDDR6 9
#define AMDGPU_VRAM_TYPE_DDR5 10

struct drm_amdgpu_info_device {
        /** PCI Device ID */
        __u32 device_id;
        /** Internal chip revision (A0, A1, etc.) */
        __u32 chip_rev;
        __u32 external_rev;
        /** Revision id in PCI Config space */
        __u32 pci_rev;
        __u32 family;
        __u32 num_shader_engines;
        __u32 num_shader_arrays_per_engine;
        /* in KHz */
        __u32 gpu_counter_freq;
        __u64 max_engine_clock;
        __u64 max_memory_clock;
        /* cu information */
        __u32 cu_active_number;
        /* NOTE: cu_ao_mask is INVALID, DON'T use it */
        __u32 cu_ao_mask;
        __u32 cu_bitmap[4][4];
        /** Render backend pipe mask. One render backend is CB+DB. */
        __u32 enabled_rb_pipes_mask;
        __u32 num_rb_pipes;
        __u32 num_hw_gfx_contexts;
        __u32 _pad;
        __u64 ids_flags;
        /** Starting virtual address for UMDs. */
        __u64 virtual_address_offset;
        /** The maximum virtual address */
        __u64 virtual_address_max;
        /** Required alignment of virtual addresses. */
        __u32 virtual_address_alignment;
        /** Page table entry - fragment size */
        __u32 pte_fragment_size;
        __u32 gart_page_size;
        /** constant engine ram size */
        __u32 ce_ram_size;
        /** video memory type info */
        __u32 vram_type;
        /** video memory bit width */
        __u32 vram_bit_width;
        /* vce harvesting instance */
        __u32 vce_harvest_config;
        /* gfx double offchip LDS buffers */
        __u32 gc_double_offchip_lds_buf;
        /* NGG Primitive Buffer */
        __u64 prim_buf_gpu_addr;
        /* NGG Position Buffer */
        __u64 pos_buf_gpu_addr;
        /* NGG Control Sideband */
        __u64 cntl_sb_buf_gpu_addr;
        /* NGG Parameter Cache */
        __u64 param_buf_gpu_addr;
        __u32 prim_buf_size;
        __u32 pos_buf_size;
        __u32 cntl_sb_buf_size;
        __u32 param_buf_size;
        /* wavefront size */
        __u32 wave_front_size;
        /* shader visible vgprs */
        __u32 num_shader_visible_vgprs;
        /* CU per shader array */
        __u32 num_cu_per_sh;
        /* number of tcc blocks */
        __u32 num_tcc_blocks;
        /* gs vgt table depth */
        __u32 gs_vgt_table_depth;
        /* gs primitive buffer depth */
        __u32 gs_prim_buffer_depth;
        /* max gs wavefront per vgt */
        __u32 max_gs_waves_per_vgt;
        __u32 _pad1;
        /* always on cu bitmap */
        __u32 cu_ao_bitmap[4][4];
        /** Starting high virtual address for UMDs. */
        __u64 high_va_offset;
        /** The maximum high virtual address */
        __u64 high_va_max;
        /* gfx10 pa_sc_tile_steering_override */
        __u32 pa_sc_tile_steering_override;
        /* disabled TCCs */
        __u64 tcc_disabled_mask;
};

struct drm_amdgpu_info_hw_ip {
        /** Version of h/w IP */
        __u32 hw_ip_version_major;
        __u32 hw_ip_version_minor;
        /** Capabilities */
        __u64 capabilities_flags;
        /** command buffer address start alignment */
        __u32 ib_start_alignment;
        /** command buffer size alignment */
        __u32 ib_size_alignment;
        /** Bitmask of available rings. Bit 0 means ring 0, etc. */
        __u32 available_rings;
        __u32 _pad;
};

struct drm_amdgpu_info_num_handles {
        /** Max handles as supported by firmware for UVD */
        __u32 uvd_max_handles;
        /** Handles currently in use for UVD */
        __u32 uvd_used_handles;
};

#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6

struct drm_amdgpu_info_vce_clock_table_entry {
        /** System clock */
        __u32 sclk;
        /** Memory clock */
        __u32 mclk;
        /** VCE clock */
        __u32 eclk;
        __u32 pad;
};

struct drm_amdgpu_info_vce_clock_table {
        struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
        __u32 num_valid_entries;
        __u32 pad;
};

/* query video encode/decode caps */
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8

struct drm_amdgpu_info_video_codec_info {
        __u32 valid;
        __u32 max_width;
        __u32 max_height;
        __u32 max_pixels_per_frame;
        __u32 max_level;
        __u32 pad;
};

struct drm_amdgpu_info_video_caps {
        struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};

/*
 * Supported GPU families
 */
#define AMDGPU_FAMILY_UNKNOWN 0
#define AMDGPU_FAMILY_SI 110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */

#if defined(__cplusplus)
}
#endif

#endif