/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

/**
 * DOC: I915_PARAM_VM_BIND_VERSION
 *
 * VM_BIND feature version supported.
 * See typedef drm_i915_getparam_t param.
 *
 * Specifies the VM_BIND feature version supported.
 * The following versions of VM_BIND have been defined:
 *
 * 0: No VM_BIND support.
 *
 * 1: In VM_UNBIND calls, the UMD must specify the exact mappings created
 *    previously with VM_BIND; the ioctl does not support unbinding multiple
 *    mappings or splitting them. Similarly, VM_BIND calls will not replace
 *    any existing mappings.
 *
 * 2: The restrictions on unbinding partial or multiple mappings are
 *    lifted. Similarly, binding will replace any existing mappings in the
 *    given range.
 *
 * See struct drm_i915_gem_vm_bind and struct drm_i915_gem_vm_unbind.
 */
#define I915_PARAM_VM_BIND_VERSION	57
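
/*
 * A minimal usage sketch (not part of the proposed UAPI itself): querying the
 * supported VM_BIND version with the existing DRM_IOCTL_I915_GETPARAM ioctl.
 * Here fd is assumed to be an open DRM device file descriptor, and the caller
 * is assumed to include <sys/ioctl.h> and the i915 UAPI header.
 *
 *	int vm_bind_version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_VM_BIND_VERSION,
 *		.value = &vm_bind_version,
 *	};
 *
 *	// A value of 0 (or an ioctl failure) means no VM_BIND support.
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *		vm_bind_version = 0;
 */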

/**
 * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
 *
 * Flag to opt in to the VM_BIND mode of binding during VM creation.
 * See struct drm_i915_gem_vm_control flags.
 *
 * The older execbuf2 ioctl does not support the VM_BIND mode of operation.
 * For VM_BIND mode, there is a new execbuf3 ioctl which does not accept any
 * execlist (see struct drm_i915_gem_execbuffer3 for more details).
 */
#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
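
/*
 * A minimal usage sketch (assuming an open DRM device fd): creating an
 * address space that operates in VM_BIND mode via the existing
 * DRM_IOCTL_I915_GEM_VM_CREATE ioctl. On success, ctl.vm_id identifies the VM
 * to be used with the VM_BIND/VM_UNBIND and EXECBUFFER3 ioctls below.
 *
 *	struct drm_i915_gem_vm_control ctl = {
 *		.flags = I915_VM_CREATE_FLAGS_USE_VM_BIND,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl) == 0)
 *		vm_id = ctl.vm_id;	// hypothetical variable holding the VM id
 */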

/* VM_BIND related ioctls */
#define DRM_I915_GEM_VM_BIND		0x3d
#define DRM_I915_GEM_VM_UNBIND		0x3e
#define DRM_I915_GEM_EXECBUFFER3	0x3f

#define DRM_IOCTL_I915_GEM_VM_BIND	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
#define DRM_IOCTL_I915_GEM_VM_UNBIND	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
#define DRM_IOCTL_I915_GEM_EXECBUFFER3	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)

/**
 * struct drm_i915_gem_timeline_fence - An input or output timeline fence.
 *
 * The operation will wait for the input fence to signal.
 *
 * The returned output fence will be signaled after the completion of the
 * operation.
 */
struct drm_i915_gem_timeline_fence {
	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
	__u32 handle;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_TIMELINE_FENCE_WAIT:
	 * Wait for the input fence before the operation.
	 *
	 * I915_TIMELINE_FENCE_SIGNAL:
	 * Return operation completion fence as output.
	 */
	__u32 flags;
#define I915_TIMELINE_FENCE_WAIT		(1 << 0)
#define I915_TIMELINE_FENCE_SIGNAL		(1 << 1)
#define __I915_TIMELINE_FENCE_UNKNOWN_FLAGS	(-(I915_TIMELINE_FENCE_SIGNAL << 1))

	/**
	 * @value: A point in the timeline.
	 * Value must be 0 for a binary drm_syncobj. A value of 0 for a
	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
	 * binary one.
	 */
	__u64 value;
};
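
/*
 * A minimal usage sketch (assuming an open DRM device fd): requesting an out
 * fence for an asynchronous bind or unbind. A binary drm_syncobj is created
 * first and its handle is passed with the I915_TIMELINE_FENCE_SIGNAL flag;
 * value stays 0 because the syncobj is binary rather than a timeline.
 *
 *	struct drm_syncobj_create sc = {};
 *
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &sc);
 *
 *	struct drm_i915_gem_timeline_fence out_fence = {
 *		.handle = sc.handle,
 *		.flags  = I915_TIMELINE_FENCE_SIGNAL,
 *		.value  = 0,
 *	};
 */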

/**
 * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
 *
 * This structure is passed to the VM_BIND ioctl and specifies the mapping of
 * a GPU virtual address (VA) range to the section of an object that should be
 * bound in the device page table of the specified address space (VM).
 * The VA range specified must be unique (i.e., not currently bound) and can
 * be mapped to the whole object or to a section of the object (partial
 * binding). Multiple VA mappings can be created to the same section of the
 * object (aliasing).
 *
 * @start, @offset and @length must be 4K page aligned. However, DG2 has a 64K
 * page size for device local memory and a compact page table. On that
 * platform, for binding device local-memory objects, @start, @offset and
 * @length must be 64K aligned. Also, UMDs should not mix the local memory 64K
 * page and the system memory 4K page bindings in the same 2M range.
 *
 * Error code -EINVAL will be returned if @start, @offset and @length are not
 * properly aligned. In version 1 (see I915_PARAM_VM_BIND_VERSION), error code
 * -ENOSPC will be returned if the VA range specified can't be reserved.
 *
 * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
 * are not ordered. Furthermore, parts of the VM_BIND operation can be done
 * asynchronously, if a valid @fence is specified.
 */
struct drm_i915_gem_vm_bind {
	/** @vm_id: VM (address space) id to bind */
	__u32 vm_id;

	/** @handle: Object handle */
	__u32 handle;

	/** @start: Virtual Address start to bind */
	__u64 start;

	/** @offset: Offset in object to bind */
	__u64 offset;

	/** @length: Length of mapping to bind */
	__u64 length;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_GEM_VM_BIND_CAPTURE:
	 * Capture this mapping in the dump upon GPU error.
	 *
	 * Note that @fence carries its own flags.
	 */
	__u64 flags;
#define I915_GEM_VM_BIND_CAPTURE	(1 << 0)

	/**
	 * @fence: Timeline fence for bind completion signaling.
	 *
	 * Timeline fence is of format struct drm_i915_gem_timeline_fence.
	 *
	 * It is an out fence, hence using the I915_TIMELINE_FENCE_WAIT flag
	 * is invalid, and an error will be returned.
	 *
	 * If the I915_TIMELINE_FENCE_SIGNAL flag is not set, then an out
	 * fence is not requested and the binding is completed synchronously.
	 */
	struct drm_i915_gem_timeline_fence fence;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};
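
/*
 * A minimal usage sketch (assuming an open DRM device fd, a VM created in
 * VM_BIND mode with id vm_id, and a GEM object bo_handle of size bo_size):
 * binding the whole object at a 4K aligned VA, synchronously (no
 * I915_TIMELINE_FENCE_SIGNAL requested in fence.flags).
 *
 *	struct drm_i915_gem_vm_bind bind = {
 *		.vm_id  = vm_id,
 *		.handle = bo_handle,
 *		.start  = 0x100000,	// VA chosen by the UMD, 4K aligned
 *		.offset = 0,
 *		.length = bo_size,
 *	};
 *
 *	// Fails with -EINVAL on misalignment; in version 1, with -ENOSPC if
 *	// the VA range cannot be reserved.
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
 */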

/**
 * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
 *
 * This structure is passed to the VM_UNBIND ioctl and specifies the GPU
 * virtual address (VA) range that should be unbound from the device page
 * table of the specified address space (VM). VM_UNBIND will force unbind the
 * specified range from the device page table without waiting for any GPU job
 * to complete. It is the UMD's responsibility to ensure the mapping is no
 * longer in use before calling VM_UNBIND.
 *
 * If the specified mapping is not found, the ioctl will simply return without
 * any error.
 *
 * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
 * are not ordered. Furthermore, parts of the VM_UNBIND operation can be done
 * asynchronously, if a valid @fence is specified.
 */
struct drm_i915_gem_vm_unbind {
	/** @vm_id: VM (address space) id to unbind from */
	__u32 vm_id;

	/** @rsvd: Reserved, MBZ */
	__u32 rsvd;

	/** @start: Virtual Address start to unbind */
	__u64 start;

	/** @length: Length of mapping to unbind */
	__u64 length;

	/**
	 * @flags: Currently reserved, MBZ.
	 *
	 * Note that @fence carries its own flags.
	 */
	__u64 flags;

	/**
	 * @fence: Timeline fence for unbind completion signaling.
	 *
	 * Timeline fence is of format struct drm_i915_gem_timeline_fence.
	 *
	 * It is an out fence, hence using the I915_TIMELINE_FENCE_WAIT flag
	 * is invalid, and an error will be returned.
	 *
	 * If the I915_TIMELINE_FENCE_SIGNAL flag is not set, then an out
	 * fence is not requested and the unbinding is completed synchronously.
	 */
	struct drm_i915_gem_timeline_fence fence;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};
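
/*
 * A minimal usage sketch (assuming the fd, vm_id, and the bind shown in the
 * example above): tearing down the mapping once the UMD knows no GPU job is
 * still using it.
 *
 *	struct drm_i915_gem_vm_unbind unbind = {
 *		.vm_id  = vm_id,
 *		.start  = 0x100000,	// same VA range that was bound
 *		.length = bo_size,
 *	};
 *
 *	// Returns success even if no such mapping exists.
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);
 */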

/**
 * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
 * ioctl.
 *
 * The DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode, and VM_BIND
 * mode only works with this ioctl for submission.
 * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
 */
struct drm_i915_gem_execbuffer3 {
	/**
	 * @ctx_id: Context id
	 *
	 * Only contexts with a user engine map are allowed.
	 */
	__u32 ctx_id;

	/**
	 * @engine_idx: Engine index
	 *
	 * An index in the user engine map of the context specified by @ctx_id.
	 */
	__u32 engine_idx;

	/**
	 * @batch_address: Batch gpu virtual address/es.
	 *
	 * For normal submission, it is the gpu virtual address of the batch
	 * buffer. For parallel submission, it is a pointer to an array of
	 * batch buffer gpu virtual addresses with array size equal to the
	 * number of (parallel) engines involved in that submission (see
	 * struct i915_context_engines_parallel_submit).
	 */
	__u64 batch_address;

	/** @flags: Currently reserved, MBZ */
	__u64 flags;

	/** @rsvd1: Reserved, MBZ */
	__u32 rsvd1;

	/** @fence_count: Number of fences in the @timeline_fences array. */
	__u32 fence_count;

	/**
	 * @timeline_fences: Pointer to an array of timeline fences.
	 *
	 * Timeline fences are of format struct drm_i915_gem_timeline_fence.
	 */
	__u64 timeline_fences;

	/** @rsvd2: Reserved, MBZ */
	__u64 rsvd2;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};
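
/*
 * A minimal usage sketch (assuming an open DRM device fd, a context ctx_id
 * that has a user engine map, a batch VA batch_va previously bound with
 * VM_BIND, and a struct drm_i915_gem_timeline_fence out_fence requesting
 * I915_TIMELINE_FENCE_SIGNAL, as in the fence example above): submitting a
 * single batch and asking for a completion fence.
 *
 *	struct drm_i915_gem_execbuffer3 eb = {
 *		.ctx_id          = ctx_id,
 *		.engine_idx      = 0,	// first engine in the user engine map
 *		.batch_address   = batch_va,
 *		.fence_count     = 1,
 *		.timeline_fences = (__u64)(uintptr_t)&out_fence,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER3, &eb);
 */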

/**
 * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
 * private to the specified VM.
 *
 * See struct drm_i915_gem_create_ext.
 */
struct drm_i915_gem_create_ext_vm_private {
#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @vm_id: Id of the VM to which the object is private */
	__u32 vm_id;
};
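
/*
 * A minimal usage sketch (assuming an open DRM device fd, a VM id vm_id, and
 * the existing DRM_IOCTL_I915_GEM_CREATE_EXT ioctl): creating an object that
 * is private to vm_id by chaining this extension into the create_ext ioctl.
 *
 *	struct drm_i915_gem_create_ext_vm_private vm_private = {
 *		.base.name = I915_GEM_CREATE_EXT_VM_PRIVATE,
 *		.vm_id     = vm_id,
 *	};
 *
 *	struct drm_i915_gem_create_ext create = {
 *		.size       = bo_size,
 *		.extensions = (__u64)(uintptr_t)&vm_private,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
 */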