GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/freedreno/drm/freedreno_priv.h
/*
 * Copyright (C) 2012-2018 Rob Clark <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#ifndef FREEDRENO_PRIV_H_
#define FREEDRENO_PRIV_H_

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <xf86drm.h>

#include "util/hash_table.h"
#include "util/list.h"
#include "util/log.h"
#include "util/simple_mtx.h"
#include "util/u_atomic.h"
#include "util/u_debug.h"
#include "util/u_math.h"

#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"

extern simple_mtx_t table_lock;

/*
 * Stupid/simple growable array implementation:
 */

#define MAX_ARRAY_SIZE ((unsigned short)~0)

static inline void
grow(void **ptr, uint16_t nr, uint16_t *max, uint16_t sz)
{
   assert((nr + 1) < MAX_ARRAY_SIZE);
   if ((nr + 1) > *max) {
      if (*max > MAX_ARRAY_SIZE/2)
         *max = MAX_ARRAY_SIZE;
      else if ((*max * 2) < (nr + 1))
         *max = nr + 5;
      else
         *max = *max * 2;
      *ptr = realloc(*ptr, *max * sz);
   }
}

#define DECLARE_ARRAY(type, name) \
   unsigned short nr_##name, max_##name; \
   type *name;

#define APPEND(x, name, ...) \
   ({ \
      grow((void **)&(x)->name, (x)->nr_##name, &(x)->max_##name, \
           sizeof((x)->name[0])); \
      (x)->name[(x)->nr_##name] = __VA_ARGS__; \
      (x)->nr_##name++; \
   })
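
/* Illustrative sketch (not part of the original header): DECLARE_ARRAY embeds
 * the nr_/max_ counters plus the element pointer in a struct, and APPEND
 * calls grow() to (re)allocate backing storage on demand before writing the
 * new element.  The struct and function names below are hypothetical:
 *
 *    struct fence_list {
 *       DECLARE_ARRAY(uint32_t, fences);
 *    };
 *
 *    static inline void
 *    fence_list_add(struct fence_list *l, uint32_t fence)
 *    {
 *       // grows l->fences when nr_fences + 1 > max_fences
 *       APPEND(l, fences, fence);
 *    }
 */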

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
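
/* Illustrative note (not part of the original header): READ_ONCE forces a
 * single load through a volatile lvalue, so a value that another agent
 * (e.g. the kernel or GPU) may update is re-read from memory rather than
 * cached in a register by the compiler.  Hypothetical usage, with 'shared'
 * and 'seqno' as placeholder names:
 *
 *    while (READ_ONCE(shared->seqno) < wanted_seqno)
 *       ;  // spin or yield
 */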


struct fd_device_funcs {
   int (*bo_new_handle)(struct fd_device *dev, uint32_t size, uint32_t flags,
                        uint32_t *handle);
   struct fd_bo *(*bo_from_handle)(struct fd_device *dev, uint32_t size,
                                   uint32_t handle);
   struct fd_pipe *(*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
                               unsigned prio);
   void (*destroy)(struct fd_device *dev);
};

struct fd_bo_bucket {
   uint32_t size;
   struct list_head list;
};

struct fd_bo_cache {
   struct fd_bo_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;
};

struct fd_device {
   int fd;
   enum fd_version version;
   int32_t refcnt;

   /* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
    *
    *   handle_table: maps handle to fd_bo
    *   name_table: maps flink name to fd_bo
    *
    * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
    * returns a new handle.  So we need to figure out if the bo is already
    * open in the process first, before calling gem-open.
    */
   struct hash_table *handle_table, *name_table;

   const struct fd_device_funcs *funcs;

   struct fd_bo_cache bo_cache;
   struct fd_bo_cache ring_cache;

   int closefd; /* call close(fd) upon destruction */

   /* just for valgrind: */
   int bo_size;

   /**
    * List of deferred submits, protected by submit_lock.  Deferred submits
    * are tracked globally per-device to preserve the order in which they
    * are passed off to the kernel, even if they execute in a different
    * order on the kernel side (e.g. due to different-priority
    * submitqueues).  Once the kernel has them, it is the fences' job to
    * preserve the correct order of execution.
    */
   struct list_head deferred_submits;
   unsigned deferred_cmds;
   simple_mtx_t submit_lock;
};
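
/* Illustrative sketch (assumption, not taken from this header): the two
 * tables let import paths find an existing fd_bo before creating an
 * "evil twin" for the same GEM object.  Roughly, under table_lock (the
 * exact key conventions live in freedreno_bo.c, not here):
 *
 *    struct hash_entry *entry =
 *       _mesa_hash_table_search(dev->handle_table, &handle);
 *    if (entry)
 *       return fd_bo_ref(entry->data);   // already open in this process
 *    // otherwise fall through to DRM_IOCTL_GEM_OPEN / import
 */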

#define foreach_submit(name, list) \
   list_for_each_entry(struct fd_submit, name, list, node)
#define foreach_submit_safe(name, list) \
   list_for_each_entry_safe(struct fd_submit, name, list, node)
#define last_submit(list) \
   list_last_entry(list, struct fd_submit, node)
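
/* Illustrative sketch (hypothetical): the foreach_submit helpers are typed
 * wrappers around the util/list.h iterators, e.g. draining the deferred
 * list while holding dev->submit_lock:
 *
 *    foreach_submit_safe (submit, &dev->deferred_submits) {
 *       list_del(&submit->node);
 *       // ... hand the submit off to the kernel ...
 *    }
 */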

void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
struct fd_bo *fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size,
                                uint32_t flags);
int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
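
/* Illustrative sketch (assumption): callers try the bucket cache first and
 * only allocate a fresh BO on a miss; fd_bo_cache_alloc() may round *size up
 * to the bucket size.  Hypothetical caller, where bo_new() is a placeholder
 * for the backend allocation path:
 *
 *    uint32_t size = req_size;
 *    struct fd_bo *bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
 *    if (!bo)
 *       bo = bo_new(dev, size, flags);
 */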

/* for where @table_lock is already held: */
void fd_bo_del_locked(struct fd_bo *bo);
void fd_device_del_locked(struct fd_device *dev);
void fd_pipe_del_locked(struct fd_pipe *pipe);

struct fd_pipe_funcs {
   struct fd_ringbuffer *(*ringbuffer_new_object)(struct fd_pipe *pipe,
                                                  uint32_t size);
   struct fd_submit *(*submit_new)(struct fd_pipe *pipe);

   /**
    * Flush any deferred submits (if deferred submits are supported by
    * the pipe implementation)
    */
   void (*flush)(struct fd_pipe *pipe, uint32_t fence);

   int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param,
                    uint64_t *value);
   int (*wait)(struct fd_pipe *pipe, const struct fd_fence *fence,
               uint64_t timeout);
   void (*destroy)(struct fd_pipe *pipe);
};

struct fd_pipe_control {
   uint32_t fence;
};
#define control_ptr(pipe, member) \
   (pipe)->control_mem, offsetof(struct fd_pipe_control, member), 0, 0
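
/* Illustrative note (not part of the original header): control_ptr() expands
 * to an argument list, not a single value.  For example,
 *
 *    control_ptr(pipe, fence)
 *
 * becomes
 *
 *    (pipe)->control_mem, offsetof(struct fd_pipe_control, fence), 0, 0
 *
 * which is intended to be pasted into a (bo, offset, ...) style parameter
 * list, such as a ringbuffer relocation helper.
 */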

struct fd_pipe {
   struct fd_device *dev;
   enum fd_pipe_id id;
   uint32_t gpu_id;

   /**
    * Note refcnt is *not* atomic, but protected by table_lock, since the
    * table_lock is held in fd_bo_add_fence(), which is the hotpath.
    */
   int32_t refcnt;

   /**
    * Previous fence seqno allocated for this pipe.  The fd_pipe represents
    * a single timeline; fences allocated by this pipe can be compared to
    * each other, but fences from different pipes are not comparable (as
    * there could be preemption of multiple priority-level submitqueues at
    * play).
    */
   uint32_t last_fence;

   struct fd_bo *control_mem;
   volatile struct fd_pipe_control *control;

   const struct fd_pipe_funcs *funcs;
};

uint32_t fd_pipe_emit_fence(struct fd_pipe *pipe, struct fd_ringbuffer *ring);

static inline void
fd_pipe_flush(struct fd_pipe *pipe, uint32_t fence)
{
   if (!pipe->funcs->flush)
      return;
   pipe->funcs->flush(pipe, fence);
}

struct fd_submit_funcs {
   struct fd_ringbuffer *(*new_ringbuffer)(struct fd_submit *submit,
                                           uint32_t size,
                                           enum fd_ringbuffer_flags flags);
   int (*flush)(struct fd_submit *submit, int in_fence_fd,
                struct fd_submit_fence *out_fence);
   void (*destroy)(struct fd_submit *submit);
};

struct fd_submit {
   int32_t refcnt;
   struct fd_pipe *pipe;
   const struct fd_submit_funcs *funcs;

   struct fd_ringbuffer *primary;
   uint32_t fence;
   struct list_head node; /* node in fd_pipe::deferred_submits */
};

static inline unsigned
fd_dev_count_deferred_cmds(struct fd_device *dev)
{
   unsigned nr = 0;

   simple_mtx_assert_locked(&dev->submit_lock);

   list_for_each_entry (struct fd_submit, submit, &dev->deferred_submits, node) {
      nr += fd_ringbuffer_cmd_count(submit->primary);
   }

   return nr;
}

struct fd_bo_funcs {
   int (*offset)(struct fd_bo *bo, uint64_t *offset);
   int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
   void (*cpu_fini)(struct fd_bo *bo);
   int (*madvise)(struct fd_bo *bo, int willneed);
   uint64_t (*iova)(struct fd_bo *bo);
   void (*set_name)(struct fd_bo *bo, const char *fmt, va_list ap);
   void (*destroy)(struct fd_bo *bo);
};

struct fd_bo_fence {
   /* For non-shared buffers, track the last pipe the buffer was active
    * on, and the per-pipe fence value that indicates when the buffer is
    * idle:
    */
   uint32_t fence;
   struct fd_pipe *pipe;
};

struct fd_bo {
   struct fd_device *dev;
   uint32_t size;
   uint32_t handle;
   uint32_t name;
   int32_t refcnt;
   uint32_t flags; /* flags like FD_RELOC_DUMP to use for relocs to this BO */
   uint64_t iova;
   void *map;
   const struct fd_bo_funcs *funcs;

   enum {
      NO_CACHE = 0,
      BO_CACHE = 1,
      RING_CACHE = 2,
   } bo_reuse : 2;

   /* Buffers that are shared (imported or exported) may be used in
    * other processes, so we need to fall back to the kernel to determine
    * busyness.
    */
   bool shared : 1;

   /* We need to be able to disable userspace fence synchronization for
    * special internal buffers, namely the pipe->control buffer, to avoid
    * a circular reference loop.
    */
   bool nosync : 1;

   struct list_head list; /* bucket-list entry */
   time_t free_time;      /* time when added to bucket-list */

   DECLARE_ARRAY(struct fd_bo_fence, fences);

   /* In the common case, there is no more than one fence attached.
    * This provides storage for the fences table until it grows to
    * be larger than a single element.
    */
   struct fd_bo_fence _inline_fence;
};

void fd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence);
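
/* Illustrative sketch (assumption; the implementation lives in
 * freedreno_bo.c, not this header): the fences array can start out pointing
 * at the embedded _inline_fence so the common single-fence case needs no
 * heap allocation, e.g.
 *
 *    bo->fences = &bo->_inline_fence;
 *    bo->max_fences = 1;
 *
 * and only switches to heap storage (via grow()/APPEND()) once a second
 * fence needs to be attached.
 */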

enum fd_bo_state {
   FD_BO_STATE_IDLE,
   FD_BO_STATE_BUSY,
   FD_BO_STATE_UNKNOWN,
};
enum fd_bo_state fd_bo_state(struct fd_bo *bo);
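
/* Illustrative sketch (hypothetical caller): FD_BO_STATE_UNKNOWN covers
 * shared/imported buffers, whose userspace fences are not tracked, so the
 * kernel has to be asked instead.  A hypothetical busy-check might look like:
 *
 *    switch (fd_bo_state(bo)) {
 *    case FD_BO_STATE_IDLE:
 *       return false;                    // safe to access from the CPU
 *    case FD_BO_STATE_BUSY:
 *       return true;
 *    case FD_BO_STATE_UNKNOWN:
 *       return kernel_busy_check(bo);    // placeholder for an ioctl-based check
 *    }
 */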

struct fd_bo *fd_bo_new_ring(struct fd_device *dev, uint32_t size);

#define enable_debug 0 /* TODO make dynamic */

bool fd_dbg(void);

#define INFO_MSG(fmt, ...) \
   do { \
      if (fd_dbg()) \
         mesa_logi("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
   } while (0)
#define DEBUG_MSG(fmt, ...) \
   do \
      if (enable_debug) { \
         mesa_logd("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
      } \
   while (0)
#define WARN_MSG(fmt, ...) \
   do { \
      mesa_logw("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
   } while (0)
#define ERROR_MSG(fmt, ...) \
   do { \
      mesa_loge("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
   } while (0)
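
/* Illustrative usage (hypothetical): the *_MSG macros prepend the calling
 * function and line number, so format strings only describe the event
 * itself, e.g.
 *
 *    ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
 *    DEBUG_MSG("bo %u: size=%u", bo->handle, bo->size);
 */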

#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))

#if HAVE_VALGRIND
#include <memcheck.h>

/*
 * For tracking the backing memory (if valgrind is enabled, we force a mmap
 * for the purposes of tracking)
 */
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
   if (bo && RUNNING_ON_VALGRIND) {
      VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
   }
}

static inline void
VG_BO_FREE(struct fd_bo *bo)
{
   VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}

/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in fd_bo is used to track the buffers while they
 * are in the cache, so error reporting on the range is disabled while they
 * are cached so that valgrind doesn't squawk about the list traversal.
 */
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
   if (RUNNING_ON_VALGRIND) {
      VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
      VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
      VALGRIND_FREELIKE_BLOCK(bo->map, 0);
   }
}
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
   if (RUNNING_ON_VALGRIND) {
      VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
      VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
      VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
   }
}
#else
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
}
static inline void
VG_BO_FREE(struct fd_bo *bo)
{
}
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
}
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
}
#endif
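
/* Illustrative pairing (assumption; the callers live in the freedreno_bo
 * sources, not this header):
 *
 *    VG_BO_ALLOC(bo);     // after a BO is first created/mapped
 *    VG_BO_RELEASE(bo);   // when the BO is parked in the bucket cache
 *    VG_BO_OBTAIN(bo);    // when a cached BO is handed back out
 *    VG_BO_FREE(bo);      // when the BO is finally destroyed
 *
 * so valgrind attributes the mapping to the current owner rather than to
 * whichever caller first allocated the recycled BO.
 */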

#define FD_DEFINE_CAST(parent, child) \
   static inline struct child *to_##child(struct parent *x) \
   { \
      return (struct child *)x; \
   }
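
/* Illustrative usage (the backend type name here is an example): a backend
 * that wraps the generic structs can generate its downcast helper with
 *
 *    FD_DEFINE_CAST(fd_bo, msm_bo);
 *
 * which defines to_msm_bo(); the cast is only valid if struct msm_bo embeds
 * struct fd_bo as its first member.
 */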

#endif /* FREEDRENO_PRIV_H_ */