Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
4561 views
1
/*
2
* Copyright © 2011 Marek Olšák <[email protected]>
3
* Copyright © 2015 Advanced Micro Devices, Inc.
4
* All Rights Reserved.
5
*
6
* Permission is hereby granted, free of charge, to any person obtaining
7
* a copy of this software and associated documentation files (the
8
* "Software"), to deal in the Software without restriction, including
9
* without limitation the rights to use, copy, modify, merge, publish,
10
* distribute, sub license, and/or sell copies of the Software, and to
11
* permit persons to whom the Software is furnished to do so, subject to
12
* the following conditions:
13
*
14
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21
* USE OR OTHER DEALINGS IN THE SOFTWARE.
22
*
23
* The above copyright notice and this permission notice (including the
24
* next paragraph) shall be included in all copies or substantial portions
25
* of the Software.
26
*/
27
28
#ifndef AMDGPU_CS_H
29
#define AMDGPU_CS_H
30
31
#include "amdgpu_bo.h"
32
#include "util/u_memory.h"
33
#include "drm-uapi/amdgpu_drm.h"
34
35
/* A submission context: wraps a libdrm amdgpu context plus the BO that
 * backs the CPU-visible user fence values.
 */
struct amdgpu_ctx {
   struct amdgpu_winsys *ws;
   amdgpu_context_handle ctx;       /* libdrm context handle */
   amdgpu_bo_handle user_fence_bo;  /* backing storage for user fences */
   uint64_t *user_fence_cpu_address_base; /* CPU mapping of user_fence_bo */
   int refcount;                    /* managed atomically; see amdgpu_ctx_unref */
   /* Counters of command submissions rejected by the kernel; presumably used
    * to detect a lost/guilty context — TODO confirm against the .c file. */
   unsigned initial_num_total_rejected_cs;
   unsigned num_rejected_cs;
};
44
45
/* One entry in a CS buffer list: a referenced BO plus how it is used. */
struct amdgpu_cs_buffer {
   struct amdgpu_winsys_bo *bo;
   union {
      struct {
         uint32_t priority_usage;   /* priority bits for real BOs */
      } real;
      struct {
         uint32_t real_idx; /* index of underlying real BO */
      } slab;
   } u;
   enum radeon_bo_usage usage;      /* read/write usage flags for this CS */
};
57
58
/* The IBs that can be part of one submission. */
enum ib_type {
   IB_PREAMBLE,         /* preamble IB (see amdgpu_cs::preamble_ib_bo) */
   IB_MAIN,             /* the main command stream */
   IB_PARALLEL_COMPUTE, /* optional parallel compute IB (amdgpu_cs::compute_ib) */
   IB_NUM,              /* number of IB types; used as array size */
};
64
65
/* State for one indirect buffer: the big backing buffer that IB space is
 * sub-allocated from, plus chaining bookkeeping.
 */
struct amdgpu_ib {
   struct radeon_cmdbuf *rcs; /* pointer to the driver-owned data */

   /* A buffer out of which new IBs are allocated. */
   struct pb_buffer *big_ib_buffer;
   uint8_t *ib_mapped;        /* CPU mapping of big_ib_buffer */
   unsigned used_ib_space;    /* bytes of big_ib_buffer already consumed */

   /* The maximum seen size from cs_check_space. If the driver does
    * cs_check_space and flush, the newly allocated IB should have at least
    * this size.
    */
   unsigned max_check_space_size;

   unsigned max_ib_size;      /* largest IB size seen so far, in dwords */
   /* Location where the size of a chained IB is patched in after the fact;
    * ptr_ib_size_inside_ib says whether it points into the IB itself. */
   uint32_t *ptr_ib_size;
   bool ptr_ib_size_inside_ib;
   enum ib_type ib_type;
};
84
85
/* A growable array of fence handles (num used out of max allocated). */
struct amdgpu_fence_list {
   struct pipe_fence_handle **list;
   unsigned num;
   unsigned max;
};
90
91
/* The per-submission state: IB chunks, the three buffer lists (real, slab,
 * sparse), fence dependencies, and the result of the flush. Two of these are
 * double-buffered inside struct amdgpu_cs.
 */
struct amdgpu_cs_context {
   struct drm_amdgpu_cs_chunk_ib ib[IB_NUM];

   /* Buffers. */
   unsigned max_real_buffers;
   unsigned num_real_buffers;
   struct amdgpu_cs_buffer *real_buffers;

   unsigned num_slab_buffers;
   unsigned max_slab_buffers;
   struct amdgpu_cs_buffer *slab_buffers;

   unsigned num_sparse_buffers;
   unsigned max_sparse_buffers;
   struct amdgpu_cs_buffer *sparse_buffers;

   /* Hint cache for buffer lookups; see the comment in struct amdgpu_cs. */
   int16_t *buffer_indices_hashlist;

   /* Cache of the most recently added BO, to make repeated adds cheap. */
   struct amdgpu_winsys_bo *last_added_bo;
   unsigned last_added_bo_index;
   unsigned last_added_bo_usage;
   uint32_t last_added_bo_priority_usage;

   struct amdgpu_fence_list fence_dependencies;
   struct amdgpu_fence_list syncobj_dependencies;
   struct amdgpu_fence_list syncobj_to_signal;

   /* The compute IB uses the dependencies above + these: */
   struct amdgpu_fence_list compute_fence_dependencies;
   struct amdgpu_fence_list compute_start_fence_dependencies;

   /* The fence signalled by this submission. */
   struct pipe_fence_handle *fence;

   /* the error returned from cs_flush for non-async submissions */
   int error_code;

   /* TMZ: will this command be submitted using the TMZ flag */
   bool secure;
};
130
131
/* Number of slots in the buffer-lookup hint hashlist (see below). */
#define BUFFER_HASHLIST_SIZE 4096

/* A command stream: the main IB, an optional compute IB, and two
 * double-buffered amdgpu_cs_context submission states.
 */
struct amdgpu_cs {
   struct amdgpu_ib main; /* must be first because this is inherited */
   struct amdgpu_ib compute_ib; /* optional parallel compute IB */
   struct amdgpu_winsys *ws;
   struct amdgpu_ctx *ctx;
   enum ring_type ring_type;
   struct drm_amdgpu_cs_chunk_fence fence_chunk;

   /* We flip between these two CS. While one is being consumed
    * by the kernel in another thread, the other one is being filled
    * by the pipe driver. */
   struct amdgpu_cs_context csc1;
   struct amdgpu_cs_context csc2;
   /* The currently-used CS. */
   struct amdgpu_cs_context *csc;
   /* The CS being currently-owned by the other thread. */
   struct amdgpu_cs_context *cst;
   /* buffer_indices_hashlist[hash(bo)] returns -1 if the bo
    * isn't part of any buffer lists or the index where the bo could be found.
    * Since 1) hash collisions of 2 different bo can happen and 2) we use a
    * single hashlist for the 3 buffer list, this is only a hint.
    * amdgpu_lookup_buffer uses this hint to speed up buffers look up.
    */
   int16_t buffer_indices_hashlist[BUFFER_HASHLIST_SIZE];

   /* Flush CS. */
   void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
   void *flush_data;    /* opaque argument passed back to flush_cs */
   bool stop_exec_on_failure;
   bool noop;           /* if set, submissions are presumably dropped — confirm in the .c file */
   bool has_chaining;   /* whether IBs can be chained instead of flushed */

   /* Signalled when the submit thread finishes consuming cst. */
   struct util_queue_fence flush_completed;
   /* Deferred fence handed out before the actual submission happens. */
   struct pipe_fence_handle *next_fence;
   struct pb_buffer *preamble_ib_bo;
};
169
170
/* A fence, backed either by a kernel syncobj (ctx == NULL) or by a
 * context-based amdgpu_cs_fence plus a CPU-visible user fence value.
 */
struct amdgpu_fence {
   struct pipe_reference reference;
   /* If ctx == NULL, this fence is syncobj-based. */
   uint32_t syncobj;

   struct amdgpu_winsys *ws;
   struct amdgpu_ctx *ctx;  /* submission context */
   struct amdgpu_cs_fence fence;
   uint64_t *user_fence_cpu_address;

   /* If the fence has been submitted. This is unsignalled for deferred fences
    * (cs->next_fence) and while an IB is still being submitted in the submit
    * thread. */
   struct util_queue_fence submitted;

   volatile int signalled; /* bool (int for atomicity) */
};
187
188
static inline bool amdgpu_fence_is_syncobj(struct amdgpu_fence *fence)
189
{
190
return fence->ctx == NULL;
191
}
192
193
static inline void amdgpu_ctx_unref(struct amdgpu_ctx *ctx)
194
{
195
if (p_atomic_dec_zero(&ctx->refcount)) {
196
amdgpu_cs_ctx_free(ctx->ctx);
197
amdgpu_bo_free(ctx->user_fence_bo);
198
FREE(ctx);
199
}
200
}
201
202
/* Point *dst at src, adjusting reference counts. If *dst held the last
 * reference to its old fence, destroy that fence: release its syncobj or
 * context reference, then free the struct. Either pointer may be NULL
 * (pipe_reference handles NULL on both sides).
 */
static inline void amdgpu_fence_reference(struct pipe_fence_handle **dst,
                                          struct pipe_fence_handle *src)
{
   struct amdgpu_fence **adst = (struct amdgpu_fence **)dst;
   struct amdgpu_fence *asrc = (struct amdgpu_fence *)src;

   /* pipe_reference returns true when the old *adst refcount hit zero. */
   if (pipe_reference(&(*adst)->reference, &asrc->reference)) {
      struct amdgpu_fence *fence = *adst;

      /* Release whichever backing the fence has (see amdgpu_fence_is_syncobj). */
      if (amdgpu_fence_is_syncobj(fence))
         amdgpu_cs_destroy_syncobj(fence->ws->dev, fence->syncobj);
      else
         amdgpu_ctx_unref(fence->ctx);

      util_queue_fence_destroy(&fence->submitted);
      FREE(fence);
   }
   *adst = asrc;
}
221
222
int amdgpu_lookup_buffer_any_type(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo);
223
224
static inline struct amdgpu_cs *
225
amdgpu_cs(struct radeon_cmdbuf *rcs)
226
{
227
struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs->priv;
228
assert(!cs || cs->main.ib_type == IB_MAIN);
229
return cs;
230
}
231
232
/* Recover a pointer to the enclosing struct from a pointer to one of its
 * members (the classic container_of idiom). */
#define get_container(member_ptr, container_type, container_member) \
    (container_type *)((char *)(member_ptr) - offsetof(container_type, container_member))
234
235
static inline bool
236
amdgpu_bo_is_referenced_by_cs(struct amdgpu_cs *cs,
237
struct amdgpu_winsys_bo *bo)
238
{
239
return amdgpu_lookup_buffer_any_type(cs->csc, bo) != -1;
240
}
241
242
static inline bool
243
amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
244
struct amdgpu_winsys_bo *bo,
245
enum radeon_bo_usage usage)
246
{
247
int index;
248
struct amdgpu_cs_buffer *buffer;
249
250
index = amdgpu_lookup_buffer_any_type(cs->csc, bo);
251
if (index == -1)
252
return false;
253
254
buffer = bo->bo ? &cs->csc->real_buffers[index] :
255
bo->base.usage & RADEON_FLAG_SPARSE ? &cs->csc->sparse_buffers[index] :
256
&cs->csc->slab_buffers[index];
257
258
return (buffer->usage & usage) != 0;
259
}
260
261
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
262
bool absolute);
263
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
264
unsigned num_fences,
265
struct pipe_fence_handle **fences);
266
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
267
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);
268
269
#endif
270
271