GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/iris/iris_context.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <time.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/debug.h"
#include "util/ralloc.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_upload_mgr.h"
#include "drm-uapi/i915_drm.h"
#include "iris_context.h"
#include "iris_resource.h"
#include "iris_screen.h"
#include "common/intel_defines.h"
#include "common/intel_sample_positions.h"

/**
 * The pipe->set_debug_callback() driver hook.
 */
static void
iris_set_debug_callback(struct pipe_context *ctx,
                        const struct pipe_debug_callback *cb)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (cb)
      ice->dbg = *cb;
   else
      memset(&ice->dbg, 0, sizeof(ice->dbg));
}
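
/* A minimal sketch of how the saved callback might later be fired.
 * This is a hypothetical call site; the real callers live elsewhere in
 * the driver, and msg_id/fmt/args are assumed to exist there:
 *
 *    if (ice->dbg.debug_message)
 *       ice->dbg.debug_message(ice->dbg.data, &msg_id,
 *                              PIPE_DEBUG_TYPE_PERF_INFO, fmt, args);
 */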

/**
 * Called from the batch module when it detects a GPU hang.
 *
 * In this case, we've lost our GEM context, and can't rely on any existing
 * state on the GPU. We must mark everything dirty and wipe away any saved
 * assumptions about the last known state of the GPU.
 */
void
iris_lost_context_state(struct iris_batch *batch)
{
   struct iris_context *ice = batch->ice;

   if (batch->name == IRIS_BATCH_RENDER) {
      batch->screen->vtbl.init_render_context(batch);
   } else if (batch->name == IRIS_BATCH_COMPUTE) {
      batch->screen->vtbl.init_compute_context(batch);
   } else {
      unreachable("unhandled batch reset");
   }

   ice->state.dirty = ~0ull;
   ice->state.stage_dirty = ~0ull;
   ice->state.current_hash_scale = 0;
   memset(&ice->shaders.urb, 0, sizeof(ice->shaders.urb));
   memset(ice->state.last_block, 0, sizeof(ice->state.last_block));
   memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
   batch->last_surface_base_address = ~0ull;
   batch->last_aux_map_state = 0;
   batch->screen->vtbl.lost_genx_state(ice, batch);
}
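
/* Both dirty masks are 64-bit bitfields, so ~0ull flags every atom for
 * re-emission.  A sketch of the recovery flow this function assumes,
 * with a hypothetical caller in the batch module:
 *
 *    if (iris_batch_check_for_reset(batch) != PIPE_NO_RESET)
 *       iris_lost_context_state(batch);
 */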

static enum pipe_reset_status
iris_get_device_reset_status(struct pipe_context *ctx)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   enum pipe_reset_status worst_reset = PIPE_NO_RESET;

   /* Check the reset status of each batch's hardware context, and take the
    * worst status (if one was guilty, proclaim guilt).
    */
   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      /* This will also recreate the hardware contexts as necessary, so any
       * future queries will show no resets. We only want to report once.
       */
      enum pipe_reset_status batch_reset =
         iris_batch_check_for_reset(&ice->batches[i]);

      if (batch_reset == PIPE_NO_RESET)
         continue;

      if (worst_reset == PIPE_NO_RESET) {
         worst_reset = batch_reset;
      } else {
         /* GUILTY < INNOCENT < UNKNOWN */
         worst_reset = MIN2(worst_reset, batch_reset);
      }
   }

   if (worst_reset != PIPE_NO_RESET && ice->reset.reset)
      ice->reset.reset(ice->reset.data, worst_reset);

   return worst_reset;
}
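
/* The MIN2() above relies on the declaration order of enum
 * pipe_reset_status in p_defines.h, where smaller non-zero values are
 * more severe:
 *
 *    PIPE_NO_RESET < PIPE_GUILTY_CONTEXT_RESET
 *                  < PIPE_INNOCENT_CONTEXT_RESET
 *                  < PIPE_UNKNOWN_CONTEXT_RESET
 *
 * so taking the minimum of two non-NO_RESET statuses keeps the worst one.
 */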

static void
iris_set_device_reset_callback(struct pipe_context *ctx,
                               const struct pipe_device_reset_callback *cb)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (cb)
      ice->reset = *cb;
   else
      memset(&ice->reset, 0, sizeof(ice->reset));
}

static void
iris_get_sample_position(struct pipe_context *ctx,
                         unsigned sample_count,
                         unsigned sample_index,
                         float *out_value)
{
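   /* The INTEL_SAMPLE_POS_*X() macros below assign to named fields such
    * as u.v._0XOffset; the union overlays those fields onto the plain
    * x[]/y[] arrays so the result can be indexed by sample_index.
    */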
   union {
      struct {
         float x[16];
         float y[16];
      } a;
      struct {
         float _0XOffset, _1XOffset, _2XOffset, _3XOffset,
               _4XOffset, _5XOffset, _6XOffset, _7XOffset,
               _8XOffset, _9XOffset, _10XOffset, _11XOffset,
               _12XOffset, _13XOffset, _14XOffset, _15XOffset;
         float _0YOffset, _1YOffset, _2YOffset, _3YOffset,
               _4YOffset, _5YOffset, _6YOffset, _7YOffset,
               _8YOffset, _9YOffset, _10YOffset, _11YOffset,
               _12YOffset, _13YOffset, _14YOffset, _15YOffset;
      } v;
   } u;
   switch (sample_count) {
   case 1:  INTEL_SAMPLE_POS_1X(u.v._);  break;
   case 2:  INTEL_SAMPLE_POS_2X(u.v._);  break;
   case 4:  INTEL_SAMPLE_POS_4X(u.v._);  break;
   case 8:  INTEL_SAMPLE_POS_8X(u.v._);  break;
   case 16: INTEL_SAMPLE_POS_16X(u.v._); break;
   default: unreachable("invalid sample count");
   }

   out_value[0] = u.a.x[sample_index];
   out_value[1] = u.a.y[sample_index];
}
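
/* For example, a frontend asking where sample 2 of the standard 8x MSAA
 * pattern lies (hypothetical caller, not part of this file):
 *
 *    float pos[2];
 *    ctx->get_sample_position(ctx, 8, 2, pos);
 *    // pos[0] and pos[1] are now fractional offsets within the pixel
 */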

static bool
create_dirty_dmabuf_set(struct iris_context *ice)
{
   assert(ice->dirty_dmabufs == NULL);

   ice->dirty_dmabufs = _mesa_pointer_set_create(ice);
   return ice->dirty_dmabufs != NULL;
}

void
iris_mark_dirty_dmabuf(struct iris_context *ice,
                       struct pipe_resource *res)
{
   if (!_mesa_set_search(ice->dirty_dmabufs, res)) {
      _mesa_set_add(ice->dirty_dmabufs, res);
      pipe_reference(NULL, &res->reference);
   }
}
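
/* The pipe_reference(NULL, &res->reference) above takes an extra
 * reference so the resource stays alive while it sits in the set; the
 * matching release happens in clear_dirty_dmabuf_set() below.
 */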

static void
clear_dirty_dmabuf_set(struct iris_context *ice)
{
   set_foreach(ice->dirty_dmabufs, entry) {
      struct pipe_resource *res = (struct pipe_resource *)entry->key;
      if (pipe_reference(&res->reference, NULL))
         res->screen->resource_destroy(res->screen, res);
   }

   _mesa_set_clear(ice->dirty_dmabufs, NULL);
}

void
iris_flush_dirty_dmabufs(struct iris_context *ice)
{
   set_foreach(ice->dirty_dmabufs, entry) {
      struct pipe_resource *res = (struct pipe_resource *)entry->key;
      ice->ctx.flush_resource(&ice->ctx, res);
   }

   clear_dirty_dmabuf_set(ice);
}
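
/* A sketch of the intended use, assuming a flush path that wants
 * externally-visible (dma-buf) resources resolved before fencing:
 *
 *    iris_flush_dirty_dmabufs(ice);
 *    ...emit and submit the batches...
 */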


/**
 * Destroy a context, freeing any associated memory.
 */
static void
iris_destroy_context(struct pipe_context *ctx)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   if (ctx->stream_uploader)
      u_upload_destroy(ctx->stream_uploader);
   if (ctx->const_uploader)
      u_upload_destroy(ctx->const_uploader);

   clear_dirty_dmabuf_set(ice);

   screen->vtbl.destroy_state(ice);

   for (unsigned i = 0; i < ARRAY_SIZE(ice->shaders.scratch_surfs); i++)
      pipe_resource_reference(&ice->shaders.scratch_surfs[i].res, NULL);

   iris_destroy_program_cache(ice);
   iris_destroy_border_color_pool(ice);
   if (screen->measure.config)
      iris_destroy_ctx_measure(ice);

   u_upload_destroy(ice->state.surface_uploader);
   u_upload_destroy(ice->state.bindless_uploader);
   u_upload_destroy(ice->state.dynamic_uploader);
   u_upload_destroy(ice->query_buffer_uploader);

   iris_batch_free(&ice->batches[IRIS_BATCH_RENDER]);
   iris_batch_free(&ice->batches[IRIS_BATCH_COMPUTE]);
   iris_destroy_binder(&ice->state.binder);

   slab_destroy_child(&ice->transfer_pool);
   slab_destroy_child(&ice->transfer_pool_unsync);

   ralloc_free(ice);
}

#define genX_call(devinfo, func, ...)             \
   switch ((devinfo)->verx10) {                   \
   case 125:                                      \
      gfx125_##func(__VA_ARGS__);                 \
      break;                                      \
   case 120:                                      \
      gfx12_##func(__VA_ARGS__);                  \
      break;                                      \
   case 110:                                      \
      gfx11_##func(__VA_ARGS__);                  \
      break;                                      \
   case 90:                                       \
      gfx9_##func(__VA_ARGS__);                   \
      break;                                      \
   case 80:                                       \
      gfx8_##func(__VA_ARGS__);                   \
      break;                                      \
   default:                                       \
      unreachable("Unknown hardware generation"); \
   }
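
/* For example, on a Gfx12 part (devinfo->verx10 == 120),
 *
 *    genX_call(devinfo, init_state, ice);
 *
 * expands to a switch that calls gfx12_init_state(ice).
 */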

/**
 * Create a context.
 *
 * This is where each context begins.
 */
struct pipe_context *
iris_create_context(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct iris_screen *screen = (struct iris_screen*)pscreen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_context *ice = rzalloc(NULL, struct iris_context);

   if (!ice)
      return NULL;

   struct pipe_context *ctx = &ice->ctx;

   ctx->screen = pscreen;
   ctx->priv = priv;

   ctx->stream_uploader = u_upload_create_default(ctx);
   if (!ctx->stream_uploader) {
      /* ice came from rzalloc(), so it must be released with ralloc_free(),
       * not free().
       */
      ralloc_free(ice);
      return NULL;
   }
   ctx->const_uploader = u_upload_create(ctx, 1024 * 1024,
                                         PIPE_BIND_CONSTANT_BUFFER,
                                         PIPE_USAGE_IMMUTABLE,
                                         IRIS_RESOURCE_FLAG_DEVICE_MEM);
   if (!ctx->const_uploader) {
      u_upload_destroy(ctx->stream_uploader);
      ralloc_free(ice);
      return NULL;
   }

   if (!create_dirty_dmabuf_set(ice)) {
      /* Unwind the uploaders created above before freeing the context. */
      u_upload_destroy(ctx->const_uploader);
      u_upload_destroy(ctx->stream_uploader);
      ralloc_free(ice);
      return NULL;
   }

   ctx->destroy = iris_destroy_context;
   ctx->set_debug_callback = iris_set_debug_callback;
   ctx->set_device_reset_callback = iris_set_device_reset_callback;
   ctx->get_device_reset_status = iris_get_device_reset_status;
   ctx->get_sample_position = iris_get_sample_position;

   iris_init_context_fence_functions(ctx);
   iris_init_blit_functions(ctx);
   iris_init_clear_functions(ctx);
   iris_init_program_functions(ctx);
   iris_init_resource_functions(ctx);
   iris_init_flush_functions(ctx);
   iris_init_perfquery_functions(ctx);

   iris_init_program_cache(ice);
   iris_init_border_color_pool(ice);
   iris_init_binder(ice);

   slab_create_child(&ice->transfer_pool, &screen->transfer_pool);
   slab_create_child(&ice->transfer_pool_unsync, &screen->transfer_pool);

   ice->state.surface_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_SURFACE_MEMZONE |
                      IRIS_RESOURCE_FLAG_DEVICE_MEM);
   ice->state.bindless_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_BINDLESS_MEMZONE |
                      IRIS_RESOURCE_FLAG_DEVICE_MEM);
   ice->state.dynamic_uploader =
      u_upload_create(ctx, 64 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE |
                      IRIS_RESOURCE_FLAG_DEVICE_MEM);

   ice->query_buffer_uploader =
      u_upload_create(ctx, 16 * 1024, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING,
                      0);

   genX_call(devinfo, init_state, ice);
   genX_call(devinfo, init_blorp, ice);
   genX_call(devinfo, init_query, ice);

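   /* Map gallium's context-priority flags onto the priority value the
    * batches will request when creating their kernel contexts. */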
   int priority = 0;
   if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
      priority = INTEL_CONTEXT_HIGH_PRIORITY;
   if (flags & PIPE_CONTEXT_LOW_PRIORITY)
      priority = INTEL_CONTEXT_LOW_PRIORITY;

   if (INTEL_DEBUG & DEBUG_BATCH)
      ice->state.sizes = _mesa_hash_table_u64_create(ice);

   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      iris_init_batch(ice, (enum iris_batch_name) i, priority);
   }

   screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);
   screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);

   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
      return ctx;

   /* Clover doesn't support u_threaded_context */
   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
      return ctx;

   return threaded_context_create(ctx, &screen->transfer_pool,
                                  iris_replace_buffer_storage,
                                  NULL, /* TODO: asynchronous flushes? */
                                  NULL,
                                  false,
                                  &ice->thrctx);
}
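
/* A minimal sketch of how a gallium frontend reaches this entry point,
 * assuming a screen created by the iris winsys code:
 *
 *    struct pipe_context *ctx =
 *       screen->context_create(screen, NULL, PIPE_CONTEXT_PREFER_THREADED);
 */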