GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/vulkan/anv_batch_chain.c
1
/*
2
* Copyright © 2015 Intel Corporation
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
* IN THE SOFTWARE.
22
*/
23
24
#include <assert.h>
25
#include <stdbool.h>
26
#include <string.h>
27
#include <unistd.h>
28
#include <fcntl.h>
29
30
#include "anv_private.h"
31
#include "anv_measure.h"
32
33
#include "genxml/gen8_pack.h"
34
#include "genxml/genX_bits.h"
35
#include "perf/intel_perf.h"
36
37
#include "util/debug.h"
38
39
/** \file anv_batch_chain.c
40
*
41
* This file contains functions related to anv_cmd_buffer as a data
42
* structure. This involves everything required to create and destroy
43
* the actual batch buffers as well as link them together and handle
44
* relocations and surface state. It specifically does *not* contain any
45
* handling of actual vkCmd calls beyond vkCmdExecuteCommands.
46
*/
47
48
/*-----------------------------------------------------------------------*
49
* Functions related to anv_reloc_list
50
*-----------------------------------------------------------------------*/
51
52
VkResult
53
anv_reloc_list_init(struct anv_reloc_list *list,
54
const VkAllocationCallbacks *alloc)
55
{
56
memset(list, 0, sizeof(*list));
57
return VK_SUCCESS;
58
}
59
60
static VkResult
61
anv_reloc_list_init_clone(struct anv_reloc_list *list,
62
const VkAllocationCallbacks *alloc,
63
const struct anv_reloc_list *other_list)
64
{
65
list->num_relocs = other_list->num_relocs;
66
list->array_length = other_list->array_length;
67
68
if (list->num_relocs > 0) {
69
list->relocs =
70
vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
71
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
72
if (list->relocs == NULL)
73
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
74
75
list->reloc_bos =
76
vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
77
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
78
if (list->reloc_bos == NULL) {
79
vk_free(alloc, list->relocs);
80
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
81
}
82
83
memcpy(list->relocs, other_list->relocs,
84
list->array_length * sizeof(*list->relocs));
85
memcpy(list->reloc_bos, other_list->reloc_bos,
86
list->array_length * sizeof(*list->reloc_bos));
87
} else {
88
list->relocs = NULL;
89
list->reloc_bos = NULL;
90
}
91
92
list->dep_words = other_list->dep_words;
93
94
if (list->dep_words > 0) {
95
list->deps =
96
vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
97
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
98
memcpy(list->deps, other_list->deps,
99
list->dep_words * sizeof(BITSET_WORD));
100
} else {
101
list->deps = NULL;
102
}
103
104
return VK_SUCCESS;
105
}
106
107
void
108
anv_reloc_list_finish(struct anv_reloc_list *list,
109
const VkAllocationCallbacks *alloc)
110
{
111
vk_free(alloc, list->relocs);
112
vk_free(alloc, list->reloc_bos);
113
vk_free(alloc, list->deps);
114
}
115
116
static VkResult
117
anv_reloc_list_grow(struct anv_reloc_list *list,
118
const VkAllocationCallbacks *alloc,
119
size_t num_additional_relocs)
120
{
121
if (list->num_relocs + num_additional_relocs <= list->array_length)
122
return VK_SUCCESS;
123
124
size_t new_length = MAX2(16, list->array_length * 2);
125
while (new_length < list->num_relocs + num_additional_relocs)
126
new_length *= 2;
127
128
struct drm_i915_gem_relocation_entry *new_relocs =
129
vk_realloc(alloc, list->relocs,
130
new_length * sizeof(*list->relocs), 8,
131
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
132
if (new_relocs == NULL)
133
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
134
list->relocs = new_relocs;
135
136
struct anv_bo **new_reloc_bos =
137
vk_realloc(alloc, list->reloc_bos,
138
new_length * sizeof(*list->reloc_bos), 8,
139
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
140
if (new_reloc_bos == NULL)
141
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
142
list->reloc_bos = new_reloc_bos;
143
144
list->array_length = new_length;
145
146
return VK_SUCCESS;
147
}
148
149
static VkResult
150
anv_reloc_list_grow_deps(struct anv_reloc_list *list,
151
const VkAllocationCallbacks *alloc,
152
uint32_t min_num_words)
153
{
154
if (min_num_words <= list->dep_words)
155
return VK_SUCCESS;
156
157
uint32_t new_length = MAX2(32, list->dep_words * 2);
158
while (new_length < min_num_words)
159
new_length *= 2;
160
161
BITSET_WORD *new_deps =
162
vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
163
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
164
if (new_deps == NULL)
165
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
166
list->deps = new_deps;
167
168
/* Zero out the new data */
169
memset(list->deps + list->dep_words, 0,
170
(new_length - list->dep_words) * sizeof(BITSET_WORD));
171
list->dep_words = new_length;
172
173
return VK_SUCCESS;
174
}
175
176
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
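
/* READ_ONCE expands to a load through a volatile-qualified lvalue, so the
 * compiler performs exactly one read and cannot cache or re-fetch the value
 * across the statement.  It is used below to snapshot anv_bo::offset, which
 * may be updated by another thread after a previous execbuf.  A minimal
 * sketch of the intended usage ("bo" is a hypothetical pointer):
 *
 *    uint64_t offset = READ_ONCE(bo->offset);
 *    // 'offset' is now a stable local copy; a later direct read of
 *    // bo->offset may legitimately observe a different value.
 */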
177
178
VkResult
179
anv_reloc_list_add_bo(struct anv_reloc_list *list,
180
const VkAllocationCallbacks *alloc,
181
struct anv_bo *target_bo)
182
{
183
assert(!target_bo->is_wrapper);
184
assert(target_bo->flags & EXEC_OBJECT_PINNED);
185
186
uint32_t idx = target_bo->gem_handle;
187
VkResult result = anv_reloc_list_grow_deps(list, alloc,
188
(idx / BITSET_WORDBITS) + 1);
189
if (unlikely(result != VK_SUCCESS))
190
return result;
191
192
BITSET_SET(list->deps, idx);
193
194
return VK_SUCCESS;
195
}
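
/* The dependency set filled in above is indexed directly by GEM handle:
 * handle N lands in deps[N / BITSET_WORDBITS], bit N % BITSET_WORDBITS.
 * A small worked example, assuming 32-bit BITSET_WORDs (the numbers are
 * illustrative only):
 *
 *    uint32_t idx  = 70;                      // gem_handle
 *    uint32_t word = idx / BITSET_WORDBITS;   // 2
 *    uint32_t bit  = idx % BITSET_WORDBITS;   // 6  -> BITSET_SET(deps, 70)
 *
 * which is why the grow call above only needs (idx / BITSET_WORDBITS) + 1
 * words to be valid.
 */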
196
197
VkResult
198
anv_reloc_list_add(struct anv_reloc_list *list,
199
const VkAllocationCallbacks *alloc,
200
uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
201
uint64_t *address_u64_out)
202
{
203
struct drm_i915_gem_relocation_entry *entry;
204
int index;
205
206
struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
207
uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
208
if (address_u64_out)
209
*address_u64_out = target_bo_offset + delta;
210
211
assert(unwrapped_target_bo->gem_handle > 0);
212
assert(unwrapped_target_bo->refcount > 0);
213
214
if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED)
215
return anv_reloc_list_add_bo(list, alloc, unwrapped_target_bo);
216
217
VkResult result = anv_reloc_list_grow(list, alloc, 1);
218
if (result != VK_SUCCESS)
219
return result;
220
221
/* XXX: Can we use I915_EXEC_HANDLE_LUT? */
222
index = list->num_relocs++;
223
list->reloc_bos[index] = target_bo;
224
entry = &list->relocs[index];
225
entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
226
entry->delta = delta;
227
entry->offset = offset;
228
entry->presumed_offset = target_bo_offset;
229
entry->read_domains = 0;
230
entry->write_domain = 0;
231
VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
232
233
return VK_SUCCESS;
234
}
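
/* A sketch of typical usage (the variable names here are hypothetical and
 * error handling is omitted):
 *
 *    uint64_t address;
 *    VkResult result = anv_reloc_list_add(list, alloc, state_offset,
 *                                         target_bo, delta, &address);
 *
 * In both paths 'address' returns the presumed GPU address (target offset +
 * delta) that the caller writes at 'state_offset'; for pinned (softpin) BOs
 * only a BO dependency is recorded, otherwise a drm_i915_gem_relocation_entry
 * is appended so the kernel can patch the location if the BO moves.
 */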
235
236
static void
237
anv_reloc_list_clear(struct anv_reloc_list *list)
238
{
239
list->num_relocs = 0;
240
if (list->dep_words > 0)
241
memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
242
}
243
244
static VkResult
245
anv_reloc_list_append(struct anv_reloc_list *list,
246
const VkAllocationCallbacks *alloc,
247
struct anv_reloc_list *other, uint32_t offset)
248
{
249
VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
250
if (result != VK_SUCCESS)
251
return result;
252
253
if (other->num_relocs > 0) {
254
memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
255
other->num_relocs * sizeof(other->relocs[0]));
256
memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
257
other->num_relocs * sizeof(other->reloc_bos[0]));
258
259
for (uint32_t i = 0; i < other->num_relocs; i++)
260
list->relocs[i + list->num_relocs].offset += offset;
261
262
list->num_relocs += other->num_relocs;
263
}
264
265
anv_reloc_list_grow_deps(list, alloc, other->dep_words);
266
for (uint32_t w = 0; w < other->dep_words; w++)
267
list->deps[w] |= other->deps[w];
268
269
return VK_SUCCESS;
270
}
271
272
/*-----------------------------------------------------------------------*
273
* Functions related to anv_batch
274
*-----------------------------------------------------------------------*/
275
276
void *
277
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
278
{
279
if (batch->next + num_dwords * 4 > batch->end) {
280
VkResult result = batch->extend_cb(batch, batch->user_data);
281
if (result != VK_SUCCESS) {
282
anv_batch_set_error(batch, result);
283
return NULL;
284
}
285
}
286
287
void *p = batch->next;
288
289
batch->next += num_dwords * 4;
290
assert(batch->next <= batch->end);
291
292
return p;
293
}
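
/* Callers reserve space and then fill it in.  A NULL return means the batch
 * could not be extended; the error has already been recorded on the batch
 * via anv_batch_set_error().  Sketch only (the dword values are
 * hypothetical):
 *
 *    uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch, 2);
 *    if (dw != NULL) {
 *       dw[0] = first_dword;
 *       dw[1] = second_dword;
 *    }
 */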
294
295
struct anv_address
296
anv_batch_address(struct anv_batch *batch, void *batch_location)
297
{
298
assert(batch->start < batch_location);
299
300
/* Allow a jump at the current location of the batch. */
301
assert(batch->next >= batch_location);
302
303
return anv_address_add(batch->start_addr, batch_location - batch->start);
304
}
305
306
void
307
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
308
{
309
uint32_t size, offset;
310
311
size = other->next - other->start;
312
assert(size % 4 == 0);
313
314
if (batch->next + size > batch->end) {
315
VkResult result = batch->extend_cb(batch, batch->user_data);
316
if (result != VK_SUCCESS) {
317
anv_batch_set_error(batch, result);
318
return;
319
}
320
}
321
322
assert(batch->next + size <= batch->end);
323
324
VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
325
memcpy(batch->next, other->start, size);
326
327
offset = batch->next - batch->start;
328
VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
329
other->relocs, offset);
330
if (result != VK_SUCCESS) {
331
anv_batch_set_error(batch, result);
332
return;
333
}
334
335
batch->next += size;
336
}
337
338
/*-----------------------------------------------------------------------*
339
* Functions related to anv_batch_bo
340
*-----------------------------------------------------------------------*/
341
342
static VkResult
343
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
344
uint32_t size,
345
struct anv_batch_bo **bbo_out)
346
{
347
VkResult result;
348
349
struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
350
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
351
if (bbo == NULL)
352
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
353
354
result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
355
size, &bbo->bo);
356
if (result != VK_SUCCESS)
357
goto fail_alloc;
358
359
result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
360
if (result != VK_SUCCESS)
361
goto fail_bo_alloc;
362
363
*bbo_out = bbo;
364
365
return VK_SUCCESS;
366
367
fail_bo_alloc:
368
anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
369
fail_alloc:
370
vk_free(&cmd_buffer->pool->alloc, bbo);
371
372
return result;
373
}
374
375
static VkResult
376
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
377
const struct anv_batch_bo *other_bbo,
378
struct anv_batch_bo **bbo_out)
379
{
380
VkResult result;
381
382
struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
383
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
384
if (bbo == NULL)
385
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
386
387
result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
388
other_bbo->bo->size, &bbo->bo);
389
if (result != VK_SUCCESS)
390
goto fail_alloc;
391
392
result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
393
&other_bbo->relocs);
394
if (result != VK_SUCCESS)
395
goto fail_bo_alloc;
396
397
bbo->length = other_bbo->length;
398
memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
399
*bbo_out = bbo;
400
401
return VK_SUCCESS;
402
403
fail_bo_alloc:
404
anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
405
fail_alloc:
406
vk_free(&cmd_buffer->pool->alloc, bbo);
407
408
return result;
409
}
410
411
static void
412
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
413
size_t batch_padding)
414
{
415
anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
416
bbo->bo->map, bbo->bo->size - batch_padding);
417
batch->relocs = &bbo->relocs;
418
anv_reloc_list_clear(&bbo->relocs);
419
}
420
421
static void
422
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
423
size_t batch_padding)
424
{
425
batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
426
batch->start = bbo->bo->map;
427
batch->next = bbo->bo->map + bbo->length;
428
batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
429
batch->relocs = &bbo->relocs;
430
}
431
432
static void
433
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
434
{
435
assert(batch->start == bbo->bo->map);
436
bbo->length = batch->next - batch->start;
437
VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
438
}
439
440
static VkResult
441
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
442
struct anv_batch *batch, size_t additional,
443
size_t batch_padding)
444
{
445
assert(batch->start == bbo->bo->map);
446
bbo->length = batch->next - batch->start;
447
448
size_t new_size = bbo->bo->size;
449
while (new_size <= bbo->length + additional + batch_padding)
450
new_size *= 2;
451
452
if (new_size == bbo->bo->size)
453
return VK_SUCCESS;
454
455
struct anv_bo *new_bo;
456
VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
457
new_size, &new_bo);
458
if (result != VK_SUCCESS)
459
return result;
460
461
memcpy(new_bo->map, bbo->bo->map, bbo->length);
462
463
anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
464
465
bbo->bo = new_bo;
466
anv_batch_bo_continue(bbo, batch, batch_padding);
467
468
return VK_SUCCESS;
469
}
470
471
static void
472
anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
473
struct anv_batch_bo *prev_bbo,
474
struct anv_batch_bo *next_bbo,
475
uint32_t next_bbo_offset)
476
{
477
const uint32_t bb_start_offset =
478
prev_bbo->length - GFX8_MI_BATCH_BUFFER_START_length * 4;
479
ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
480
481
/* Make sure we're looking at a MI_BATCH_BUFFER_START */
482
assert(((*bb_start >> 29) & 0x07) == 0);
483
assert(((*bb_start >> 23) & 0x3f) == 49);
484
485
if (cmd_buffer->device->physical->use_softpin) {
486
assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
487
assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
488
489
write_reloc(cmd_buffer->device,
490
prev_bbo->bo->map + bb_start_offset + 4,
491
next_bbo->bo->offset + next_bbo_offset, true);
492
} else {
493
uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
494
assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
495
496
prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
497
prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
498
499
/* Use a bogus presumed offset to force a relocation */
500
prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
501
}
502
}
503
504
static void
505
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
506
struct anv_cmd_buffer *cmd_buffer)
507
{
508
anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
509
anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
510
vk_free(&cmd_buffer->pool->alloc, bbo);
511
}
512
513
static VkResult
514
anv_batch_bo_list_clone(const struct list_head *list,
515
struct anv_cmd_buffer *cmd_buffer,
516
struct list_head *new_list)
517
{
518
VkResult result = VK_SUCCESS;
519
520
list_inithead(new_list);
521
522
struct anv_batch_bo *prev_bbo = NULL;
523
list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
524
struct anv_batch_bo *new_bbo = NULL;
525
result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
526
if (result != VK_SUCCESS)
527
break;
528
list_addtail(&new_bbo->link, new_list);
529
530
if (prev_bbo)
531
anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
532
533
prev_bbo = new_bbo;
534
}
535
536
if (result != VK_SUCCESS) {
537
list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
538
list_del(&bbo->link);
539
anv_batch_bo_destroy(bbo, cmd_buffer);
540
}
541
}
542
543
return result;
544
}
545
546
/*-----------------------------------------------------------------------*
547
* Functions related to anv_batch_bo
548
*-----------------------------------------------------------------------*/
549
550
static struct anv_batch_bo *
551
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
552
{
553
return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
554
}
555
556
struct anv_address
557
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
558
{
559
struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
560
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
561
return (struct anv_address) {
562
.bo = pool->block_pool.bo,
563
.offset = bt_block->offset - pool->start_offset,
564
};
565
}
566
567
static void
568
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
569
struct anv_bo *bo, uint32_t offset)
570
{
571
/* In gfx8+ the address field grew to two dwords to accommodate 48-bit
572
* offsets. The high 16 bits are in the last dword, so we can use the gfx8
573
* version in either case, as long as we set the instruction length in the
574
* header accordingly. This means that we always emit three dwords here
575
* and all the padding and adjustment we do in this file works for all
576
* gens.
577
*/
578
579
#define GFX7_MI_BATCH_BUFFER_START_length 2
580
#define GFX7_MI_BATCH_BUFFER_START_length_bias 2
581
582
const uint32_t gfx7_length =
583
GFX7_MI_BATCH_BUFFER_START_length - GFX7_MI_BATCH_BUFFER_START_length_bias;
584
const uint32_t gfx8_length =
585
GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;
586
587
anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
588
bbs.DWordLength = cmd_buffer->device->info.ver < 8 ?
589
gfx7_length : gfx8_length;
590
bbs.SecondLevelBatchBuffer = Firstlevelbatch;
591
bbs.AddressSpaceIndicator = ASI_PPGTT;
592
bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
593
}
594
}
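
/* To make the comment above concrete: with the usual genxml values
 * (GFX8_MI_BATCH_BUFFER_START_length == 3, GFX7_..._length == 2, both with a
 * bias of 2), the packet emitted above is always three dwords long and only
 * the DWordLength field differs:
 *
 *    gfx7: DWordLength = 2 - 2 = 0   (command parsed as 2 dwords)
 *    gfx8: DWordLength = 3 - 2 = 1   (command parsed as 3 dwords)
 *
 * The gfx8 value is the one implied by the "three dwords" comment; treat the
 * exact numbers as illustrative.
 */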
595
596
static void
597
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
598
struct anv_batch_bo *bbo)
599
{
600
struct anv_batch *batch = &cmd_buffer->batch;
601
struct anv_batch_bo *current_bbo =
602
anv_cmd_buffer_current_batch_bo(cmd_buffer);
603
604
/* We set the end of the batch a little short so we would be sure we
605
* have room for the chaining command. Since we're about to emit the
606
* chaining command, let's set it back where it should go.
607
*/
608
batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
609
assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
610
611
emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
612
613
anv_batch_bo_finish(current_bbo, batch);
614
}
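
/* The padding dance above relies on a simple invariant: while a batch BO is
 * being recorded, batch->end is kept GFX8_MI_BATCH_BUFFER_START_length * 4
 * bytes short of the true end of the BO (see anv_batch_bo_start /
 * anv_batch_bo_continue), so a chaining jump always fits.  Roughly
 * ('next_bo' below is hypothetical):
 *
 *    batch->end = bo->map + bo->size - GFX8_MI_BATCH_BUFFER_START_length * 4;
 *    ...
 *    batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;  // restore real end
 *    emit_batch_buffer_start(cmd_buffer, next_bo, 0);      // consume it
 */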
615
616
static void
617
anv_cmd_buffer_record_chain_submit(struct anv_cmd_buffer *cmd_buffer_from,
618
struct anv_cmd_buffer *cmd_buffer_to)
619
{
620
assert(cmd_buffer_from->device->physical->use_softpin);
621
622
uint32_t *bb_start = cmd_buffer_from->batch_end;
623
624
struct anv_batch_bo *last_bbo =
625
list_last_entry(&cmd_buffer_from->batch_bos, struct anv_batch_bo, link);
626
struct anv_batch_bo *first_bbo =
627
list_first_entry(&cmd_buffer_to->batch_bos, struct anv_batch_bo, link);
628
629
struct GFX8_MI_BATCH_BUFFER_START gen_bb_start = {
630
__anv_cmd_header(GFX8_MI_BATCH_BUFFER_START),
631
.SecondLevelBatchBuffer = Firstlevelbatch,
632
.AddressSpaceIndicator = ASI_PPGTT,
633
.BatchBufferStartAddress = (struct anv_address) { first_bbo->bo, 0 },
634
};
635
struct anv_batch local_batch = {
636
.start = last_bbo->bo->map,
637
.end = last_bbo->bo->map + last_bbo->bo->size,
638
.relocs = &last_bbo->relocs,
639
.alloc = &cmd_buffer_from->pool->alloc,
640
};
641
642
__anv_cmd_pack(GFX8_MI_BATCH_BUFFER_START)(&local_batch, bb_start, &gen_bb_start);
643
644
last_bbo->chained = true;
645
}
646
647
static void
648
anv_cmd_buffer_record_end_submit(struct anv_cmd_buffer *cmd_buffer)
649
{
650
assert(cmd_buffer->device->physical->use_softpin);
651
652
struct anv_batch_bo *last_bbo =
653
list_last_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
654
last_bbo->chained = false;
655
656
uint32_t *batch = cmd_buffer->batch_end;
657
anv_pack_struct(batch, GFX8_MI_BATCH_BUFFER_END,
658
__anv_cmd_header(GFX8_MI_BATCH_BUFFER_END));
659
}
660
661
static VkResult
662
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
663
{
664
struct anv_cmd_buffer *cmd_buffer = _data;
665
struct anv_batch_bo *new_bbo;
666
/* Cap the reallocation size to one chunk. */
667
uint32_t alloc_size = MIN2(cmd_buffer->total_batch_size,
668
ANV_MAX_CMD_BUFFER_BATCH_SIZE);
669
670
VkResult result = anv_batch_bo_create(cmd_buffer, alloc_size, &new_bbo);
671
if (result != VK_SUCCESS)
672
return result;
673
674
cmd_buffer->total_batch_size += alloc_size;
675
676
struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
677
if (seen_bbo == NULL) {
678
anv_batch_bo_destroy(new_bbo, cmd_buffer);
679
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
680
}
681
*seen_bbo = new_bbo;
682
683
cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
684
685
list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
686
687
anv_batch_bo_start(new_bbo, batch, GFX8_MI_BATCH_BUFFER_START_length * 4);
688
689
return VK_SUCCESS;
690
}
691
692
static VkResult
693
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
694
{
695
struct anv_cmd_buffer *cmd_buffer = _data;
696
struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
697
698
anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
699
GFX8_MI_BATCH_BUFFER_START_length * 4);
700
701
return VK_SUCCESS;
702
}
703
704
/** Allocate a binding table
705
*
706
* This function allocates a binding table. This is a bit more complicated
707
* than one would think due to a combination of Vulkan driver design and some
708
* unfortunate hardware restrictions.
709
*
710
* The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
711
* the binding table pointer which means that all binding tables need to live
712
* in the bottom 64k of surface state base address. The way the GL driver has
713
* classically dealt with this restriction is to emit all surface states
714
* on-the-fly into the batch and have a batch buffer smaller than 64k. This
715
* isn't really an option in Vulkan for a couple of reasons:
716
*
717
* 1) In Vulkan, we have growing (or chaining) batches so surface states have
718
* to live in their own buffer and we have to be able to re-emit
719
* STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
720
* order to avoid emitting STATE_BASE_ADDRESS any more often than needed
721
* (it's not that hard to hit 64k of just binding tables), we allocate
722
* surface state objects up-front when VkImageView is created. In order
723
* for this to work, surface state objects need to be allocated from a
724
* global buffer.
725
*
726
* 2) We tried to design the surface state system in such a way that it's
727
* already ready for bindless texturing. The way bindless texturing works
728
* on our hardware is that you have a big pool of surface state objects
729
* (with its own state base address) and the bindless handles are simply
730
* offsets into that pool. With the architecture we chose, we already
731
* have that pool and it's exactly the same pool that we use for regular
732
* surface states so we should already be ready for bindless.
733
*
734
* 3) For render targets, we need to be able to fill out the surface states
735
* later in vkBeginRenderPass so that we can assign clear colors
736
* correctly. One way to do this would be to just create the surface
737
* state data and then repeatedly copy it into the surface state BO every
738
* time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
739
* rather annoying, and it's much simpler to just allocate them up-front and
740
* re-use them for the entire render pass.
741
*
742
* While none of these are technically blockers for emitting state on the fly
743
* like we do in GL, the ability to have a single surface state pool
744
* simplifies things greatly. Unfortunately, it comes at a cost...
745
*
746
* Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
747
* place the binding tables just anywhere in surface state base address.
748
* Because 64k isn't a whole lot of space, we can't simply restrict the
749
* surface state buffer to 64k, we have to be more clever. The solution we've
750
* chosen is to have a block pool with a maximum size of 2G that starts at
751
* zero and grows in both directions. All surface states are allocated from
752
* the top of the pool (positive offsets) and we allocate blocks (< 64k) of
753
* binding tables from the bottom of the pool (negative offsets). Every time
754
* we allocate a new binding table block, we set surface state base address to
755
* point to the bottom of the binding table block. This way all of the
756
* binding tables in the block are in the bottom 64k of surface state base
757
* address. When we fill out the binding table, we add the distance between
758
* the bottom of our binding table block and zero of the block pool to the
759
* surface state offsets so that they are correct relative to our new surface
760
* state base address at the bottom of the binding table block.
761
*
762
* \see adjust_relocations_from_state_pool()
763
* \see adjust_relocations_to_state_pool()
764
*
765
* \param[in] entries The number of surface state entries the binding
766
* table should be able to hold.
767
*
768
* \param[out] state_offset The offset from surface state base address
769
* where the surface states live. This must be
770
* added to the surface state offset when it is
771
* written into the binding table entry.
772
*
773
* \return An anv_state representing the binding table
774
*/
775
struct anv_state
776
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
777
uint32_t entries, uint32_t *state_offset)
778
{
779
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
780
781
uint32_t bt_size = align_u32(entries * 4, 32);
782
783
struct anv_state state = cmd_buffer->bt_next;
784
if (bt_size > state.alloc_size)
785
return (struct anv_state) { 0 };
786
787
state.alloc_size = bt_size;
788
cmd_buffer->bt_next.offset += bt_size;
789
cmd_buffer->bt_next.map += bt_size;
790
cmd_buffer->bt_next.alloc_size -= bt_size;
791
792
assert(bt_block->offset < 0);
793
*state_offset = -bt_block->offset;
794
795
return state;
796
}
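
/* A worked example of the offset scheme described in the big comment above
 * (all numbers are illustrative, 'bt_entry' is a hypothetical binding-table
 * entry value).  Suppose the current binding table block was allocated at
 * block-pool offset -65536 (so bt_block->offset == -65536) and a surface
 * state sits at +4096 from the block pool's zero point:
 *
 *    *state_offset = -bt_block->offset;        // 65536
 *    bt_entry      = 4096 + *state_offset;     // 69632
 *
 * 69632 is exactly the distance from the bottom of the binding table block
 * (where surface state base address now points) to the surface state, so the
 * value written into the binding table is correct relative to the
 * re-programmed base address.
 */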
797
798
struct anv_state
799
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
800
{
801
struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
802
return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
803
isl_dev->ss.size, isl_dev->ss.align);
804
}
805
806
struct anv_state
807
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
808
uint32_t size, uint32_t alignment)
809
{
810
return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
811
size, alignment);
812
}
813
814
VkResult
815
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
816
{
817
struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
818
if (bt_block == NULL) {
819
anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
820
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
821
}
822
823
*bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
824
825
/* The bt_next state is a rolling state (we update it as we suballocate
826
* from it) which is relative to the start of the binding table block.
827
*/
828
cmd_buffer->bt_next = *bt_block;
829
cmd_buffer->bt_next.offset = 0;
830
831
return VK_SUCCESS;
832
}
833
834
VkResult
835
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
836
{
837
struct anv_batch_bo *batch_bo;
838
VkResult result;
839
840
list_inithead(&cmd_buffer->batch_bos);
841
842
cmd_buffer->total_batch_size = ANV_MIN_CMD_BUFFER_BATCH_SIZE;
843
844
result = anv_batch_bo_create(cmd_buffer,
845
cmd_buffer->total_batch_size,
846
&batch_bo);
847
if (result != VK_SUCCESS)
848
return result;
849
850
list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
851
852
cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
853
cmd_buffer->batch.user_data = cmd_buffer;
854
855
if (cmd_buffer->device->can_chain_batches) {
856
cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
857
} else {
858
cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
859
}
860
861
anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
862
GFX8_MI_BATCH_BUFFER_START_length * 4);
863
864
int success = u_vector_init(&cmd_buffer->seen_bbos,
865
sizeof(struct anv_bo *),
866
8 * sizeof(struct anv_bo *));
867
if (!success)
868
goto fail_batch_bo;
869
870
*(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
871
872
/* u_vector requires power-of-two size elements */
873
unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
874
success = u_vector_init(&cmd_buffer->bt_block_states,
875
pow2_state_size, 8 * pow2_state_size);
876
if (!success)
877
goto fail_seen_bbos;
878
879
result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
880
&cmd_buffer->pool->alloc);
881
if (result != VK_SUCCESS)
882
goto fail_bt_blocks;
883
cmd_buffer->last_ss_pool_center = 0;
884
885
result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
886
if (result != VK_SUCCESS)
887
goto fail_bt_blocks;
888
889
return VK_SUCCESS;
890
891
fail_bt_blocks:
892
u_vector_finish(&cmd_buffer->bt_block_states);
893
fail_seen_bbos:
894
u_vector_finish(&cmd_buffer->seen_bbos);
895
fail_batch_bo:
896
anv_batch_bo_destroy(batch_bo, cmd_buffer);
897
898
return result;
899
}
900
901
void
902
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
903
{
904
struct anv_state *bt_block;
905
u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
906
anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
907
u_vector_finish(&cmd_buffer->bt_block_states);
908
909
anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
910
911
u_vector_finish(&cmd_buffer->seen_bbos);
912
913
/* Destroy all of the batch buffers */
914
list_for_each_entry_safe(struct anv_batch_bo, bbo,
915
&cmd_buffer->batch_bos, link) {
916
list_del(&bbo->link);
917
anv_batch_bo_destroy(bbo, cmd_buffer);
918
}
919
}
920
921
void
922
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
923
{
924
/* Delete all but the first batch bo */
925
assert(!list_is_empty(&cmd_buffer->batch_bos));
926
while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
927
struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
928
list_del(&bbo->link);
929
anv_batch_bo_destroy(bbo, cmd_buffer);
930
}
931
assert(!list_is_empty(&cmd_buffer->batch_bos));
932
933
anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
934
&cmd_buffer->batch,
935
GFX8_MI_BATCH_BUFFER_START_length * 4);
936
937
while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
938
struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
939
anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
940
}
941
assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
942
cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
943
cmd_buffer->bt_next.offset = 0;
944
945
anv_reloc_list_clear(&cmd_buffer->surface_relocs);
946
cmd_buffer->last_ss_pool_center = 0;
947
948
/* Reset the list of seen buffers */
949
cmd_buffer->seen_bbos.head = 0;
950
cmd_buffer->seen_bbos.tail = 0;
951
952
struct anv_batch_bo *first_bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
953
954
*(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = first_bbo;
955
956
957
assert(!cmd_buffer->device->can_chain_batches ||
958
first_bbo->bo->size == ANV_MIN_CMD_BUFFER_BATCH_SIZE);
959
cmd_buffer->total_batch_size = first_bbo->bo->size;
960
}
961
962
void
963
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
964
{
965
struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
966
967
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
968
/* When we start a batch buffer, we subtract a certain amount of
969
* padding from the end to ensure that we always have room to emit a
970
* BATCH_BUFFER_START to chain to the next BO. We need to remove
971
* that padding before we end the batch; otherwise, we may end up
972
* with our BATCH_BUFFER_END in another BO.
973
*/
974
cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
975
assert(cmd_buffer->batch.start == batch_bo->bo->map);
976
assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
977
978
/* Save end instruction location to override it later. */
979
cmd_buffer->batch_end = cmd_buffer->batch.next;
980
981
/* If we can chain this command buffer to another one, leave some space
982
* for the jump instruction.
983
*/
984
batch_bo->chained = anv_cmd_buffer_is_chainable(cmd_buffer);
985
if (batch_bo->chained)
986
emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
987
else
988
anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);
989
990
/* Round batch up to an even number of dwords. */
991
if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
992
anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
993
994
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
995
} else {
996
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
997
/* If this is a secondary command buffer, we need to determine the
998
* mode in which it will be executed with vkExecuteCommands. We
999
* determine this statically here so that this stays in sync with the
1000
* actual ExecuteCommands implementation.
1001
*/
1002
const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
1003
if (!cmd_buffer->device->can_chain_batches) {
1004
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
1005
} else if (cmd_buffer->device->physical->use_call_secondary) {
1006
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
1007
/* If the secondary command buffer begins & ends in the same BO and
1008
* its length is less than the length of the CS prefetch, add some NOOP
1009
* instructions so the last MI_BATCH_BUFFER_START is outside the CS
1010
* prefetch.
1011
*/
1012
if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
1013
const struct intel_device_info *devinfo = &cmd_buffer->device->info;
1014
/* Be careful to keep everything as signed integers. */
1015
int32_t prefetch_len = devinfo->cs_prefetch_size;
1016
int32_t batch_len =
1017
cmd_buffer->batch.next - cmd_buffer->batch.start;
1018
1019
for (int32_t i = 0; i < (prefetch_len - batch_len); i += 4)
1020
anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
1021
}
1022
1023
void *jump_addr =
1024
anv_batch_emitn(&cmd_buffer->batch,
1025
GFX8_MI_BATCH_BUFFER_START_length,
1026
GFX8_MI_BATCH_BUFFER_START,
1027
.AddressSpaceIndicator = ASI_PPGTT,
1028
.SecondLevelBatchBuffer = Firstlevelbatch) +
1029
(GFX8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
1030
cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
1031
1032
/* The emit above may have caused us to chain batch buffers which
1033
* would mean that batch_bo is no longer valid.
1034
*/
1035
batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
1036
} else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
1037
(length < ANV_MIN_CMD_BUFFER_BATCH_SIZE / 2)) {
1038
/* If the secondary has exactly one batch buffer in its list *and*
1039
* that batch buffer is less than half of the maximum size, we're
1040
* probably better of simply copying it into our batch.
1041
*/
1042
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
1043
} else if (!(cmd_buffer->usage_flags &
1044
VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
1045
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
1046
1047
/* In order to chain, we need this command buffer to contain an
1048
* MI_BATCH_BUFFER_START which will jump back to the calling batch.
1049
* It doesn't matter where it points now so long as it has a valid
1050
* relocation. We'll adjust it later as part of the chaining
1051
* process.
1052
*
1053
* We set the end of the batch a little short so we would be sure we
1054
* have room for the chaining command. Since we're about to emit the
1055
* chaining command, let's set it back where it should go.
1056
*/
1057
cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
1058
assert(cmd_buffer->batch.start == batch_bo->bo->map);
1059
assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
1060
1061
emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
1062
assert(cmd_buffer->batch.start == batch_bo->bo->map);
1063
} else {
1064
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
1065
}
1066
}
1067
1068
anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
1069
}
1070
1071
static VkResult
1072
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
1073
struct list_head *list)
1074
{
1075
list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
1076
struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
1077
if (bbo_ptr == NULL)
1078
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1079
1080
*bbo_ptr = bbo;
1081
}
1082
1083
return VK_SUCCESS;
1084
}
1085
1086
void
1087
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1088
struct anv_cmd_buffer *secondary)
1089
{
1090
anv_measure_add_secondary(primary, secondary);
1091
switch (secondary->exec_mode) {
1092
case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1093
anv_batch_emit_batch(&primary->batch, &secondary->batch);
1094
break;
1095
case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
1096
struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
1097
unsigned length = secondary->batch.end - secondary->batch.start;
1098
anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1099
GFX8_MI_BATCH_BUFFER_START_length * 4);
1100
anv_batch_emit_batch(&primary->batch, &secondary->batch);
1101
break;
1102
}
1103
case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1104
struct anv_batch_bo *first_bbo =
1105
list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1106
struct anv_batch_bo *last_bbo =
1107
list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1108
1109
emit_batch_buffer_start(primary, first_bbo->bo, 0);
1110
1111
struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1112
assert(primary->batch.start == this_bbo->bo->map);
1113
uint32_t offset = primary->batch.next - primary->batch.start;
1114
1115
/* Make the tail of the secondary point back to right after the
1116
* MI_BATCH_BUFFER_START in the primary batch.
1117
*/
1118
anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1119
1120
anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1121
break;
1122
}
1123
case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1124
struct list_head copy_list;
1125
VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1126
secondary,
1127
&copy_list);
1128
if (result != VK_SUCCESS)
1129
return; /* FIXME */
1130
1131
anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1132
1133
struct anv_batch_bo *first_bbo =
1134
list_first_entry(&copy_list, struct anv_batch_bo, link);
1135
struct anv_batch_bo *last_bbo =
1136
list_last_entry(&copy_list, struct anv_batch_bo, link);
1137
1138
cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1139
1140
list_splicetail(&copy_list, &primary->batch_bos);
1141
1142
anv_batch_bo_continue(last_bbo, &primary->batch,
1143
GFX8_MI_BATCH_BUFFER_START_length * 4);
1144
break;
1145
}
1146
case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
1147
struct anv_batch_bo *first_bbo =
1148
list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1149
1150
uint64_t *write_return_addr =
1151
anv_batch_emitn(&primary->batch,
1152
GFX8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1153
GFX8_MI_STORE_DATA_IMM,
1154
.Address = secondary->return_addr)
1155
+ (GFX8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1156
1157
emit_batch_buffer_start(primary, first_bbo->bo, 0);
1158
1159
*write_return_addr =
1160
anv_address_physical(anv_batch_address(&primary->batch,
1161
primary->batch.next));
1162
1163
anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1164
break;
1165
}
1166
default:
1167
assert(!"Invalid execution mode");
1168
}
1169
1170
anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1171
&secondary->surface_relocs, 0);
1172
}
1173
1174
struct anv_execbuf {
1175
struct drm_i915_gem_execbuffer2 execbuf;
1176
1177
struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
1178
1179
struct drm_i915_gem_exec_object2 * objects;
1180
uint32_t bo_count;
1181
struct anv_bo ** bos;
1182
1183
/* Allocated length of the 'objects' and 'bos' arrays */
1184
uint32_t array_length;
1185
1186
/* List of relocations for surface states, only used with platforms not
1187
* using softpin.
1188
*/
1189
void * surface_states_relocs;
1190
1191
/* Indicates whether any of the command buffers have relocations. This
1192
* doesn't necessarily mean we'll need the kernel to process them. It
1193
* might be that a previous execbuf has already placed things in the VMA
1194
* and we can make i915 skip the relocations.
1195
*/
1196
bool has_relocs;
1197
1198
const VkAllocationCallbacks * alloc;
1199
VkSystemAllocationScope alloc_scope;
1200
1201
int perf_query_pass;
1202
};
1203
1204
static void
1205
anv_execbuf_init(struct anv_execbuf *exec)
1206
{
1207
memset(exec, 0, sizeof(*exec));
1208
}
1209
1210
static void
1211
anv_execbuf_finish(struct anv_execbuf *exec)
1212
{
1213
vk_free(exec->alloc, exec->surface_states_relocs);
1214
vk_free(exec->alloc, exec->objects);
1215
vk_free(exec->alloc, exec->bos);
1216
}
1217
1218
static void
1219
anv_execbuf_add_ext(struct anv_execbuf *exec,
1220
uint32_t ext_name,
1221
struct i915_user_extension *ext)
1222
{
1223
__u64 *iter = &exec->execbuf.cliprects_ptr;
1224
1225
exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
1226
1227
while (*iter != 0) {
1228
iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
1229
}
1230
1231
ext->name = ext_name;
1232
1233
*iter = (uintptr_t) ext;
1234
}
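
/* The extensions form a singly linked list threaded first through
 * execbuf.cliprects_ptr (reused as the extension pointer once
 * I915_EXEC_USE_EXTENSIONS is set) and then through each extension's
 * next_extension field.  After adding two hypothetical extensions ext_a and
 * ext_b, the chain walked by the loop above looks like:
 *
 *    execbuf.cliprects_ptr == (uintptr_t) &ext_a;
 *    ext_a.next_extension  == (uintptr_t) &ext_b;
 *    ext_b.next_extension  == 0;
 */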
1235
1236
static VkResult
1237
anv_execbuf_add_bo_bitset(struct anv_device *device,
1238
struct anv_execbuf *exec,
1239
uint32_t dep_words,
1240
BITSET_WORD *deps,
1241
uint32_t extra_flags);
1242
1243
static VkResult
1244
anv_execbuf_add_bo(struct anv_device *device,
1245
struct anv_execbuf *exec,
1246
struct anv_bo *bo,
1247
struct anv_reloc_list *relocs,
1248
uint32_t extra_flags)
1249
{
1250
struct drm_i915_gem_exec_object2 *obj = NULL;
1251
1252
bo = anv_bo_unwrap(bo);
1253
1254
if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1255
obj = &exec->objects[bo->index];
1256
1257
if (obj == NULL) {
1258
/* We've never seen this one before. Add it to the list and assign
1259
* an id that we can use later.
1260
*/
1261
if (exec->bo_count >= exec->array_length) {
1262
uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1263
1264
struct drm_i915_gem_exec_object2 *new_objects =
1265
vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1266
if (new_objects == NULL)
1267
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1268
1269
struct anv_bo **new_bos =
1270
vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1271
if (new_bos == NULL) {
1272
vk_free(exec->alloc, new_objects);
1273
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1274
}
1275
1276
if (exec->objects) {
1277
memcpy(new_objects, exec->objects,
1278
exec->bo_count * sizeof(*new_objects));
1279
memcpy(new_bos, exec->bos,
1280
exec->bo_count * sizeof(*new_bos));
1281
}
1282
1283
vk_free(exec->alloc, exec->objects);
1284
vk_free(exec->alloc, exec->bos);
1285
1286
exec->objects = new_objects;
1287
exec->bos = new_bos;
1288
exec->array_length = new_len;
1289
}
1290
1291
assert(exec->bo_count < exec->array_length);
1292
1293
bo->index = exec->bo_count++;
1294
obj = &exec->objects[bo->index];
1295
exec->bos[bo->index] = bo;
1296
1297
obj->handle = bo->gem_handle;
1298
obj->relocation_count = 0;
1299
obj->relocs_ptr = 0;
1300
obj->alignment = 0;
1301
obj->offset = bo->offset;
1302
obj->flags = bo->flags | extra_flags;
1303
obj->rsvd1 = 0;
1304
obj->rsvd2 = 0;
1305
}
1306
1307
if (extra_flags & EXEC_OBJECT_WRITE) {
1308
obj->flags |= EXEC_OBJECT_WRITE;
1309
obj->flags &= ~EXEC_OBJECT_ASYNC;
1310
}
1311
1312
if (relocs != NULL) {
1313
assert(obj->relocation_count == 0);
1314
1315
if (relocs->num_relocs > 0) {
1316
/* This is the first time we've ever seen a list of relocations for
1317
* this BO. Go ahead and set the relocations and then walk the list
1318
* of relocations and add them all.
1319
*/
1320
exec->has_relocs = true;
1321
obj->relocation_count = relocs->num_relocs;
1322
obj->relocs_ptr = (uintptr_t) relocs->relocs;
1323
1324
for (size_t i = 0; i < relocs->num_relocs; i++) {
1325
VkResult result;
1326
1327
/* A quick sanity check on relocations */
1328
assert(relocs->relocs[i].offset < bo->size);
1329
result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1330
NULL, extra_flags);
1331
if (result != VK_SUCCESS)
1332
return result;
1333
}
1334
}
1335
1336
return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1337
relocs->deps, extra_flags);
1338
}
1339
1340
return VK_SUCCESS;
1341
}
1342
1343
/* Add BO dependencies to execbuf */
1344
static VkResult
1345
anv_execbuf_add_bo_bitset(struct anv_device *device,
1346
struct anv_execbuf *exec,
1347
uint32_t dep_words,
1348
BITSET_WORD *deps,
1349
uint32_t extra_flags)
1350
{
1351
for (uint32_t w = 0; w < dep_words; w++) {
1352
BITSET_WORD mask = deps[w];
1353
while (mask) {
1354
int i = u_bit_scan(&mask);
1355
uint32_t gem_handle = w * BITSET_WORDBITS + i;
1356
struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1357
assert(bo->refcount > 0);
1358
VkResult result =
1359
anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1360
if (result != VK_SUCCESS)
1361
return result;
1362
}
1363
}
1364
1365
return VK_SUCCESS;
1366
}
1367
1368
static void
1369
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1370
struct anv_reloc_list *list)
1371
{
1372
for (size_t i = 0; i < list->num_relocs; i++)
1373
list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
1374
}
1375
1376
static void
1377
adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1378
struct anv_reloc_list *relocs,
1379
uint32_t last_pool_center_bo_offset)
1380
{
1381
assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1382
uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1383
1384
for (size_t i = 0; i < relocs->num_relocs; i++) {
1385
/* All of the relocations from this block pool to other BOs should
1386
* have been emitted relative to the surface block pool center. We
1387
* need to add the center offset to make them relative to the
1388
* beginning of the actual GEM bo.
1389
*/
1390
relocs->relocs[i].offset += delta;
1391
}
1392
}
1393
1394
static void
1395
adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1396
struct anv_bo *from_bo,
1397
struct anv_reloc_list *relocs,
1398
uint32_t last_pool_center_bo_offset)
1399
{
1400
assert(!from_bo->is_wrapper);
1401
assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1402
uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1403
1404
/* When we initially emit relocations into a block pool, we don't
1405
* actually know what the final center_bo_offset will be so we just emit
1406
* it as if center_bo_offset == 0. Now that we know what the center
1407
* offset is, we need to walk the list of relocations and adjust any
1408
* relocations that point to the pool bo with the correct offset.
1409
*/
1410
for (size_t i = 0; i < relocs->num_relocs; i++) {
1411
if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1412
/* Adjust the delta value in the relocation to correctly
1413
* correspond to the new delta. Initially, this value may have
1414
* been negative (if treated as unsigned), but we trust in
1415
* uint32_t roll-over to fix that for us at this point.
1416
*/
1417
relocs->relocs[i].delta += delta;
1418
1419
/* Since the delta has changed, we need to update the actual
1420
* relocated value with the new presumed value. This function
1421
* should only be called on batch buffers, so we know it isn't in
1422
* use by the GPU at the moment.
1423
*/
1424
assert(relocs->relocs[i].offset < from_bo->size);
1425
write_reloc(pool->block_pool.device,
1426
from_bo->map + relocs->relocs[i].offset,
1427
relocs->relocs[i].presumed_offset +
1428
relocs->relocs[i].delta, false);
1429
}
1430
}
1431
}
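
/* A numeric sketch of the adjustment above (values are illustrative).  If
 * the relocations were last fixed up when center_bo_offset was 4096 and the
 * pool has since grown downward so that center_bo_offset is now 8192:
 *
 *    delta = 8192 - 4096;        // 4096
 *    relocs[i].delta += 4096;    // target address re-expressed relative to
 *                                // the start of the (larger) pool GEM BO
 *
 * adjust_relocations_from_state_pool() applies the same delta to
 * relocs[i].offset for relocations that live *inside* the pool BO.
 */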
1432
1433
static void
1434
anv_reloc_list_apply(struct anv_device *device,
1435
struct anv_reloc_list *list,
1436
struct anv_bo *bo,
1437
bool always_relocate)
1438
{
1439
bo = anv_bo_unwrap(bo);
1440
1441
for (size_t i = 0; i < list->num_relocs; i++) {
1442
struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1443
if (list->relocs[i].presumed_offset == target_bo->offset &&
1444
!always_relocate)
1445
continue;
1446
1447
void *p = bo->map + list->relocs[i].offset;
1448
write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1449
list->relocs[i].presumed_offset = target_bo->offset;
1450
}
1451
}
1452
1453
/**
1454
* This function applies the relocation for a command buffer and writes the
1455
* actual addresses into the buffers as per what we were told by the kernel on
1456
* the previous execbuf2 call. This should be safe to do because, for each
1457
* relocated address, we have two cases:
1458
*
1459
* 1) The target BO is inactive (as seen by the kernel). In this case, it is
1460
* not in use by the GPU so updating the address is 100% ok. It won't be
1461
* in-use by the GPU (from our context) again until the next execbuf2
1462
* happens. If the kernel decides to move it in the next execbuf2, it
1463
* will have to do the relocations itself, but that's ok because it should
1464
* have all of the information needed to do so.
1465
*
1466
* 2) The target BO is active (as seen by the kernel). In this case, it
1467
* hasn't moved since the last execbuffer2 call because GTT shuffling
1468
* *only* happens when the BO is idle. (From our perspective, it only
1469
* happens inside the execbuffer2 ioctl, but the shuffling may be
1470
* triggered by another ioctl or by memory pressure; with full-ppgtt it is
1471
* limited to execbuffer2 ioctls on the same context.) Since the
1472
* target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1473
* address and the relocated value we are writing into the BO will be the
1474
* same as the value that is already there.
1475
*
1476
* There is also a possibility that the target BO is active but the exact
1477
* RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1478
* use. In this case, the address currently in the RENDER_SURFACE_STATE
1479
* may be stale but it's still safe to write the relocation because that
1480
* particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1481
* won't be until the next execbuf2 call.
1482
*
1483
* By doing relocations on the CPU, we can tell the kernel that it doesn't
1484
* need to bother. We want to do this because the surface state buffer is
1485
* used by every command buffer so, if the kernel does the relocations, it
1486
* will always be busy and the kernel will always stall. This is also
1487
* probably the fastest mechanism for doing relocations since the kernel would
1488
* have to make a full copy of all the relocation lists.
1489
*/
1490
static bool
1491
execbuf_can_skip_relocations(struct anv_execbuf *exec)
1492
{
1493
if (!exec->has_relocs)
1494
return true;
1495
1496
static int userspace_relocs = -1;
1497
if (userspace_relocs < 0)
1498
userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1499
if (!userspace_relocs)
1500
return false;
1501
1502
/* First, we have to check to see whether or not we can even do the
1503
* relocation. New buffers which have never been submitted to the kernel
1504
* don't have a valid offset so we need to let the kernel do relocations so
1505
* that we can get offsets for them. On future execbuf2 calls, those
1506
* buffers will have offsets and we will be able to skip relocating.
1507
* Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1508
*/
1509
for (uint32_t i = 0; i < exec->bo_count; i++) {
1510
assert(!exec->bos[i]->is_wrapper);
1511
if (exec->bos[i]->offset == (uint64_t)-1)
1512
return false;
1513
}
1514
1515
return true;
1516
}
1517
1518
static void
1519
relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1520
struct anv_execbuf *exec)
1521
{
1522
/* Since surface states are shared between command buffers and we don't
1523
* know what order they will be submitted to the kernel, we don't know
1524
* what address is actually written in the surface state object at any
1525
* given time. The only option is to always relocate them.
1526
*/
1527
struct anv_bo *surface_state_bo =
1528
anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1529
anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1530
surface_state_bo,
1531
true /* always relocate surface states */);
1532
1533
/* Since we own all of the batch buffers, we know what values are stored
1534
* in the relocated addresses and only have to update them if the offsets
1535
* have changed.
1536
*/
1537
struct anv_batch_bo **bbo;
1538
u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1539
anv_reloc_list_apply(cmd_buffer->device,
1540
&(*bbo)->relocs, (*bbo)->bo, false);
1541
}
1542
1543
for (uint32_t i = 0; i < exec->bo_count; i++)
1544
exec->objects[i].offset = exec->bos[i]->offset;
1545
}
1546
1547
static void
1548
reset_cmd_buffer_surface_offsets(struct anv_cmd_buffer *cmd_buffer)
1549
{
1550
/* In the case where we fall back to doing kernel relocations, we need to
1551
* ensure that the relocation list is valid. All relocations on the batch
1552
* buffers are already valid and kept up-to-date. Since surface states are
1553
* shared between command buffers and we don't know what order they will be
1554
* submitted to the kernel, we don't know what address is actually written
1555
* in the surface state object at any given time. The only option is to set
1556
* a bogus presumed offset and let the kernel relocate them.
1557
*/
1558
for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1559
cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1560
}
1561
1562
static VkResult
1563
setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1564
struct anv_cmd_buffer *cmd_buffer)
1565
{
1566
struct anv_state_pool *ss_pool =
1567
&cmd_buffer->device->surface_state_pool;
1568
1569
adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1570
cmd_buffer->last_ss_pool_center);
1571
VkResult result;
1572
if (cmd_buffer->device->physical->use_softpin) {
1573
/* Add surface dependencies (BOs) to the execbuf */
1574
anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1575
cmd_buffer->surface_relocs.dep_words,
1576
cmd_buffer->surface_relocs.deps, 0);
1577
} else {
1578
/* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1579
* will get added automatically by processing relocations on the batch
1580
* buffer. We have to add the surface state BO manually because it has
1581
* relocations of its own that we need to be sure are processed.
1582
*/
1583
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1584
ss_pool->block_pool.bo,
1585
&cmd_buffer->surface_relocs, 0);
1586
if (result != VK_SUCCESS)
1587
return result;
1588
}
1589
1590
/* First, we walk over all of the bos we've seen and add them and their
1591
* relocations to the validate list.
1592
*/
1593
struct anv_batch_bo **bbo;
1594
u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1595
adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
1596
cmd_buffer->last_ss_pool_center);
1597
1598
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1599
(*bbo)->bo, &(*bbo)->relocs, 0);
1600
if (result != VK_SUCCESS)
1601
return result;
1602
}
1603
1604
/* Now that we've adjusted all of the surface state relocations, we need to
1605
* record the surface state pool center so future executions of the command
1606
* buffer can adjust correctly.
1607
*/
1608
cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1609
1610
return VK_SUCCESS;
1611
}
1612
1613
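/* Chain a set of command buffers into a single stream of execution; roughly,
 * each buffer's tail is patched to jump into the next one (see
 * anv_cmd_buffer_record_chain_submit()) and the last buffer is terminated
 * with an end-of-batch (see anv_cmd_buffer_record_end_submit()).
 */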
static void
chain_command_buffers(struct anv_cmd_buffer **cmd_buffers,
                      uint32_t num_cmd_buffers)
{
   if (!anv_cmd_buffer_is_chainable(cmd_buffers[0])) {
      assert(num_cmd_buffers == 1);
      return;
   }

   /* Chain the first N-1 batch buffers together */
   for (uint32_t i = 0; i < (num_cmd_buffers - 1); i++)
      anv_cmd_buffer_record_chain_submit(cmd_buffers[i], cmd_buffers[i + 1]);

   /* Put an end to the last one */
   anv_cmd_buffer_record_end_submit(cmd_buffers[num_cmd_buffers - 1]);
}

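/* Build the validation list and drm_i915_gem_execbuffer2 for a set of
 * command buffers: chain them together, add their BOs (plus the global state
 * pools and user memory objects under softpin), resolve or reset relocations,
 * and move the first batch BO to the end of the list as the kernel requires.
 */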
static VkResult
setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
                              struct anv_queue *queue,
                              struct anv_cmd_buffer **cmd_buffers,
                              uint32_t num_cmd_buffers)
{
   struct anv_device *device = queue->device;
   struct anv_state_pool *ss_pool = &device->surface_state_pool;
   VkResult result;

   /* Edit the tail of the command buffers to chain them all together if they
    * can be.
    */
   chain_command_buffers(cmd_buffers, num_cmd_buffers);

   for (uint32_t i = 0; i < num_cmd_buffers; i++) {
      result = setup_execbuf_for_cmd_buffer(execbuf, cmd_buffers[i]);
      if (result != VK_SUCCESS)
         return result;
   }

   /* Add all the global BOs to the object list for the softpin case. */
   if (device->physical->use_softpin) {
      anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
         result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      struct anv_block_pool *pool;
      pool = &device->dynamic_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &device->general_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &device->instruction_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &device->binding_table_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      /* Add the BOs for all user allocated memory objects because we can't
       * track usage after the update-after-bind descriptor updates of
       * VK_EXT_descriptor_indexing.
       */
      list_for_each_entry(struct anv_device_memory, mem,
                          &device->memory_objects, link) {
         result = anv_execbuf_add_bo(device, execbuf, mem->bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      /* We do not support chaining primary command buffers without
       * softpin.
       */
      assert(num_cmd_buffers == 1);
   }

   bool no_reloc = true;
   if (execbuf->has_relocs) {
      no_reloc = execbuf_can_skip_relocations(execbuf);
      if (no_reloc) {
         /* If we were able to successfully relocate everything, tell the
          * kernel that it can skip doing relocations. The requirement for
          * using NO_RELOC is:
          *
          *  1) The addresses written in the objects must match the
          *     corresponding reloc.presumed_offset which in turn must match
          *     the corresponding execobject.offset.
          *
          *  2) To avoid stalling, execobject.offset should match the current
          *     address of that object within the active context.
          *
          * In order to satisfy all of the invariants that make userspace
          * relocations safe (see relocate_cmd_buffer()), we need to further
          * ensure that the addresses we use match those used by the kernel
          * for the most recent execbuf2.
          *
          * The kernel may still choose to do relocations anyway if something
          * has moved in the GTT. In this case, the relocation list still
          * needs to be valid. All relocations on the batch buffers are
          * already valid and kept up-to-date. For surface state relocations,
          * by applying the relocations in relocate_cmd_buffer, we ensured
          * that the address in the RENDER_SURFACE_STATE matches
          * presumed_offset, so it should be safe for the kernel to relocate
          * them as needed.
          */
         for (uint32_t i = 0; i < num_cmd_buffers; i++) {
            relocate_cmd_buffer(cmd_buffers[i], execbuf);

            anv_reloc_list_apply(device, &cmd_buffers[i]->surface_relocs,
                                 device->surface_state_pool.block_pool.bo,
                                 true /* always relocate surface states */);
         }
      } else {
         /* In the case where we fall back to doing kernel relocations, we
          * need to ensure that the relocation list is valid. All relocations
          * on the batch buffers are already valid and kept up-to-date. Since
          * surface states are shared between command buffers and we don't
          * know what order they will be submitted to the kernel, we don't
          * know what address is actually written in the surface state object
          * at any given time. The only option is to set a bogus presumed
          * offset and let the kernel relocate them.
          */
         for (uint32_t i = 0; i < num_cmd_buffers; i++)
            reset_cmd_buffer_surface_offsets(cmd_buffers[i]);
      }
   }

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffers[0]->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute. We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
      uint32_t idx = first_batch_bo->bo->index;
      uint32_t last_idx = execbuf->bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
      assert(execbuf->bos[idx] == first_batch_bo->bo);

      execbuf->objects[idx] = execbuf->objects[last_idx];
      execbuf->bos[idx] = execbuf->bos[last_idx];
      execbuf->bos[idx]->index = idx;

      execbuf->objects[last_idx] = tmp_obj;
      execbuf->bos[last_idx] = first_batch_bo->bo;
      first_batch_bo->bo->index = last_idx;
   }

   /* If we are pinning our BOs, we shouldn't have to relocate anything */
   if (device->physical->use_softpin)
      assert(!execbuf->has_relocs);

   /* Now we go through and fixup all of the relocation lists to point to the
    * correct indices in the object array (I915_EXEC_HANDLE_LUT). We have to
    * do this after we reorder the list above as some of the indices may have
    * changed.
    */
   struct anv_batch_bo **bbo;
   if (execbuf->has_relocs) {
      assert(num_cmd_buffers == 1);
      u_vector_foreach(bbo, &cmd_buffers[0]->seen_bbos)
         anv_cmd_buffer_process_relocs(cmd_buffers[0], &(*bbo)->relocs);

      anv_cmd_buffer_process_relocs(cmd_buffers[0], &cmd_buffers[0]->surface_relocs);
   }

   if (!device->info.has_llc) {
      __builtin_ia32_mfence();
      for (uint32_t i = 0; i < num_cmd_buffers; i++) {
         u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
            for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
               __builtin_ia32_clflush((*bbo)->bo->map + i);
         }
      }
   }

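   /* Assemble the execbuffer2 itself. I915_EXEC_HANDLE_LUT tells the kernel
    * that relocation entries name their targets by index into the validation
    * list (as rewritten by anv_cmd_buffer_process_relocs() above) rather than
    * by GEM handle.
    */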
   struct anv_batch *batch = &cmd_buffers[0]->batch;
   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      /* On platforms that cannot chain batch buffers because of the i915
       * command parser, we have to provide the batch length. Everywhere else
       * we'll chain batches so no point in passing a length.
       */
      .batch_len = device->can_chain_batches ? 0 : batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | (no_reloc ? I915_EXEC_NO_RELOC : 0),
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

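/* Set up an execbuf that runs only the device's trivial batch. This is used
 * when a submit carries no command buffers and no simple BO (e.g. a
 * fence-only submission) so the kernel still has something to execute.
 */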
static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue)
{
   struct anv_device *device = queue->device;
   VkResult result = anv_execbuf_add_bo(device, execbuf,
                                        device->trivial_batch_bo,
                                        NULL, 0);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GFX7_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

/* We lock around execbuf for three main reasons:
 *
 *  1) When a block pool is resized, we create a new gem handle with a
 *     different size and, in the case of surface states, possibly a
 *     different center offset, but we re-use the same anv_bo struct when
 *     we do so. If this happens in the middle of setting up an execbuf,
 *     we could end up with our list of BOs out of sync with our list of
 *     gem handles.
 *
 *  2) The algorithm we use for building the list of unique buffers isn't
 *     thread-safe. While the client is supposed to synchronize around
 *     QueueSubmit, this would be extremely difficult to debug if it ever
 *     came up in the wild due to a broken app. It's better to play it
 *     safe and just lock around QueueSubmit.
 *
 *  3) The anv_cmd_buffer_execbuf function may perform relocations in
 *     userspace. Because the surface state buffer is shared between
 *     batches, we can't afford to have that happen from multiple threads
 *     at the same time. Even though the user is supposed to ensure this
 *     doesn't happen, we play it safe as in (2) above.
 *
 * Since the only other things that ever take the device lock, such as block
 * pool resize, only rarely happen, this will almost never be contended, so
 * taking a lock isn't really an expensive operation in this case.
 */
VkResult
anv_queue_execbuf_locked(struct anv_queue *queue,
                         struct anv_queue_submit *submit)
{
   struct anv_device *device = queue->device;
   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);
   execbuf.alloc = submit->alloc;
   execbuf.alloc_scope = submit->alloc_scope;
   execbuf.perf_query_pass = submit->perf_query_pass;

   /* Always add the workaround BO as it includes a driver identifier for the
    * error_state.
    */
   VkResult result =
      anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
   if (result != VK_SUCCESS)
      goto error;

   for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
      int signaled;
      struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);

      result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
                                  signaled ? EXEC_OBJECT_WRITE : 0);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (submit->cmd_buffer_count) {
      result = setup_execbuf_for_cmd_buffers(&execbuf, queue,
                                             submit->cmd_buffers,
                                             submit->cmd_buffer_count);
   } else if (submit->simple_bo) {
      result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
      if (result != VK_SUCCESS)
         goto error;

      execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
         .buffers_ptr = (uintptr_t) execbuf.objects,
         .buffer_count = execbuf.bo_count,
         .batch_start_offset = 0,
         .batch_len = submit->simple_bo_size,
         .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
         .rsvd1 = device->context_id,
         .rsvd2 = 0,
      };
   } else {
      result = setup_empty_execbuf(&execbuf, queue);
   }

   if (result != VK_SUCCESS)
      goto error;

   const bool has_perf_query =
      submit->perf_query_pass >= 0 &&
      submit->cmd_buffer_count &&
      submit->perf_query_pool;

   if (INTEL_DEBUG & DEBUG_SUBMIT) {
      fprintf(stderr, "Batch offset=0x%x len=0x%x on queue 0\n",
              execbuf.execbuf.batch_start_offset, execbuf.execbuf.batch_len);
      for (uint32_t i = 0; i < execbuf.bo_count; i++) {
         const struct anv_bo *bo = execbuf.bos[i];

         fprintf(stderr, " BO: addr=0x%016"PRIx64" size=%010"PRIx64" handle=%05u name=%s\n",
                 bo->offset, bo->size, bo->gem_handle, bo->name);
      }
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
      if (submit->cmd_buffer_count) {
         if (has_perf_query) {
            struct anv_query_pool *query_pool = submit->perf_query_pool;
            struct anv_bo *pass_batch_bo = query_pool->bo;
            uint64_t pass_batch_offset =
               khr_perf_query_preamble_offset(query_pool,
                                              submit->perf_query_pass);

            intel_print_batch(&device->decoder_ctx,
                              pass_batch_bo->map + pass_batch_offset, 64,
                              pass_batch_bo->offset + pass_batch_offset, false);
         }

         for (uint32_t i = 0; i < submit->cmd_buffer_count; i++) {
            struct anv_batch_bo **bo =
               u_vector_tail(&submit->cmd_buffers[i]->seen_bbos);
            device->cmd_buffer_being_decoded = submit->cmd_buffers[i];
            intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
                              (*bo)->bo->size, (*bo)->bo->offset, false);
            device->cmd_buffer_being_decoded = NULL;
         }
      } else if (submit->simple_bo) {
         intel_print_batch(&device->decoder_ctx, submit->simple_bo->map,
                           submit->simple_bo->size, submit->simple_bo->offset, false);
      } else {
         intel_print_batch(&device->decoder_ctx,
                           device->trivial_batch_bo->map,
                           device->trivial_batch_bo->size,
                           device->trivial_batch_bo->offset, false);
      }
   }

   if (submit->fence_count > 0) {
      assert(device->physical->has_syncobj);
      if (device->has_thread_submit) {
         execbuf.timeline_fences.fence_count = submit->fence_count;
         execbuf.timeline_fences.handles_ptr = (uintptr_t)submit->fences;
         execbuf.timeline_fences.values_ptr = (uintptr_t)submit->fence_values;
         anv_execbuf_add_ext(&execbuf,
                             DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
                             &execbuf.timeline_fences.base);
      } else {
         execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
         execbuf.execbuf.num_cliprects = submit->fence_count;
         execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
      }
   }

   if (submit->in_fence != -1) {
      assert(!device->has_thread_submit);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
      execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
   }

   if (submit->need_out_fence) {
      assert(!device->has_thread_submit);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   if (has_perf_query) {
      struct anv_query_pool *query_pool = submit->perf_query_pool;
      assert(submit->perf_query_pass < query_pool->n_passes);
      struct intel_perf_query_info *query_info =
         query_pool->pass_query[submit->perf_query_pass];

      /* Some performance queries use just the pipeline statistics HW; OA is
       * not needed in that case, so there is no need to reconfigure.
       */
      if ((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0 &&
          (query_info->kind == INTEL_PERF_QUERY_TYPE_OA ||
           query_info->kind == INTEL_PERF_QUERY_TYPE_RAW)) {
         int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
                               (void *)(uintptr_t) query_info->oa_metrics_set_id);
         if (ret < 0) {
            result = anv_device_set_lost(device,
                                         "i915-perf config failed: %s",
                                         strerror(errno));
         }
      }

      struct anv_bo *pass_batch_bo = query_pool->bo;

      struct drm_i915_gem_exec_object2 query_pass_object = {
         .handle = pass_batch_bo->gem_handle,
         .offset = pass_batch_bo->offset,
         .flags = pass_batch_bo->flags,
      };
      struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
         .buffers_ptr = (uintptr_t) &query_pass_object,
         .buffer_count = 1,
         .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
                                                              submit->perf_query_pass),
         .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags,
         .rsvd1 = device->context_id,
      };

      int ret = queue->device->no_hw ? 0 :
         anv_gem_execbuffer(queue->device, &query_pass_execbuf);
      if (ret)
         result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
   }

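   /* Submit the main execbuf. When device->no_hw is set the ioctl is
    * skipped, but the BO offset write-back below still runs.
    */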
   int ret = queue->device->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
   if (ret)
      result = anv_queue_set_lost(queue, "execbuf2 failed: %m");

   struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
   for (uint32_t k = 0; k < execbuf.bo_count; k++) {
      if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf.bos[k]->offset == objects[k].offset);
      execbuf.bos[k]->offset = objects[k].offset;
   }

   if (result == VK_SUCCESS && submit->need_out_fence)
      submit->out_fence = execbuf.execbuf.rsvd2 >> 32;

 error:
   pthread_cond_broadcast(&device->queue_submit);

   anv_execbuf_finish(&execbuf);

   return result;
}
