Path: blob/21.2-virgl/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <[email protected]>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "util/os_time.h"
#include <inttypes.h>
#include <stdio.h>

#include "amd/common/sid.h"

/* FENCES */

static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ws = ctx->ws;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   util_queue_fence_init(&fence->submitted);
   util_queue_fence_reset(&fence->submitted);
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
amdgpu_fence_import_syncobj(struct radeon_winsys *rws, int fd)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
   int r;

   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   fence->ws = ws;

   r = amdgpu_cs_import_syncobj(ws->dev, fd, &fence->syncobj);
   if (r) {
      FREE(fence);
      return NULL;
   }

   util_queue_fence_init(&fence->submitted);

   assert(amdgpu_fence_is_syncobj(fence));
   return (struct pipe_fence_handle*)fence;
}

static struct pipe_fence_handle *
amdgpu_fence_import_sync_file(struct radeon_winsys *rws, int fd)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   fence->ws = ws;
   /* fence->ctx == NULL means that the fence is syncobj-based. */

   /* Convert sync_file into syncobj. */
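   /* The sync_file fd cannot be tracked through the amdgpu sequence-number
    * fence path, so wrap it: create an empty DRM syncobj and import the
    * sync_file's fence payload into it. The resulting fence keeps
    * ctx == NULL, which marks it as syncobj-based for the rest of this file. */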
   int r = amdgpu_cs_create_syncobj(ws->dev, &fence->syncobj);
   if (r) {
      FREE(fence);
      return NULL;
   }

   r = amdgpu_cs_syncobj_import_sync_file(ws->dev, fence->syncobj, fd);
   if (r) {
      amdgpu_cs_destroy_syncobj(ws->dev, fence->syncobj);
      FREE(fence);
      return NULL;
   }

   util_queue_fence_init(&fence->submitted);

   return (struct pipe_fence_handle*)fence;
}

static int amdgpu_fence_export_sync_file(struct radeon_winsys *rws,
                                         struct pipe_fence_handle *pfence)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;

   if (amdgpu_fence_is_syncobj(fence)) {
      int fd, r;

      /* Convert syncobj into sync_file. */
      r = amdgpu_cs_syncobj_export_sync_file(ws->dev, fence->syncobj, &fd);
      return r ? -1 : fd;
   }

   util_queue_fence_wait(&fence->submitted);

   /* Convert the amdgpu fence into a fence FD. */
   int fd;
   if (amdgpu_cs_fence_to_handle(ws->dev, &fence->fence,
                                 AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
                                 (uint32_t*)&fd))
      return -1;

   return fd;
}

static int amdgpu_export_signalled_sync_file(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   uint32_t syncobj;
   int fd = -1;

   int r = amdgpu_cs_create_syncobj2(ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED,
                                     &syncobj);
   if (r) {
      return -1;
   }

   r = amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, &fd);
   if (r) {
      fd = -1;
   }

   amdgpu_cs_destroy_syncobj(ws->dev, syncobj);
   return fd;
}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   uint64_t seq_no,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;

   afence->fence.fence = seq_no;
   afence->user_fence_cpu_address = user_fence_cpu_address;
   util_queue_fence_signal(&afence->submitted);
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;

   afence->signalled = true;
   util_queue_fence_signal(&afence->submitted);
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (afence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* Handle syncobjs. */
   if (amdgpu_fence_is_syncobj(afence)) {
      if (abs_timeout == OS_TIMEOUT_INFINITE)
         abs_timeout = INT64_MAX;

      if (amdgpu_cs_syncobj_wait(afence->ws->dev, &afence->syncobj, 1,
                                 abs_timeout, 0, NULL))
         return false;

      afence->signalled = true;
      return true;
   }

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!util_queue_fence_wait_timeout(&afence->submitted, abs_timeout))
      return false;

   user_fence_cpu = afence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= afence->fence.fence) {
         afence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
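   /* Fall back to the kernel wait; the timeout passed below is absolute,
    * which is why AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE is set. */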
   r = amdgpu_cs_query_fence_status(&afence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      afence->signalled = true;
      return true;
   }
   return false;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

static struct pipe_fence_handle *
amdgpu_cs_get_next_fence(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (cs->noop)
      return NULL;

   if (cs->next_fence) {
      amdgpu_fence_reference(&fence, cs->next_fence);
      return fence;
   }

   fence = amdgpu_fence_create(cs->ctx,
                               cs->csc->ib[IB_MAIN].ip_type,
                               cs->csc->ib[IB_MAIN].ip_instance,
                               cs->csc->ib[IB_MAIN].ring);
   if (!fence)
      return NULL;

   amdgpu_fence_reference(&cs->next_fence, fence);
   return fence;
}

/* CONTEXTS */

static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;
   ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx, bool full_reset_only,
                              bool *needs_reset)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   int r;

   if (needs_reset)
      *needs_reset = false;

   /* Return a failure due to a GPU hang. */
   if (ctx->ws->info.drm_minor >= 24) {
      uint64_t flags;

      if (full_reset_only &&
          ctx->initial_num_total_rejected_cs == ctx->ws->num_total_rejected_cs) {
         /* If the caller is only interested in full reset (= wants to ignore soft
          * recoveries), we can use the rejected cs count as a quick first check.
          */
         return PIPE_NO_RESET;
      }

      r = amdgpu_cs_query_reset_state2(ctx->ctx, &flags);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
(%i)\n", r);360return PIPE_NO_RESET;361}362363if (flags & AMDGPU_CTX_QUERY2_FLAGS_RESET) {364if (needs_reset)365*needs_reset = flags & AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;366if (flags & AMDGPU_CTX_QUERY2_FLAGS_GUILTY)367return PIPE_GUILTY_CONTEXT_RESET;368else369return PIPE_INNOCENT_CONTEXT_RESET;370}371} else {372uint32_t result, hangs;373374r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);375if (r) {376fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);377return PIPE_NO_RESET;378}379380if (needs_reset)381*needs_reset = true;382switch (result) {383case AMDGPU_CTX_GUILTY_RESET:384return PIPE_GUILTY_CONTEXT_RESET;385case AMDGPU_CTX_INNOCENT_RESET:386return PIPE_INNOCENT_CONTEXT_RESET;387case AMDGPU_CTX_UNKNOWN_RESET:388return PIPE_UNKNOWN_CONTEXT_RESET;389}390}391392/* Return a failure due to a rejected command submission. */393if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {394if (needs_reset)395*needs_reset = true;396return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :397PIPE_INNOCENT_CONTEXT_RESET;398}399if (needs_reset)400*needs_reset = false;401return PIPE_NO_RESET;402}403404/* COMMAND SUBMISSION */405406static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)407{408return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&409cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&410cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD_ENC &&411cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC &&412cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_ENC &&413cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_JPEG;414}415416static inline unsigned amdgpu_cs_epilog_dws(struct amdgpu_cs *cs)417{418if (cs->has_chaining)419return 4; /* for chaining */420421return 0;422}423424static int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo,425struct amdgpu_cs_buffer *buffers, unsigned num_buffers)426{427unsigned hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);428int i = cs->buffer_indices_hashlist[hash];429430/* not found or found */431if (i < 0 || (i < num_buffers && buffers[i].bo == bo))432return i;433434/* Hash collision, look for the BO in the list of buffers linearly. */435for (int i = num_buffers - 1; i >= 0; i--) {436if (buffers[i].bo == bo) {437/* Put this buffer in the hash list.438* This will prevent additional hash collisions if there are439* several consecutive lookup_buffer calls for the same buffer.440*441* Example: Assuming buffers A,B,C collide in the hash list,442* the following sequence of buffers:443* AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC444* will collide here: ^ and here: ^,445* meaning that we should get very few collisions in the end. */446cs->buffer_indices_hashlist[hash] = i & 0x7fff;447return i;448}449}450return -1;451}452453int amdgpu_lookup_buffer_any_type(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)454{455struct amdgpu_cs_buffer *buffers;456int num_buffers;457458if (bo->bo) {459buffers = cs->real_buffers;460num_buffers = cs->num_real_buffers;461} else if (!(bo->base.usage & RADEON_FLAG_SPARSE)) {462buffers = cs->slab_buffers;463num_buffers = cs->num_slab_buffers;464} else {465buffers = cs->sparse_buffers;466num_buffers = cs->num_sparse_buffers;467}468469return amdgpu_lookup_buffer(cs, bo, buffers, num_buffers);470}471472static int473amdgpu_do_add_real_buffer(struct amdgpu_winsys *ws, struct amdgpu_cs_context *cs,474struct amdgpu_winsys_bo *bo)475{476struct amdgpu_cs_buffer *buffer;477int idx;478479/* New buffer, check if the backing array is large enough. 
   if (cs->num_real_buffers >= cs->max_real_buffers) {
      unsigned new_max =
         MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = MALLOC(new_max * sizeof(*new_buffers));

      if (!new_buffers) {
         fprintf(stderr, "amdgpu_do_add_buffer: allocation failed\n");
         FREE(new_buffers);
         return -1;
      }

      memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));

      FREE(cs->real_buffers);

      cs->max_real_buffers = new_max;
      cs->real_buffers = new_buffers;
   }

   idx = cs->num_real_buffers;
   buffer = &cs->real_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(ws, &buffer->bo, bo);
   cs->num_real_buffers++;

   return idx;
}

static int
amdgpu_lookup_or_add_real_buffer(struct radeon_cmdbuf *rcs, struct amdgpu_cs *acs,
                                 struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo, cs->real_buffers, cs->num_real_buffers);

   if (idx >= 0)
      return idx;

   idx = amdgpu_do_add_real_buffer(acs->ws, cs, bo);

   hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
   cs->buffer_indices_hashlist[hash] = idx & 0x7fff;

   if (bo->base.placement & RADEON_DOMAIN_VRAM)
      rcs->used_vram_kb += bo->base.size / 1024;
   else if (bo->base.placement & RADEON_DOMAIN_GTT)
      rcs->used_gart_kb += bo->base.size / 1024;

   return idx;
}

static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_winsys *ws,
                                            struct radeon_cmdbuf *rcs,
                                            struct amdgpu_cs *acs,
                                            struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo, cs->slab_buffers, cs->num_slab_buffers);
   int real_idx;

   if (idx >= 0)
      return idx;

   real_idx = amdgpu_lookup_or_add_real_buffer(rcs, acs, bo->u.slab.real);
   if (real_idx < 0)
      return -1;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_slab_buffers >= cs->max_slab_buffers) {
      unsigned new_max =
         MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->slab_buffers,
                            cs->max_slab_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
         return -1;
      }

      cs->max_slab_buffers = new_max;
      cs->slab_buffers = new_buffers;
   }

   idx = cs->num_slab_buffers;
   buffer = &cs->slab_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(ws, &buffer->bo, bo);
   buffer->u.slab.real_idx = real_idx;
   cs->num_slab_buffers++;

   hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
   cs->buffer_indices_hashlist[hash] = idx & 0x7fff;

   return idx;
}

static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_winsys *ws,
                                              struct radeon_cmdbuf *rcs,
                                              struct amdgpu_cs *acs,
                                              struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo, cs->sparse_buffers, cs->num_sparse_buffers);

   if (idx >= 0)
      return idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
      unsigned new_max =
         MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->sparse_buffers,
                            cs->max_sparse_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
         return -1;
      }

      cs->max_sparse_buffers = new_max;
      cs->sparse_buffers = new_buffers;
   }

   idx = cs->num_sparse_buffers;
   buffer = &cs->sparse_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(ws, &buffer->bo, bo);
   cs->num_sparse_buffers++;

   hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
   cs->buffer_indices_hashlist[hash] = idx & 0x7fff;

   /* We delay adding the backing buffers until we really have to. However,
    * we cannot delay accounting for memory use.
    */
   simple_mtx_lock(&bo->lock);

   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      if (bo->base.placement & RADEON_DOMAIN_VRAM)
         rcs->used_vram_kb += backing->bo->base.size / 1024;
      else if (bo->base.placement & RADEON_DOMAIN_GTT)
         rcs->used_gart_kb += backing->bo->base.size / 1024;
   }

   simple_mtx_unlock(&bo->lock);

   return idx;
}

static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *acs = amdgpu_cs(rcs);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs_buffer *buffer;
   int index;

   /* Fast exit for no-op calls.
    * This is very effective with suballocators and linear uploaders that
    * are outside of the winsys.
    */
   if (bo == cs->last_added_bo &&
       (usage & cs->last_added_bo_usage) == usage &&
       (1u << priority) & cs->last_added_bo_priority_usage)
      return cs->last_added_bo_index;

   if (!(bo->base.usage & RADEON_FLAG_SPARSE)) {
      if (!bo->bo) {
         index = amdgpu_lookup_or_add_slab_buffer(acs->ws, rcs, acs, bo);
         if (index < 0)
            return 0;

         buffer = &cs->slab_buffers[index];
         buffer->usage |= usage;

         usage &= ~RADEON_USAGE_SYNCHRONIZED;
         index = buffer->u.slab.real_idx;
      } else {
         index = amdgpu_lookup_or_add_real_buffer(rcs, acs, bo);
         if (index < 0)
            return 0;
      }

      buffer = &cs->real_buffers[index];
   } else {
      index = amdgpu_lookup_or_add_sparse_buffer(acs->ws, rcs, acs, bo);
      if (index < 0)
         return 0;

      buffer = &cs->sparse_buffers[index];
   }

   buffer->u.real.priority_usage |= 1u << priority;
   buffer->usage |= usage;

   cs->last_added_bo = bo;
   cs->last_added_bo_index = index;
   cs->last_added_bo_usage = buffer->usage;
   cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
   return index;
}

static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
                                 struct amdgpu_ib *ib,
                                 struct amdgpu_cs *cs)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is at least as large as the maximum seen IB
    * size, aligned to a power of two (and multiplied by 4 to reduce internal
    * fragmentation if chaining is not available). Limit to 512k dwords, which
    * is the largest power of two that fits into the size field of the
    * INDIRECT_BUFFER packet.
    */
   if (cs->has_chaining)
      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);

   const unsigned min_size = MAX2(ib->max_check_space_size, 8 * 1024 * 4);
   const unsigned max_size = 512 * 1024 * 4;

   buffer_size = MIN2(buffer_size, max_size);
   buffer_size = MAX2(buffer_size, min_size); /* min_size is more important */

   enum radeon_bo_domain domain;
   unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;

   if (cs->ring_type == RING_GFX ||
       cs->ring_type == RING_COMPUTE ||
       cs->ring_type == RING_DMA) {
      domain = ws->info.smart_access_memory ? RADEON_DOMAIN_VRAM : RADEON_DOMAIN_GTT;
      flags |= RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC;
   } else {
      /* UVD/VCE */
      /* TODO: validate that UVD/VCE don't read from IBs and enable WC or even VRAM. */
      domain = RADEON_DOMAIN_GTT;
   }

   pb = amdgpu_bo_create(ws, buffer_size,
                         ws->info.gart_page_size,
                         domain, flags);
   if (!pb)
      return false;

   mapped = amdgpu_bo_map(&ws->dummy_ws.base, pb, NULL, PIPE_MAP_WRITE);
   if (!mapped) {
      radeon_bo_reference(&ws->dummy_ws.base, &pb, NULL);
      return false;
   }

   radeon_bo_reference(&ws->dummy_ws.base, &ib->big_ib_buffer, pb);
   radeon_bo_reference(&ws->dummy_ws.base, &pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}

static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   /* The maximum IB size including all chained IBs. */
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits means the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_PARALLEL_COMPUTE:
      /* Always chain this IB. */
      return UINT_MAX;
   default:
      unreachable("bad ib_type");
   }
}

static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws,
                              struct radeon_cmdbuf *rcs,
                              struct amdgpu_ib *ib,
                              struct amdgpu_cs *cs)
{
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib->ib_type];
   /* This is the minimum size of a contiguous IB. */
   unsigned ib_size = 4 * 1024 * 4;

   /* Always allocate at least the size of the biggest cs_check_space call,
    * because precisely the last call might have requested this size.
    */
   ib_size = MAX2(ib_size, ib->max_check_space_size);

   if (!cs->has_chaining) {
      ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib->ib_type)));
   }

   ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;

   rcs->prev_dw = 0;
   rcs->num_prev = 0;
   rcs->current.cdw = 0;
   rcs->current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(ws, ib, cs))
         return false;
   }

   info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;
   info->ib_bytes = 0;
   /* ib_bytes is in dwords and the conversion to bytes will be done before
    * the CS ioctl. */
   ib->ptr_ib_size = &info->ib_bytes;
   ib->ptr_ib_size_inside_ib = false;

   amdgpu_cs_add_buffer(cs->main.rcs, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   rcs->current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   rcs->current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs);
   rcs->gpu_address = info->va_start;
   return true;
}

static void amdgpu_set_ib_size(struct radeon_cmdbuf *rcs, struct amdgpu_ib *ib)
{
   if (ib->ptr_ib_size_inside_ib) {
      *ib->ptr_ib_size = rcs->current.cdw |
                         S_3F2_CHAIN(1) | S_3F2_VALID(1);
   } else {
      *ib->ptr_ib_size = rcs->current.cdw;
   }
}

static void amdgpu_ib_finalize(struct amdgpu_winsys *ws, struct radeon_cmdbuf *rcs,
                               struct amdgpu_ib *ib)
{
   amdgpu_set_ib_size(rcs, ib);
   ib->used_ib_space += rcs->current.cdw * 4;
   ib->used_ib_space = align(ib->used_ib_space, ws->info.ib_alignment);
   ib->max_ib_size = MAX2(ib->max_ib_size, rcs->prev_dw + rcs->current.cdw);
}

static bool amdgpu_init_cs_context(struct amdgpu_winsys *ws,
                                   struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{
   switch (ring_type) {
   case RING_DMA:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_UVD_ENC:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD_ENC;
      break;

   case RING_VCE:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_VCN_DEC:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
      break;

   case RING_VCN_ENC:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_ENC;
      break;

   case RING_VCN_JPEG:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_JPEG;
      break;

   case RING_COMPUTE:
   case RING_GFX:
      cs->ib[IB_MAIN].ip_type = ring_type == RING_GFX ? AMDGPU_HW_IP_GFX :
                                                        AMDGPU_HW_IP_COMPUTE;

      /* The kernel shouldn't invalidate L2 and vL1. The proper place for cache
       * invalidation is the beginning of IBs (the previous commit does that),
       * because completion of an IB doesn't care about the state of GPU caches,
       * but the beginning of an IB does. Draw calls from multiple IBs can be
       * executed in parallel, so draw calls from the current IB can finish after
       * the next IB starts drawing, and so the cache flush at the end of IB
       * is always late.
       */
      if (ws->info.drm_minor >= 26)
         cs->ib[IB_MAIN].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
      break;

   default:
      assert(0);
   }

   cs->ib[IB_PARALLEL_COMPUTE].ip_type = AMDGPU_HW_IP_COMPUTE;
   cs->ib[IB_PARALLEL_COMPUTE].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;

   cs->last_added_bo = NULL;
   return true;
}

static void cleanup_fence_list(struct amdgpu_fence_list *fences)
{
   for (unsigned i = 0; i < fences->num; i++)
      amdgpu_fence_reference(&fences->list[i], NULL);
   fences->num = 0;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_winsys *ws, struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_real_buffers; i++) {
      amdgpu_winsys_bo_reference(ws, &cs->real_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_slab_buffers; i++) {
      amdgpu_winsys_bo_reference(ws, &cs->slab_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_sparse_buffers; i++) {
      amdgpu_winsys_bo_reference(ws, &cs->sparse_buffers[i].bo, NULL);
   }
   cleanup_fence_list(&cs->fence_dependencies);
   cleanup_fence_list(&cs->syncobj_dependencies);
   cleanup_fence_list(&cs->syncobj_to_signal);
   cleanup_fence_list(&cs->compute_fence_dependencies);
   cleanup_fence_list(&cs->compute_start_fence_dependencies);

   cs->num_real_buffers = 0;
   cs->num_slab_buffers = 0;
   cs->num_sparse_buffers = 0;
   amdgpu_fence_reference(&cs->fence, NULL);
   cs->last_added_bo = NULL;
}

static void amdgpu_destroy_cs_context(struct amdgpu_winsys *ws, struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(ws, cs);
   FREE(cs->real_buffers);
   FREE(cs->slab_buffers);
   FREE(cs->sparse_buffers);
   FREE(cs->fence_dependencies.list);
   FREE(cs->syncobj_dependencies.list);
   FREE(cs->syncobj_to_signal.list);
   FREE(cs->compute_fence_dependencies.list);
   FREE(cs->compute_start_fence_dependencies.list);
}


static bool
amdgpu_cs_create(struct radeon_cmdbuf *rcs,
                 struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx,
                 bool stop_exec_on_failure)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return false;
   }

   util_queue_fence_init(&cs->flush_completed);

   cs->ws = ctx->ws;
   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;
   cs->stop_exec_on_failure = stop_exec_on_failure;
   cs->noop = ctx->ws->noop_cs;
   cs->has_chaining = ctx->ws->info.chip_class >= GFX7 &&
                      (ring_type == RING_GFX || ring_type == RING_COMPUTE);

   struct amdgpu_cs_fence_info fence_info;
   fence_info.handle = cs->ctx->user_fence_bo;
   fence_info.offset = cs->ring_type * 4;
   amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);

   cs->main.ib_type = IB_MAIN;
   cs->compute_ib.ib_type = IB_PARALLEL_COMPUTE;

   if (!amdgpu_init_cs_context(ctx->ws, &cs->csc1, ring_type)) {
      FREE(cs);
      return false;
   }

   if (!amdgpu_init_cs_context(ctx->ws, &cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(ctx->ws, &cs->csc1);
      FREE(cs);
      return false;
   }

   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));

   /* Set the first submission context as current. */
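   /* csc is the context the driver records into; cst is the one owned by the
    * submission thread. amdgpu_cs_flush swaps them, so recording can continue
    * while the previous IB is submitted asynchronously. */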
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   /* Assign to both amdgpu_cs_context; only csc will use it. */
   cs->csc1.buffer_indices_hashlist = cs->buffer_indices_hashlist;
   cs->csc2.buffer_indices_hashlist = cs->buffer_indices_hashlist;

   cs->main.rcs = rcs;
   rcs->priv = cs;

   if (!amdgpu_get_new_ib(ctx->ws, rcs, &cs->main, cs)) {
      amdgpu_destroy_cs_context(ctx->ws, &cs->csc2);
      amdgpu_destroy_cs_context(ctx->ws, &cs->csc1);
      FREE(cs);
      rcs->priv = NULL;
      return false;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return true;
}

static bool
amdgpu_cs_add_parallel_compute_ib(struct radeon_cmdbuf *compute_cs,
                                  struct radeon_cmdbuf *gfx_cs,
                                  bool uses_gds_ordered_append)
{
   struct amdgpu_cs *cs = amdgpu_cs(gfx_cs);
   struct amdgpu_winsys *ws = cs->ws;

   if (cs->ring_type != RING_GFX)
      return false;

   /* only one secondary IB can be added */
   if (cs->compute_ib.ib_mapped)
      return false;

   /* Allocate the compute IB. */
   if (!amdgpu_get_new_ib(ws, compute_cs, &cs->compute_ib, cs))
      return false;

   if (uses_gds_ordered_append) {
      cs->csc1.ib[IB_PARALLEL_COMPUTE].flags |=
         AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
      cs->csc2.ib[IB_PARALLEL_COMPUTE].flags |=
         AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
   }

   cs->compute_ib.rcs = compute_cs;
   compute_cs->priv = cs;
   return true;
}

static bool
amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_ib,
                           unsigned preamble_num_dw)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ws;
   struct amdgpu_cs_context *csc[2] = {&cs->csc1, &cs->csc2};
   unsigned size = align(preamble_num_dw * 4, ws->info.ib_alignment);
   struct pb_buffer *preamble_bo;
   uint32_t *map;

   /* Create the preamble IB buffer. */
   preamble_bo = amdgpu_bo_create(ws, size, ws->info.ib_alignment,
                                  RADEON_DOMAIN_VRAM,
                                  RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                  RADEON_FLAG_GTT_WC |
                                  RADEON_FLAG_READ_ONLY);
   if (!preamble_bo)
      return false;

   map = (uint32_t*)amdgpu_bo_map(&ws->dummy_ws.base, preamble_bo, NULL,
                                  PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
   if (!map) {
      radeon_bo_reference(&ws->dummy_ws.base, &preamble_bo, NULL);
      return false;
   }

   /* Upload the preamble IB. */
   memcpy(map, preamble_ib, preamble_num_dw * 4);

   /* Pad the IB. */
   uint32_t ib_pad_dw_mask = ws->info.ib_pad_dw_mask[cs->ring_type];
   while (preamble_num_dw & ib_pad_dw_mask)
      map[preamble_num_dw++] = PKT3_NOP_PAD;
   amdgpu_bo_unmap(&ws->dummy_ws.base, preamble_bo);

   for (unsigned i = 0; i < 2; i++) {
      csc[i]->ib[IB_PREAMBLE] = csc[i]->ib[IB_MAIN];
      csc[i]->ib[IB_PREAMBLE].flags |= AMDGPU_IB_FLAG_PREAMBLE;
      csc[i]->ib[IB_PREAMBLE].va_start = amdgpu_winsys_bo(preamble_bo)->va;
      csc[i]->ib[IB_PREAMBLE].ib_bytes = preamble_num_dw * 4;

      csc[i]->ib[IB_MAIN].flags |= AMDGPU_IB_FLAG_PREEMPT;
   }

   assert(!cs->preamble_ib_bo);
   cs->preamble_ib_bo = preamble_bo;

   amdgpu_cs_add_buffer(rcs, cs->preamble_ib_bo, RADEON_USAGE_READ, 0,
                        RADEON_PRIO_IB1);
   return true;
}

static bool amdgpu_cs_validate(struct radeon_cmdbuf *rcs)
{
   return true;
}

static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
                                  bool force_chaining)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_ib *ib = rcs == cs->main.rcs ? &cs->main : &cs->compute_ib;
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
   unsigned cs_epilog_dw = amdgpu_cs_epilog_dws(cs);
   unsigned need_byte_size = (dw + cs_epilog_dw) * 4;
   uint64_t va;
   uint32_t *new_ptr_ib_size;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   /* 125% of the size for IB epilog. */
   unsigned safe_byte_size = need_byte_size + need_byte_size / 4;
   ib->max_check_space_size = MAX2(ib->max_check_space_size,
                                   safe_byte_size);

   /* If force_chaining is true, we can't return. We have to chain. */
   if (!force_chaining) {
      if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
         return false;

      ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

      if (rcs->current.max_dw - rcs->current.cdw >= dw)
         return true;
   }

   if (!cs->has_chaining) {
      assert(!force_chaining);
      return false;
   }

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
      unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
      struct radeon_cmdbuf_chunk *new_prev;

      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);
      if (!new_prev)
         return false;

      rcs->prev = new_prev;
      rcs->max_prev = new_max_prev;
   }

   if (!amdgpu_ib_new_buffer(cs->ws, ib, cs))
      return false;

   assert(ib->used_ib_space == 0);
   va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;

   /* This space was originally reserved. */
   rcs->current.max_dw += cs_epilog_dw;

   /* Pad with NOPs but leave 4 dwords for INDIRECT_BUFFER. */
   uint32_t ib_pad_dw_mask = cs->ws->info.ib_pad_dw_mask[cs->ring_type];
   while ((rcs->current.cdw & ib_pad_dw_mask) != ib_pad_dw_mask - 3)
      radeon_emit(rcs, PKT3_NOP_PAD);

   radeon_emit(rcs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
   radeon_emit(rcs, va);
   radeon_emit(rcs, va >> 32);
   new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw++];
   assert((rcs->current.cdw & ib_pad_dw_mask) == 0);

   assert((rcs->current.cdw & 7) == 0);
   assert(rcs->current.cdw <= rcs->current.max_dw);

   amdgpu_set_ib_size(rcs, ib);
   ib->ptr_ib_size = new_ptr_ib_size;
   ib->ptr_ib_size_inside_ib = true;

   /* Hook up the new chunk */
   rcs->prev[rcs->num_prev].buf = rcs->current.buf;
   rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
   rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
   rcs->num_prev++;

   rcs->prev_dw += rcs->current.cdw;
   rcs->current.cdw = 0;

   rcs->current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   rcs->current.max_dw = ib->big_ib_buffer->size / 4 - cs_epilog_dw;
   rcs->gpu_address = va;

   amdgpu_cs_add_buffer(cs->main.rcs, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   return true;
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_real_buffers; i++) {
         list[i].bo_size = cs->real_buffers[i].bo->base.size;
         list[i].vm_address = cs->real_buffers[i].bo->va;
         list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
      }
   }
   return cs->num_real_buffers;
}

static void add_fence_to_list(struct amdgpu_fence_list *fences,
                              struct amdgpu_fence *fence)
{
   unsigned idx = fences->num++;

   if (idx >= fences->max) {
      unsigned size;
      const unsigned increment = 8;

      fences->max = idx + increment;
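      /* The list grows by a fixed increment (8 entries); the newly reserved
       * tail is cleared below before a fence reference is stored in it. */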
      size = fences->max * sizeof(fences->list[0]);
      fences->list = realloc(fences->list, size);
      /* Clear the newly-allocated elements. */
      memset(fences->list + idx, 0,
             increment * sizeof(fences->list[0]));
   }
   amdgpu_fence_reference(&fences->list[idx], (struct pipe_fence_handle*)fence);
}

static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
                                     struct amdgpu_fence *fence)
{
   struct amdgpu_cs_context *cs = acs->csc;

   /* Detect no-op dependencies only when there is only 1 ring,
    * because IBs on one ring are always executed one at a time.
    *
    * We always want no dependency between back-to-back gfx IBs, because
    * we need the parallelism between IBs for good performance.
    */
   if ((acs->ring_type == RING_GFX ||
        acs->ws->info.num_rings[acs->ring_type] == 1) &&
       !amdgpu_fence_is_syncobj(fence) &&
       fence->ctx == acs->ctx &&
       fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
       fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
       fence->fence.ring == cs->ib[IB_MAIN].ring)
      return true;

   return amdgpu_fence_wait((void *)fence, 0, false);
}

static void amdgpu_cs_add_fence_dependency(struct radeon_cmdbuf *rws,
                                           struct pipe_fence_handle *pfence,
                                           unsigned dependency_flags)
{
   struct amdgpu_cs *acs = amdgpu_cs(rws);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;

   util_queue_fence_wait(&fence->submitted);

   if (dependency_flags & RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY) {
      /* Syncobjs are not needed here. */
      assert(!amdgpu_fence_is_syncobj(fence));

      if (acs->ws->info.has_scheduled_fence_dependency &&
          dependency_flags & RADEON_DEPENDENCY_START_FENCE)
         add_fence_to_list(&cs->compute_start_fence_dependencies, fence);
      else
         add_fence_to_list(&cs->compute_fence_dependencies, fence);
      return;
   }

   /* Start fences are not needed here. */
   assert(!(dependency_flags & RADEON_DEPENDENCY_START_FENCE));

   if (is_noop_fence_dependency(acs, fence))
      return;

   if (amdgpu_fence_is_syncobj(fence))
      add_fence_to_list(&cs->syncobj_dependencies, fence);
   else
      add_fence_to_list(&cs->fence_dependencies, fence);
}

static void amdgpu_add_bo_fence_dependencies(struct amdgpu_cs *acs,
                                             struct amdgpu_cs_buffer *buffer)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = buffer->bo;
   unsigned new_num_fences = 0;

   for (unsigned j = 0; j < bo->num_fences; ++j) {
      struct amdgpu_fence *bo_fence = (void *)bo->fences[j];

      if (is_noop_fence_dependency(acs, bo_fence))
         continue;

      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
      new_num_fences++;

      if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
         continue;

      add_fence_to_list(&cs->fence_dependencies, bo_fence);
   }

   for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
      amdgpu_fence_reference(&bo->fences[j], NULL);

   bo->num_fences = new_num_fences;
}

/* Add the given list of fences to the buffer's fence list.
 *
 * Must be called with the winsys bo_fence_lock held.
 */
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                       unsigned num_fences,
                       struct pipe_fence_handle **fences)
{
   if (bo->num_fences + num_fences > bo->max_fences) {
      unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
      struct pipe_fence_handle **new_fences =
         REALLOC(bo->fences,
                 bo->num_fences * sizeof(*new_fences),
                 new_max_fences * sizeof(*new_fences));
      if (likely(new_fences && new_max_fences < UINT16_MAX)) {
         bo->fences = new_fences;
         bo->max_fences = new_max_fences;
      } else {
         unsigned drop;

         fprintf(stderr, new_fences ? "amdgpu_add_fences: too many fences, dropping some\n"
"amdgpu_add_fences: too many fences, dropping some\n"1362: "amdgpu_add_fences: allocation failure, dropping fence(s)\n");1363free(new_fences);13641365if (!bo->num_fences)1366return;13671368bo->num_fences--; /* prefer to keep the most recent fence if possible */1369amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);13701371drop = bo->num_fences + num_fences - bo->max_fences;1372num_fences -= drop;1373fences += drop;1374}1375}13761377for (unsigned i = 0; i < num_fences; ++i) {1378bo->fences[bo->num_fences] = NULL;1379amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);1380bo->num_fences++;1381}1382}13831384static void amdgpu_add_fence_dependencies_bo_list(struct amdgpu_cs *acs,1385struct pipe_fence_handle *fence,1386unsigned num_buffers,1387struct amdgpu_cs_buffer *buffers)1388{1389for (unsigned i = 0; i < num_buffers; i++) {1390struct amdgpu_cs_buffer *buffer = &buffers[i];1391struct amdgpu_winsys_bo *bo = buffer->bo;13921393amdgpu_add_bo_fence_dependencies(acs, buffer);1394p_atomic_inc(&bo->num_active_ioctls);1395amdgpu_add_fences(bo, 1, &fence);1396}1397}13981399/* Since the kernel driver doesn't synchronize execution between different1400* rings automatically, we have to add fence dependencies manually.1401*/1402static void amdgpu_add_fence_dependencies_bo_lists(struct amdgpu_cs *acs)1403{1404struct amdgpu_cs_context *cs = acs->csc;14051406amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);1407amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);1408amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);1409}14101411static void amdgpu_cs_add_syncobj_signal(struct radeon_cmdbuf *rws,1412struct pipe_fence_handle *fence)1413{1414struct amdgpu_cs *acs = amdgpu_cs(rws);1415struct amdgpu_cs_context *cs = acs->csc;14161417assert(amdgpu_fence_is_syncobj((struct amdgpu_fence *)fence));14181419add_fence_to_list(&cs->syncobj_to_signal, (struct amdgpu_fence*)fence);1420}14211422/* Add backing of sparse buffers to the buffer list.1423*1424* This is done late, during submission, to keep the buffer list short before1425* submit, and to avoid managing fences for the backing buffers.1426*/1427static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_winsys *ws,1428struct amdgpu_cs_context *cs)1429{1430for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {1431struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];1432struct amdgpu_winsys_bo *bo = buffer->bo;14331434simple_mtx_lock(&bo->lock);14351436list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {1437/* We can directly add the buffer here, because we know that each1438* backing buffer occurs only once.1439*/1440int idx = amdgpu_do_add_real_buffer(ws, cs, backing->bo);1441if (idx < 0) {1442fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);1443simple_mtx_unlock(&bo->lock);1444return false;1445}14461447cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;1448}14491450simple_mtx_unlock(&bo->lock);1451}14521453return true;1454}14551456static void amdgpu_cs_submit_ib(void *job, void *gdata, int thread_index)1457{1458struct amdgpu_cs *acs = (struct amdgpu_cs*)job;1459struct amdgpu_winsys *ws = acs->ws;1460struct amdgpu_cs_context *cs = acs->cst;1461int i, r;1462uint32_t bo_list = 0;1463uint64_t seq_no = 0;1464bool has_user_fence = amdgpu_cs_has_user_fence(cs);1465bool use_bo_list_create = ws->info.drm_minor < 27;1466struct 
   unsigned initial_num_real_buffers = cs->num_real_buffers;

#if DEBUG
   /* Prepare the buffer list. */
   if (ws->debug_all_bos) {
      /* The buffer list contains all buffers. This is a slow path that
       * ensures that no buffer is missing in the BO list.
       */
      unsigned num_handles = 0;
      struct drm_amdgpu_bo_list_entry *list =
         alloca(ws->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
      struct amdgpu_winsys_bo *bo;

      simple_mtx_lock(&ws->global_bo_list_lock);
      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
         list[num_handles].bo_handle = bo->u.real.kms_handle;
         list[num_handles].bo_priority = 0;
         ++num_handles;
      }

      r = amdgpu_bo_list_create_raw(ws->dev, ws->num_buffers, list, &bo_list);
      simple_mtx_unlock(&ws->global_bo_list_lock);
      if (r) {
         fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
         goto cleanup;
      }
   } else
#endif
   {
      if (!amdgpu_add_sparse_backing_buffers(ws, cs)) {
         fprintf(stderr, "amdgpu: amdgpu_add_sparse_backing_buffers failed\n");
         r = -ENOMEM;
         goto cleanup;
      }

      struct drm_amdgpu_bo_list_entry *list =
         alloca((cs->num_real_buffers + 2) * sizeof(struct drm_amdgpu_bo_list_entry));

      unsigned num_handles = 0;
      for (i = 0; i < cs->num_real_buffers; ++i) {
         struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
         assert(buffer->u.real.priority_usage != 0);

         list[num_handles].bo_handle = buffer->bo->u.real.kms_handle;
         list[num_handles].bo_priority = (util_last_bit(buffer->u.real.priority_usage) - 1) / 2;
         ++num_handles;
      }

      if (use_bo_list_create) {
         /* Legacy path creating the buffer list handle and passing it to the CS ioctl. */
         r = amdgpu_bo_list_create_raw(ws->dev, num_handles, list, &bo_list);
         if (r) {
            fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
            goto cleanup;
         }
      } else {
         /* Standard path passing the buffer list via the CS ioctl. */
         bo_list_in.operation = ~0;
         bo_list_in.list_handle = ~0;
         bo_list_in.bo_number = num_handles;
         bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
         bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)list;
      }
   }

   if (acs->ring_type == RING_GFX)
      ws->gfx_bo_list_counter += cs->num_real_buffers;

   if (acs->stop_exec_on_failure && acs->ctx->num_rejected_cs) {
      r = -ECANCELED;
   } else {
      struct drm_amdgpu_cs_chunk chunks[7];
      unsigned num_chunks = 0;

      /* BO list */
      if (!use_bo_list_create) {
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
         num_chunks++;
      }

      /* Fence dependencies. */
      unsigned num_dependencies = cs->fence_dependencies.num;
      if (num_dependencies) {
         struct drm_amdgpu_cs_chunk_dep *dep_chunk =
            alloca(num_dependencies * sizeof(*dep_chunk));

         for (unsigned i = 0; i < num_dependencies; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence*)cs->fence_dependencies.list[i];

            assert(util_queue_fence_is_signalled(&fence->submitted));
            amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
         chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
         chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
         num_chunks++;
      }

      /* Syncobj dependencies. */
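      /* Each syncobj dependency becomes one entry in an
       * AMDGPU_CHUNK_ID_SYNCOBJ_IN chunk, which the kernel waits on before
       * scheduling the IB. */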
      unsigned num_syncobj_dependencies = cs->syncobj_dependencies.num;
      if (num_syncobj_dependencies) {
         struct drm_amdgpu_cs_chunk_sem *sem_chunk =
            alloca(num_syncobj_dependencies * sizeof(sem_chunk[0]));

         for (unsigned i = 0; i < num_syncobj_dependencies; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence*)cs->syncobj_dependencies.list[i];

            if (!amdgpu_fence_is_syncobj(fence))
               continue;

            assert(util_queue_fence_is_signalled(&fence->submitted));
            sem_chunk[i].handle = fence->syncobj;
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
         chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4 * num_syncobj_dependencies;
         chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
         num_chunks++;
      }

      /* Submit the parallel compute IB first. */
      if (cs->ib[IB_PARALLEL_COMPUTE].ib_bytes > 0) {
         unsigned old_num_chunks = num_chunks;

         /* Add compute fence dependencies. */
         unsigned num_dependencies = cs->compute_fence_dependencies.num;
         if (num_dependencies) {
            struct drm_amdgpu_cs_chunk_dep *dep_chunk =
               alloca(num_dependencies * sizeof(*dep_chunk));

            for (unsigned i = 0; i < num_dependencies; i++) {
               struct amdgpu_fence *fence =
                  (struct amdgpu_fence*)cs->compute_fence_dependencies.list[i];

               assert(util_queue_fence_is_signalled(&fence->submitted));
               amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
            }

            chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
            chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
            chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
            num_chunks++;
         }

         /* Add compute start fence dependencies. */
         unsigned num_start_dependencies = cs->compute_start_fence_dependencies.num;
         if (num_start_dependencies) {
            struct drm_amdgpu_cs_chunk_dep *dep_chunk =
               alloca(num_start_dependencies * sizeof(*dep_chunk));

            for (unsigned i = 0; i < num_start_dependencies; i++) {
               struct amdgpu_fence *fence =
                  (struct amdgpu_fence*)cs->compute_start_fence_dependencies.list[i];

               assert(util_queue_fence_is_signalled(&fence->submitted));
               amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
            }

            chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES;
            chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_start_dependencies;
            chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
            num_chunks++;
         }

         /* Convert from dwords to bytes. */
         cs->ib[IB_PARALLEL_COMPUTE].ib_bytes *= 4;
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_PARALLEL_COMPUTE];
         num_chunks++;

         r = acs->noop ? 0 : amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
                                                   num_chunks, chunks, NULL);
         if (r)
            goto finalize;

         /* Back off the compute chunks. */
         num_chunks = old_num_chunks;
      }

      /* Syncobj signals. */
      unsigned num_syncobj_to_signal = cs->syncobj_to_signal.num;
      if (num_syncobj_to_signal) {
         struct drm_amdgpu_cs_chunk_sem *sem_chunk =
            alloca(num_syncobj_to_signal * sizeof(sem_chunk[0]));

         for (unsigned i = 0; i < num_syncobj_to_signal; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence*)cs->syncobj_to_signal.list[i];

            assert(amdgpu_fence_is_syncobj(fence));
            sem_chunk[i].handle = fence->syncobj;
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
         chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4
                                        * num_syncobj_to_signal;
         chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
         num_chunks++;
      }

      /* Fence */
      if (has_user_fence) {
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
         num_chunks++;
      }

      /* IB */
      if (cs->ib[IB_PREAMBLE].ib_bytes) {
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_PREAMBLE];
         num_chunks++;
      }

      /* IB */
      cs->ib[IB_MAIN].ib_bytes *= 4; /* Convert from dwords to bytes. */
      chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
      chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
      num_chunks++;

      if (cs->secure) {
         cs->ib[IB_PREAMBLE].flags |= AMDGPU_IB_FLAGS_SECURE;
         cs->ib[IB_MAIN].flags |= AMDGPU_IB_FLAGS_SECURE;
      } else {
         cs->ib[IB_PREAMBLE].flags &= ~AMDGPU_IB_FLAGS_SECURE;
         cs->ib[IB_MAIN].flags &= ~AMDGPU_IB_FLAGS_SECURE;
      }

      assert(num_chunks <= ARRAY_SIZE(chunks));

      r = acs->noop ? 0 : amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
                                                num_chunks, chunks, &seq_no);
   }
finalize:
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else if (r == -ECANCELED)
         fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n", r);

      acs->ctx->num_rejected_cs++;
      ws->num_total_rejected_cs++;
   } else if (!acs->noop) {
      /* Success. */
      uint64_t *user_fence = NULL;

      /* Need to reserve 4 QWORD for user fence:
       *   QWORD[0]: completed fence
       *   QWORD[1]: preempted fence
       *   QWORD[2]: reset fence
       *   QWORD[3]: preempted then reset
       **/
      if (has_user_fence)
         user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type * 4;
      amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
   }

   /* Cleanup. */
   if (bo_list)
      amdgpu_bo_list_destroy_raw(ws->dev, bo_list);

cleanup:
   /* If there was an error, signal the fence, because it won't be signalled
    * by the hardware. */
   if (r || acs->noop)
      amdgpu_fence_signalled(cs->fence);

   cs->error_code = r;

   /* Only decrement num_active_ioctls for those buffers where we incremented it. */
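   /* initial_num_real_buffers excludes the sparse backing buffers appended
    * during this submission, which never had num_active_ioctls incremented. */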
   for (i = 0; i < initial_num_real_buffers; i++)
      p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_slab_buffers; i++)
      p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_sparse_buffers; i++)
      p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(ws, cs);
}

/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   util_queue_fence_wait(&cs->flush_completed);
}

static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
                           unsigned flags,
                           struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ws;
   int error_code = 0;
   uint32_t ib_pad_dw_mask = ws->info.ib_pad_dw_mask[cs->ring_type];

   rcs->current.max_dw += amdgpu_cs_epilog_dws(cs);

   /* Pad the IB according to the mask. */
   switch (cs->ring_type) {
   case RING_DMA:
      if (ws->info.chip_class <= GFX6) {
         while (rcs->current.cdw & ib_pad_dw_mask)
            radeon_emit(rcs, 0xf0000000); /* NOP packet */
      } else {
         while (rcs->current.cdw & ib_pad_dw_mask)
            radeon_emit(rcs, 0x00000000); /* NOP packet */
      }
      break;
   case RING_GFX:
   case RING_COMPUTE:
      if (ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & ib_pad_dw_mask)
            radeon_emit(rcs, PKT2_NOP_PAD);
      } else {
         while (rcs->current.cdw & ib_pad_dw_mask)
            radeon_emit(rcs, PKT3_NOP_PAD);
      }
      if (cs->ring_type == RING_GFX)
         ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;

      /* Also pad secondary IBs. */
      if (cs->compute_ib.ib_mapped) {
         while (cs->compute_ib.rcs->current.cdw & ib_pad_dw_mask)
            radeon_emit(cs->compute_ib.rcs, PKT3_NOP_PAD);
      }
      break;
   case RING_UVD:
   case RING_UVD_ENC:
      while (rcs->current.cdw & ib_pad_dw_mask)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      break;
   case RING_VCN_JPEG:
      if (rcs->current.cdw % 2)
         assert(0);
      while (rcs->current.cdw & ib_pad_dw_mask) {
         radeon_emit(rcs, 0x60000000); /* nop packet */
         radeon_emit(rcs, 0x00000000);
      }
      break;
   case RING_VCN_DEC:
      while (rcs->current.cdw & ib_pad_dw_mask)
         radeon_emit(rcs, 0x81ff); /* nop packet */
      break;
   default:
      break;
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* If the CS is not empty or overflowed.... */
   if (likely(radeon_emitted(rcs, 0) &&
       rcs->current.cdw <= rcs->current.max_dw &&
       !(flags & RADEON_FLUSH_NOOP))) {
      struct amdgpu_cs_context *cur = cs->csc;

      /* Set IB sizes. */
      amdgpu_ib_finalize(ws, rcs, &cs->main);

      if (cs->compute_ib.ib_mapped)
         amdgpu_ib_finalize(ws, cs->compute_ib.rcs, &cs->compute_ib);

      /* Create a fence. */
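      /* Reuse the fence that cs_get_next_fence may have pre-created for this
       * submission; otherwise create a fresh one for the IB being flushed. */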
      amdgpu_fence_reference(&cur->fence, NULL);
      if (cs->next_fence) {
         /* just move the reference */
         cur->fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         cur->fence = amdgpu_fence_create(cs->ctx,
                                          cur->ib[IB_MAIN].ip_type,
                                          cur->ib[IB_MAIN].ip_instance,
                                          cur->ib[IB_MAIN].ring);
      }
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      amdgpu_cs_sync_flush(rcs);

      /* Prepare buffers.
       *
       * This fence must be held until the submission is queued to ensure
       * that the order of fence dependency updates matches the order of
       * submissions.
       */
      simple_mtx_lock(&ws->bo_fence_lock);
      amdgpu_add_fence_dependencies_bo_lists(cs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      /* Submit. */
      util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                         amdgpu_cs_submit_ib, NULL, 0);

      if (flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)
         cs->csc->secure = !cs->cst->secure;
      else
         cs->csc->secure = cs->cst->secure;

      /* The submission has been queued, unlock the fence now. */
      simple_mtx_unlock(&ws->bo_fence_lock);

      if (!(flags & PIPE_FLUSH_ASYNC)) {
         amdgpu_cs_sync_flush(rcs);
         error_code = cur->error_code;
      }
   } else {
      if (flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)
         cs->csc->secure = !cs->csc->secure;
      amdgpu_cs_context_cleanup(ws, cs->csc);
   }

   memset(cs->csc->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));

   amdgpu_get_new_ib(ws, rcs, &cs->main, cs);
   if (cs->compute_ib.ib_mapped)
      amdgpu_get_new_ib(ws, cs->compute_ib.rcs, &cs->compute_ib, cs);

   if (cs->preamble_ib_bo) {
      amdgpu_cs_add_buffer(rcs, cs->preamble_ib_bo, RADEON_USAGE_READ, 0,
                           RADEON_PRIO_IB1);
   }

   rcs->used_gart_kb = 0;
   rcs->used_vram_kb = 0;

   if (cs->ring_type == RING_GFX)
      ws->num_gfx_IBs++;
   else if (cs->ring_type == RING_DMA)
      ws->num_sdma_IBs++;

   return error_code;
}

static void amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   if (!cs)
      return;

   amdgpu_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ws->num_cs);
   radeon_bo_reference(&cs->ws->dummy_ws.base, &cs->preamble_ib_bo, NULL);
   radeon_bo_reference(&cs->ws->dummy_ws.base, &cs->main.big_ib_buffer, NULL);
   FREE(rcs->prev);
   radeon_bo_reference(&cs->ws->dummy_ws.base, &cs->compute_ib.big_ib_buffer, NULL);
   if (cs->compute_ib.rcs)
      FREE(cs->compute_ib.rcs->prev);
   amdgpu_destroy_cs_context(cs->ws, &cs->csc1);
   amdgpu_destroy_cs_context(cs->ws, &cs->csc2);
   amdgpu_fence_reference(&cs->next_fence, NULL);
   FREE(cs);
}

static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_parallel_compute_ib = amdgpu_cs_add_parallel_compute_ib;
   ws->base.cs_setup_preemption = amdgpu_cs_setup_preemption;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
   ws->base.cs_add_syncobj_signal = amdgpu_cs_add_syncobj_signal;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
   ws->base.fence_import_syncobj = amdgpu_fence_import_syncobj;
   ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
   ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
   ws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
}