Path: blob/21.2-virgl/src/gallium/winsys/radeon/drm/radeon_drm_cs.c

/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <[email protected]>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

/*
    This file replaces libdrm's radeon_cs_gem with our own implementation.
    It's optimized specifically for Radeon DRM.
    Adding buffers and space checking are faster and simpler than their
    counterparts in libdrm (the time complexity of all the functions
    is O(1) in nearly all scenarios, thanks to hashing).

    It works like this:

    cs_add_buffer(cs, buf, read_domain, write_domain) adds a new relocation and
    also adds the size of 'buf' to the used_gart and used_vram winsys variables
    based on the domains, which are simply or'd for the accounting purposes.
    The adding is skipped if the reloc is already present in the list, but it
    accounts any newly-referenced domains.

    cs_validate is then called, which just checks:
        used_vram/gart < vram/gart_size * 0.8
    The 0.8 number allows for some memory fragmentation. If the validation
    fails, the pipe driver flushes the CS and tries to do the validation again,
    i.e. it validates only that one operation. If it fails again, it drops
    the operation on the floor and prints some nasty message to stderr.
    (done in the pipe driver)

    cs_write_reloc(cs, buf) just writes a reloc that has been added using
    cs_add_buffer. The read_domain and write_domain parameters have been
    removed, because we already specify them in cs_add_buffer.
*/
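
/*
    A rough sketch of the driver-side flow described above; illustrative
    only, with a hypothetical emit_draw_packets() helper and a single
    buffer 'buf'. The entry points are the real ones installed by
    radeon_drm_cs_init_functions() at the end of this file:

       ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE,
                         RADEON_DOMAIN_VRAM, priority);
       if (!ws->cs_validate(cs)) {
          // cs_validate dropped the relocs added since the last successful
          // validation and flushed the rest (see radeon_drm_cs_validate),
          // so re-add the buffer and validate just this one operation.
          ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE,
                            RADEON_DOMAIN_VRAM, priority);
          if (!ws->cs_validate(cs))
             fprintf(stderr, "radeon: dropping the operation\n");
       }
       emit_draw_packets(cs);
*/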

#include "radeon_drm_cs.h"

#include "util/u_memory.h"
#include "util/os_time.h"

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>


#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))

static struct pipe_fence_handle *radeon_cs_create_fence(struct radeon_cmdbuf *rcs);
static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src);

static struct radeon_winsys_ctx *radeon_drm_ctx_create(struct radeon_winsys *ws)
{
   struct radeon_ctx *ctx = CALLOC_STRUCT(radeon_ctx);
   if (!ctx)
      return NULL;

   ctx->ws = (struct radeon_drm_winsys*)ws;
   ctx->gpu_reset_counter = radeon_drm_get_gpu_reset_counter(ctx->ws);
   return (struct radeon_winsys_ctx*)ctx;
}

static void radeon_drm_ctx_destroy(struct radeon_winsys_ctx *ctx)
{
   FREE(ctx);
}

static enum pipe_reset_status
radeon_drm_ctx_query_reset_status(struct radeon_winsys_ctx *rctx, bool full_reset_only,
                                  bool *needs_reset)
{
   struct radeon_ctx *ctx = (struct radeon_ctx*)rctx;

   unsigned latest = radeon_drm_get_gpu_reset_counter(ctx->ws);

   if (ctx->gpu_reset_counter == latest) {
      if (needs_reset)
         *needs_reset = false;
      return PIPE_NO_RESET;
   }

   if (needs_reset)
      *needs_reset = true;

   ctx->gpu_reset_counter = latest;
   return PIPE_UNKNOWN_CONTEXT_RESET;
}

static bool radeon_init_cs_context(struct radeon_cs_context *csc,
                                   struct radeon_drm_winsys *ws)
{
   int i;

   csc->fd = ws->fd;

   csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
   csc->chunks[0].length_dw = 0;
   csc->chunks[0].chunk_data = (uint64_t)(uintptr_t)csc->buf;
   csc->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
   csc->chunks[1].length_dw = 0;
   csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
   csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
   csc->chunks[2].length_dw = 2;
   csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;

   csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
   csc->chunk_array[1] = (uint64_t)(uintptr_t)&csc->chunks[1];
   csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];

   csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;

   for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
      csc->reloc_indices_hashlist[i] = -1;
   }
   return true;
}
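
/*
    For reference, the submission layout produced above, as consumed by the
    DRM_RADEON_CS ioctl in radeon_drm_cs_emit_ioctl_oneshot() below (a
    summary of this function, not an authoritative kernel ABI description):

       csc->cs.chunks -> chunk_array[3] -> chunks[0] RADEON_CHUNK_ID_IB:
                                              command dwords in csc->buf
                                           chunks[1] RADEON_CHUNK_ID_RELOCS:
                                              csc->relocs, RELOC_DWORDS each
                                           chunks[2] RADEON_CHUNK_ID_FLAGS:
                                              2 dwords in csc->flags
*/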

static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
   unsigned i;

   for (i = 0; i < csc->num_relocs; i++) {
      p_atomic_dec(&csc->relocs_bo[i].bo->num_cs_references);
      radeon_ws_bo_reference(&csc->relocs_bo[i].bo, NULL);
   }
   for (i = 0; i < csc->num_slab_buffers; ++i) {
      p_atomic_dec(&csc->slab_buffers[i].bo->num_cs_references);
      radeon_ws_bo_reference(&csc->slab_buffers[i].bo, NULL);
   }

   csc->num_relocs = 0;
   csc->num_validated_relocs = 0;
   csc->num_slab_buffers = 0;
   csc->chunks[0].length_dw = 0;
   csc->chunks[1].length_dw = 0;

   for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
      csc->reloc_indices_hashlist[i] = -1;
   }
}

static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
{
   radeon_cs_context_cleanup(csc);
   FREE(csc->slab_buffers);
   FREE(csc->relocs_bo);
   FREE(csc->relocs);
}


static bool
radeon_drm_cs_create(struct radeon_cmdbuf *rcs,
                     struct radeon_winsys_ctx *ctx,
                     enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx,
                     bool stop_exec_on_failure)
{
   struct radeon_drm_winsys *ws = ((struct radeon_ctx*)ctx)->ws;
   struct radeon_drm_cs *cs;

   cs = CALLOC_STRUCT(radeon_drm_cs);
   if (!cs) {
      return false;
   }
   util_queue_fence_init(&cs->flush_completed);

   cs->ws = ws;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;

   if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
      FREE(cs);
      return false;
   }
   if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
      radeon_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return false;
   }

   /* Set the first command buffer as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;
   cs->ring_type = ring_type;

   memset(rcs, 0, sizeof(*rcs));
   rcs->current.buf = cs->csc->buf;
   rcs->current.max_dw = ARRAY_SIZE(cs->csc->buf);
   rcs->priv = cs;

   p_atomic_inc(&ws->num_cs);
   return true;
}

int radeon_lookup_buffer(struct radeon_cs_context *csc, struct radeon_bo *bo)
{
   unsigned hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
   struct radeon_bo_item *buffers;
   unsigned num_buffers;
   int i = csc->reloc_indices_hashlist[hash];

   if (bo->handle) {
      buffers = csc->relocs_bo;
      num_buffers = csc->num_relocs;
   } else {
      buffers = csc->slab_buffers;
      num_buffers = csc->num_slab_buffers;
   }

   /* not found or found */
   if (i == -1 || (i < num_buffers && buffers[i].bo == bo))
      return i;

   /* Hash collision, look for the BO in the list of relocs linearly. */
   for (i = num_buffers - 1; i >= 0; i--) {
      if (buffers[i].bo == bo) {
         /* Put this reloc in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of relocs:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         csc->reloc_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}

static unsigned radeon_lookup_or_add_real_buffer(struct radeon_drm_cs *cs,
                                                 struct radeon_bo *bo)
{
   struct radeon_cs_context *csc = cs->csc;
   struct drm_radeon_cs_reloc *reloc;
   unsigned hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
   int i = -1;

   i = radeon_lookup_buffer(csc, bo);

   if (i >= 0) {
      /* For async DMA, every add_buffer call must add a buffer to the list
       * no matter how many duplicates there are. This is due to the fact
       * the DMA CS checker doesn't use NOP packets for offset patching,
       * but always uses the i-th buffer from the list to patch the i-th
       * offset. If there are N offsets in a DMA CS, there must also be N
       * buffers in the relocation list.
       *
       * This doesn't have to be done if virtual memory is enabled,
       * because there is no offset patching with virtual memory.
       */
      if (cs->ring_type != RING_DMA || cs->ws->info.r600_has_virtual_memory) {
         return i;
      }
   }

   /* New relocation, check if the backing array is large enough. */
   if (csc->num_relocs >= csc->max_relocs) {
      uint32_t size;
      csc->max_relocs = MAX2(csc->max_relocs + 16, (unsigned)(csc->max_relocs * 1.3));

      size = csc->max_relocs * sizeof(csc->relocs_bo[0]);
      csc->relocs_bo = realloc(csc->relocs_bo, size);

      size = csc->max_relocs * sizeof(struct drm_radeon_cs_reloc);
      csc->relocs = realloc(csc->relocs, size);

      csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
   }

   /* Initialize the new relocation. */
   csc->relocs_bo[csc->num_relocs].bo = NULL;
   csc->relocs_bo[csc->num_relocs].u.real.priority_usage = 0;
   radeon_ws_bo_reference(&csc->relocs_bo[csc->num_relocs].bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   reloc = &csc->relocs[csc->num_relocs];
   reloc->handle = bo->handle;
   reloc->read_domains = 0;
   reloc->write_domain = 0;
   reloc->flags = 0;

   csc->reloc_indices_hashlist[hash] = csc->num_relocs;

   csc->chunks[1].length_dw += RELOC_DWORDS;

   return csc->num_relocs++;
}
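
/*
    Worked example of the MAX2(n + 16, n * 1.3) growth policy above
    (illustrative arithmetic only): starting from max_relocs == 0, the
    capacity grows 0 -> 16 -> 32 -> 48 -> 64 -> 83 -> 107 -> ...
    The +16 step dominates while the array is small; the 1.3x geometric
    growth takes over past ~53 entries, keeping reallocation amortized.
*/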

static int radeon_lookup_or_add_slab_buffer(struct radeon_drm_cs *cs,
                                            struct radeon_bo *bo)
{
   struct radeon_cs_context *csc = cs->csc;
   unsigned hash;
   struct radeon_bo_item *item;
   int idx;
   int real_idx;

   idx = radeon_lookup_buffer(csc, bo);
   if (idx >= 0)
      return idx;

   real_idx = radeon_lookup_or_add_real_buffer(cs, bo->u.slab.real);

   /* Check if the backing array is large enough. */
   if (csc->num_slab_buffers >= csc->max_slab_buffers) {
      unsigned new_max = MAX2(csc->max_slab_buffers + 16,
                              (unsigned)(csc->max_slab_buffers * 1.3));
      struct radeon_bo_item *new_buffers =
            REALLOC(csc->slab_buffers,
                    csc->max_slab_buffers * sizeof(*new_buffers),
                    new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "radeon_lookup_or_add_slab_buffer: allocation failure\n");
         return -1;
      }

      csc->max_slab_buffers = new_max;
      csc->slab_buffers = new_buffers;
   }

   /* Initialize the new relocation. */
   idx = csc->num_slab_buffers++;
   item = &csc->slab_buffers[idx];

   item->bo = NULL;
   item->u.slab.real_idx = real_idx;
   radeon_ws_bo_reference(&item->bo, bo);
   p_atomic_inc(&bo->num_cs_references);

   hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
   csc->reloc_indices_hashlist[hash] = idx;

   return idx;
}

static unsigned radeon_drm_cs_add_buffer(struct radeon_cmdbuf *rcs,
                                         struct pb_buffer *buf,
                                         enum radeon_bo_usage usage,
                                         enum radeon_bo_domain domains,
                                         enum radeon_bo_priority priority)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   struct radeon_bo *bo = (struct radeon_bo*)buf;
   enum radeon_bo_domain added_domains;

   /* If VRAM is just stolen system memory, allow both VRAM and
    * GTT, whichever has free space. If a buffer is evicted from
    * VRAM to GTT, it will stay there.
    */
   if (!cs->ws->info.has_dedicated_vram)
      domains |= RADEON_DOMAIN_GTT;

   enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
   enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
   struct drm_radeon_cs_reloc *reloc;
   int index;

   if (!bo->handle) {
      index = radeon_lookup_or_add_slab_buffer(cs, bo);
      if (index < 0)
         return 0;

      index = cs->csc->slab_buffers[index].u.slab.real_idx;
   } else {
      index = radeon_lookup_or_add_real_buffer(cs, bo);
   }

   reloc = &cs->csc->relocs[index];
   added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);
   reloc->read_domains |= rd;
   reloc->write_domain |= wd;
   reloc->flags = MAX2(reloc->flags, priority);
   cs->csc->relocs_bo[index].u.real.priority_usage |= 1u << priority;

   if (added_domains & RADEON_DOMAIN_VRAM)
      rcs->used_vram_kb += bo->base.size / 1024;
   else if (added_domains & RADEON_DOMAIN_GTT)
      rcs->used_gart_kb += bo->base.size / 1024;

   return index;
}
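
/*
    Domain-accounting example for the code above (illustrative values):
    a 1 MiB buffer first added with RADEON_USAGE_READ in RADEON_DOMAIN_VRAM
    gets added_domains == VRAM and charges 1024 KiB to used_vram_kb. Adding
    the same buffer again with RADEON_USAGE_WRITE in VRAM computes
    added_domains == 0, because VRAM is already in the reloc's domains, so
    each buffer is counted at most once per domain regardless of how many
    times it is referenced.
*/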

static int radeon_drm_cs_lookup_buffer(struct radeon_cmdbuf *rcs,
                                       struct pb_buffer *buf)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

   return radeon_lookup_buffer(cs->csc, (struct radeon_bo*)buf);
}

static bool radeon_drm_cs_validate(struct radeon_cmdbuf *rcs)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   bool status =
         rcs->used_gart_kb < cs->ws->info.gart_size_kb * 0.8 &&
         rcs->used_vram_kb < cs->ws->info.vram_size_kb * 0.8;

   if (status) {
      cs->csc->num_validated_relocs = cs->csc->num_relocs;
   } else {
      /* Remove lately-added buffers. The validation failed with them
       * and the CS is about to be flushed because of that. Keep only
       * the already-validated buffers. */
      unsigned i;

      for (i = cs->csc->num_validated_relocs; i < cs->csc->num_relocs; i++) {
         p_atomic_dec(&cs->csc->relocs_bo[i].bo->num_cs_references);
         radeon_ws_bo_reference(&cs->csc->relocs_bo[i].bo, NULL);
      }
      cs->csc->num_relocs = cs->csc->num_validated_relocs;

      /* Flush if there are any relocs. Clean up otherwise. */
      if (cs->csc->num_relocs) {
         cs->flush_cs(cs->flush_data,
                      RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
      } else {
         radeon_cs_context_cleanup(cs->csc);
         rcs->used_vram_kb = 0;
         rcs->used_gart_kb = 0;

         assert(rcs->current.cdw == 0);
         if (rcs->current.cdw != 0) {
            fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
         }
      }
   }
   return status;
}

static bool radeon_drm_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
                                      bool force_chaining)
{
   assert(rcs->current.cdw <= rcs->current.max_dw);
   return rcs->current.max_dw - rcs->current.cdw >= dw;
}

static unsigned radeon_drm_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
                                              struct radeon_bo_list_item *list)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   int i;

   if (list) {
      for (i = 0; i < cs->csc->num_relocs; i++) {
         list[i].bo_size = cs->csc->relocs_bo[i].bo->base.size;
         list[i].vm_address = cs->csc->relocs_bo[i].bo->va;
         list[i].priority_usage = cs->csc->relocs_bo[i].u.real.priority_usage;
      }
   }
   return cs->csc->num_relocs;
}
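
/*
    radeon_drm_cs_get_buffer_list() follows the usual two-call pattern:
    pass NULL to query the count, then a sufficiently large array to fill.
    A rough caller-side sketch (CALLOC/FREE are the util/u_memory.h
    wrappers already used in this file):

       unsigned count = ws->cs_get_buffer_list(cs, NULL);
       struct radeon_bo_list_item *list = CALLOC(count, sizeof(*list));
       if (list) {
          ws->cs_get_buffer_list(cs, list);
          // ... inspect list[i].bo_size, vm_address, priority_usage ...
          FREE(list);
       }
*/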

void radeon_drm_cs_emit_ioctl_oneshot(void *job, void *gdata, int thread_index)
{
   struct radeon_cs_context *csc = ((struct radeon_drm_cs*)job)->cst;
   unsigned i;
   int r;

   r = drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                           &csc->cs, sizeof(struct drm_radeon_cs));
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "radeon: Not enough memory for command submission.\n");
      else if (debug_get_bool_option("RADEON_DUMP_CS", false)) {
         unsigned i;

         fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
         for (i = 0; i < csc->chunks[0].length_dw; i++) {
            fprintf(stderr, "0x%08X\n", csc->buf[i]);
         }
      } else {
         fprintf(stderr, "radeon: The kernel rejected CS, "
                         "see dmesg for more information (%i).\n", r);
      }
   }

   for (i = 0; i < csc->num_relocs; i++)
      p_atomic_dec(&csc->relocs_bo[i].bo->num_active_ioctls);
   for (i = 0; i < csc->num_slab_buffers; i++)
      p_atomic_dec(&csc->slab_buffers[i].bo->num_active_ioctls);

   radeon_cs_context_cleanup(csc);
}

/*
 * Make sure previous submissions of this cs are completed.
 */
void radeon_drm_cs_sync_flush(struct radeon_cmdbuf *rcs)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   if (util_queue_is_initialized(&cs->ws->cs_queue))
      util_queue_fence_wait(&cs->flush_completed);
}

/* Add the given fence to a slab buffer fence list.
 *
 * There is a potential race condition when bo participates in submissions on
 * two or more threads simultaneously. Since we do not know which of the
 * submissions will be sent to the GPU first, we have to keep the fences
 * of all submissions.
 *
 * However, fences that belong to submissions that have already returned from
 * their respective ioctl do not have to be kept, because we know that they
 * will signal earlier.
 */
static void radeon_bo_slab_fence(struct radeon_bo *bo, struct radeon_bo *fence)
{
   unsigned dst;

   assert(fence->num_cs_references);

   /* Cleanup older fences */
   dst = 0;
   for (unsigned src = 0; src < bo->u.slab.num_fences; ++src) {
      if (bo->u.slab.fences[src]->num_cs_references) {
         bo->u.slab.fences[dst] = bo->u.slab.fences[src];
         dst++;
      } else {
         radeon_ws_bo_reference(&bo->u.slab.fences[src], NULL);
      }
   }
   bo->u.slab.num_fences = dst;

   /* Check available space for the new fence */
   if (bo->u.slab.num_fences >= bo->u.slab.max_fences) {
      unsigned new_max_fences = bo->u.slab.max_fences + 1;
      struct radeon_bo **new_fences = REALLOC(bo->u.slab.fences,
                                              bo->u.slab.max_fences * sizeof(*new_fences),
                                              new_max_fences * sizeof(*new_fences));
      if (!new_fences) {
         fprintf(stderr, "radeon_bo_slab_fence: allocation failure, dropping fence\n");
         return;
      }

      bo->u.slab.fences = new_fences;
      bo->u.slab.max_fences = new_max_fences;
   }

   /* Add the new fence */
   bo->u.slab.fences[bo->u.slab.num_fences] = NULL;
   radeon_ws_bo_reference(&bo->u.slab.fences[bo->u.slab.num_fences], fence);
   bo->u.slab.num_fences++;
}
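
/*
    Compaction example for radeon_bo_slab_fence() (hypothetical state): if a
    slab BO holds fences [F0 idle, F1 busy, F2 idle, F3 busy], where "busy"
    means num_cs_references != 0, the cleanup loop above drops F0 and F2 and
    packs the array to [F1, F3]; the new fence is then appended, leaving
    [F1, F3, Fnew] with num_fences == 3.
*/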

static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
                               unsigned flags,
                               struct pipe_fence_handle **pfence)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   struct radeon_cs_context *tmp;

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      if (cs->ws->info.chip_class <= GFX6) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xf0000000); /* NOP packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x00000000); /* NOP packet */
      }
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
       * r6xx requires at least 4 dw alignment to avoid a hw bug.
       */
      if (cs->ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
      }
      break;
   case RING_UVD:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "radeon: command stream overflowed\n");
   }

   if (pfence || cs->csc->num_slab_buffers) {
      struct pipe_fence_handle *fence;

      if (cs->next_fence) {
         fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         fence = radeon_cs_create_fence(rcs);
      }

      if (fence) {
         if (pfence)
            radeon_fence_reference(pfence, fence);

         mtx_lock(&cs->ws->bo_fence_lock);
         for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
            struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
            p_atomic_inc(&bo->num_active_ioctls);
            radeon_bo_slab_fence(bo, (struct radeon_bo *)fence);
         }
         mtx_unlock(&cs->ws->bo_fence_lock);

         radeon_fence_reference(&fence, NULL);
      }
   } else {
      radeon_fence_reference(&cs->next_fence, NULL);
   }

   radeon_drm_cs_sync_flush(rcs);

   /* Swap command streams. */
   tmp = cs->csc;
   cs->csc = cs->cst;
   cs->cst = tmp;

   /* If the CS is not empty or overflowed, emit it in a separate thread. */
   if (rcs->current.cdw && rcs->current.cdw <= rcs->current.max_dw &&
       !cs->ws->noop_cs && !(flags & RADEON_FLUSH_NOOP)) {
      unsigned i, num_relocs;

      num_relocs = cs->cst->num_relocs;

      cs->cst->chunks[0].length_dw = rcs->current.cdw;

      for (i = 0; i < num_relocs; i++) {
         /* Update the number of active asynchronous CS ioctls for the buffer. */
         p_atomic_inc(&cs->cst->relocs_bo[i].bo->num_active_ioctls);
      }

      switch (cs->ring_type) {
      case RING_DMA:
         cs->cst->flags[0] = 0;
         cs->cst->flags[1] = RADEON_CS_RING_DMA;
         cs->cst->cs.num_chunks = 3;
         if (cs->ws->info.r600_has_virtual_memory) {
            cs->cst->flags[0] |= RADEON_CS_USE_VM;
         }
         break;

      case RING_UVD:
         cs->cst->flags[0] = 0;
         cs->cst->flags[1] = RADEON_CS_RING_UVD;
         cs->cst->cs.num_chunks = 3;
         break;

      case RING_VCE:
         cs->cst->flags[0] = 0;
         cs->cst->flags[1] = RADEON_CS_RING_VCE;
         cs->cst->cs.num_chunks = 3;
         break;

      default:
      case RING_GFX:
      case RING_COMPUTE:
         cs->cst->flags[0] = RADEON_CS_KEEP_TILING_FLAGS;
         cs->cst->flags[1] = RADEON_CS_RING_GFX;
         cs->cst->cs.num_chunks = 3;

         if (cs->ws->info.r600_has_virtual_memory) {
            cs->cst->flags[0] |= RADEON_CS_USE_VM;
            cs->cst->cs.num_chunks = 3;
         }
         if (flags & PIPE_FLUSH_END_OF_FRAME) {
            cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
            cs->cst->cs.num_chunks = 3;
         }
         if (cs->ring_type == RING_COMPUTE) {
            cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
            cs->cst->cs.num_chunks = 3;
         }
         break;
      }

      if (util_queue_is_initialized(&cs->ws->cs_queue)) {
         util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
                            radeon_drm_cs_emit_ioctl_oneshot, NULL, 0);
         if (!(flags & PIPE_FLUSH_ASYNC))
            radeon_drm_cs_sync_flush(rcs);
      } else {
         radeon_drm_cs_emit_ioctl_oneshot(cs, NULL, 0);
      }
   } else {
      radeon_cs_context_cleanup(cs->cst);
   }

   /* Prepare a new CS. */
   rcs->current.buf = cs->csc->buf;
   rcs->current.cdw = 0;
   rcs->used_vram_kb = 0;
   rcs->used_gart_kb = 0;

   if (cs->ring_type == RING_GFX)
      cs->ws->num_gfx_IBs++;
   else if (cs->ring_type == RING_DMA)
      cs->ws->num_sdma_IBs++;
   return 0;
}
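
/*
    Worked example of the NOP padding at the top of radeon_drm_cs_flush()
    (illustrative numbers): a GFX IB with rcs->current.cdw == 13 gets 3 NOP
    dwords from "while (cdw & 7)", so it is submitted with 16 dwords; a UVD
    IB with cdw == 20 is padded with 12 type2 NOPs up to 32, the next
    multiple of 16.
*/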

static void radeon_drm_cs_destroy(struct radeon_cmdbuf *rcs)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

   if (!cs)
      return;

   radeon_drm_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   radeon_cs_context_cleanup(&cs->csc1);
   radeon_cs_context_cleanup(&cs->csc2);
   p_atomic_dec(&cs->ws->num_cs);
   radeon_destroy_cs_context(&cs->csc1);
   radeon_destroy_cs_context(&cs->csc2);
   radeon_fence_reference(&cs->next_fence, NULL);
   FREE(cs);
}

static bool radeon_bo_is_referenced(struct radeon_cmdbuf *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   struct radeon_bo *bo = (struct radeon_bo*)_buf;
   int index;

   if (!bo->num_cs_references)
      return false;

   index = radeon_lookup_buffer(cs->csc, bo);
   if (index == -1)
      return false;

   if (!bo->handle)
      index = cs->csc->slab_buffers[index].u.slab.real_idx;

   if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
      return true;
   if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
      return true;

   return false;
}

/* FENCES */

static struct pipe_fence_handle *radeon_cs_create_fence(struct radeon_cmdbuf *rcs)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   struct pb_buffer *fence;

   /* Create a fence, which is a dummy BO. */
   fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1,
                                      RADEON_DOMAIN_GTT,
                                      RADEON_FLAG_NO_SUBALLOC
                                      | RADEON_FLAG_NO_INTERPROCESS_SHARING);
   if (!fence)
      return NULL;

   /* Add the fence as a dummy relocation. */
   cs->ws->base.cs_add_buffer(rcs, fence,
                              RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
                              RADEON_PRIO_FENCE);
   return (struct pipe_fence_handle*)fence;
}
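
/*
    Since a fence is just a dummy BO referenced by the CS, waiting on a
    fence reduces to waiting for that BO to go idle. A rough caller-side
    sketch (OS_TIMEOUT_INFINITE is the os_time.h wait-forever constant):

       struct pipe_fence_handle *fence = NULL;
       ws->cs_flush(cs, PIPE_FLUSH_ASYNC, &fence);
       if (fence) {
          ws->fence_wait(ws, fence, OS_TIMEOUT_INFINITE); // blocks until idle
          ws->fence_reference(&fence, NULL);              // drop our reference
       }
*/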

static bool radeon_fence_wait(struct radeon_winsys *ws,
                              struct pipe_fence_handle *fence,
                              uint64_t timeout)
{
   return ws->buffer_wait(ws, (struct pb_buffer*)fence, timeout,
                          RADEON_USAGE_READWRITE);
}

static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src)
{
   pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}

static struct pipe_fence_handle *radeon_drm_cs_get_next_fence(struct radeon_cmdbuf *rcs)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (cs->next_fence) {
      radeon_fence_reference(&fence, cs->next_fence);
      return fence;
   }

   fence = radeon_cs_create_fence(rcs);
   if (!fence)
      return NULL;

   radeon_fence_reference(&cs->next_fence, fence);
   return fence;
}

static void
radeon_drm_cs_add_fence_dependency(struct radeon_cmdbuf *cs,
                                   struct pipe_fence_handle *fence,
                                   unsigned dependency_flags)
{
   /* TODO: Handle the following unlikely multi-threaded scenario:
    *
    *  Thread 1 / Context 1            Thread 2 / Context 2
    *  --------------------            --------------------
    *  f = cs_get_next_fence()
    *                                  cs_add_fence_dependency(f)
    *                                  cs_flush()
    *  cs_flush()
    *
    * We currently assume that this does not happen because we don't support
    * asynchronous flushes on Radeon.
    */
}

void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
   ws->base.ctx_create = radeon_drm_ctx_create;
   ws->base.ctx_destroy = radeon_drm_ctx_destroy;
   ws->base.ctx_query_reset_status = radeon_drm_ctx_query_reset_status;
   ws->base.cs_create = radeon_drm_cs_create;
   ws->base.cs_destroy = radeon_drm_cs_destroy;
   ws->base.cs_add_buffer = radeon_drm_cs_add_buffer;
   ws->base.cs_lookup_buffer = radeon_drm_cs_lookup_buffer;
   ws->base.cs_validate = radeon_drm_cs_validate;
   ws->base.cs_check_space = radeon_drm_cs_check_space;
   ws->base.cs_get_buffer_list = radeon_drm_cs_get_buffer_list;
   ws->base.cs_flush = radeon_drm_cs_flush;
   ws->base.cs_get_next_fence = radeon_drm_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
   ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
   ws->base.cs_add_fence_dependency = radeon_drm_cs_add_fence_dependency;
   ws->base.fence_wait = radeon_fence_wait;
   ws->base.fence_reference = radeon_fence_reference;
}