Path: blob/21.2-virgl/src/gallium/drivers/r600/evergreen_hw_context.c
/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_pipe.h"
#include "evergreend.h"
#include "util/u_memory.h"
#include "util/u_math.h"

void evergreen_dma_copy_buffer(struct r600_context *rctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src,
			       uint64_t dst_offset,
			       uint64_t src_offset,
			       uint64_t size)
{
	struct radeon_cmdbuf *cs = &rctx->b.dma.cs;
	unsigned i, ncopy, csize, sub_cmd, shift;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->b.b, &rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += rdst->gpu_address;
	src_offset += rsrc->gpu_address;

	/* see if we use dword or byte copy */
	if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
		size >>= 2;
		sub_cmd = EG_DMA_COPY_DWORD_ALIGNED;
		shift = 2;
	} else {
		sub_cmd = EG_DMA_COPY_BYTE_ALIGNED;
		shift = 0;
	}
	ncopy = (size / EG_DMA_COPY_MAX_SIZE) + !!(size % EG_DMA_COPY_MAX_SIZE);

	r600_need_dma_space(&rctx->b, ncopy * 5, rdst, rsrc);
	for (i = 0; i < ncopy; i++) {
		csize = size < EG_DMA_COPY_MAX_SIZE ? size : EG_DMA_COPY_MAX_SIZE;
		/* emit reloc before writing cs so that cs is always in consistent state */
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ, 0);
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE, 0);
		radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize));
		radeon_emit(cs, dst_offset & 0xffffffff);
		radeon_emit(cs, src_offset & 0xffffffff);
		radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
		radeon_emit(cs, (src_offset >> 32UL) & 0xff);
		dst_offset += csize << shift;
		src_offset += csize << shift;
		size -= csize;
	}
}
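/* Illustrative aside, not part of the upstream file: the ncopy expression
 * above is a ceiling division.  For any chunk limit MAX,
 * (size / MAX) + !!(size % MAX) == ceil(size / MAX); e.g. size = 10 and
 * MAX = 4 gives 2 + 1 = 3 packets, covering 4 + 4 + 2 units.  The guarded
 * sketch below restates that chunking in isolation; dma_chunk_count is a
 * hypothetical helper name, not a driver function.
 */
#if 0
static unsigned dma_chunk_count(uint64_t size, uint64_t max_per_packet)
{
	/* Full packets, plus one more if a partial chunk remains. */
	return (size / max_per_packet) + !!(size % max_per_packet);
}
#endif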
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)

void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
				   struct pipe_resource *dst, uint64_t offset,
				   unsigned size, uint32_t clear_value,
				   enum r600_coherency coher)
{
	struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;

	assert(size);
	assert(rctx->screen->b.has_cp_dma);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(dst, &r600_resource(dst)->valid_buffer_range, offset,
		       offset + size);

	offset += r600_resource(dst)->gpu_address;

	/* Flush the cache where the resource is bound. */
	rctx->b.flags |= r600_get_flush_flags(coher) |
			 R600_CONTEXT_WAIT_3D_IDLE;

	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned reloc;

		r600_need_cs_space(rctx,
				   10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
				   R600_MAX_PFP_SYNC_ME_DWORDS, FALSE, 0);

		/* Flush the caches for the first copy only. */
		if (rctx->b.flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data
		 * is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						  (struct r600_resource*)dst,
						  RADEON_USAGE_WRITE,
						  RADEON_PRIO_CP_DMA);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, clear_value);	/* DATA [31:0] */
		radeon_emit(cs, sync | PKT3_CP_DMA_SRC_SEL(2));	/* CP_SYNC [31] | SRC_SEL [30:29] */
		radeon_emit(cs, offset);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (offset >> 32) & 0xff);	/* DST_ADDR_HI [7:0] */
		radeon_emit(cs, byte_count);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);

		size -= byte_count;
		offset += byte_count;
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (coher == R600_COHERENCY_SHADER)
		r600_emit_pfp_sync_me(rctx);
}
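/* Illustrative usage sketch, not part of the upstream file: a buffer-clear
 * entry point might call the helper above roughly as follows.  "ctx" and
 * "buf" are hypothetical placeholders; the real callers live elsewhere in
 * the driver and validate offset/size before reaching this path.
 */
#if 0
static void example_clear_buffer(struct r600_context *ctx,
				 struct pipe_resource *buf)
{
	/* Zero-fill the first 4096 bytes of buf through the CP, with shader
	 * coherency so a PFP sync packet is emitted at the end. */
	evergreen_cp_dma_clear_buffer(ctx, buf, 0, 4096, 0x00000000,
				      R600_COHERENCY_SHADER);
}
#endif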