GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/r600/evergreen_hw_context.c
/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_pipe.h"
#include "evergreend.h"
#include "util/u_memory.h"
#include "util/u_math.h"
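
/* This file provides two Evergreen buffer helpers: a DMA-engine buffer
 * copy (evergreen_dma_copy_buffer) and a CP DMA buffer clear
 * (evergreen_cp_dma_clear_buffer). */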

void evergreen_dma_copy_buffer(struct r600_context *rctx,
                               struct pipe_resource *dst,
                               struct pipe_resource *src,
                               uint64_t dst_offset,
                               uint64_t src_offset,
                               uint64_t size)
{
        struct radeon_cmdbuf *cs = &rctx->b.dma.cs;
        unsigned i, ncopy, csize, sub_cmd, shift;
        struct r600_resource *rdst = (struct r600_resource*)dst;
        struct r600_resource *rsrc = (struct r600_resource*)src;

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&rdst->b.b, &rdst->valid_buffer_range, dst_offset,
                       dst_offset + size);

        dst_offset += rdst->gpu_address;
        src_offset += rsrc->gpu_address;

        /* see if we use dword or byte copy */
        if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
                size >>= 2;
                sub_cmd = EG_DMA_COPY_DWORD_ALIGNED;
                shift = 2;
        } else {
                sub_cmd = EG_DMA_COPY_BYTE_ALIGNED;
                shift = 0;
        }
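        /* On the dword-aligned path, size is now counted in dwords and
         * shift converts each packet's copy count back to bytes when the
         * offsets are advanced below; on the byte path both stay in bytes. */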
        ncopy = (size / EG_DMA_COPY_MAX_SIZE) + !!(size % EG_DMA_COPY_MAX_SIZE);
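        /* ncopy is the ceiling of size / EG_DMA_COPY_MAX_SIZE: the copy is
         * split into packets of at most EG_DMA_COPY_MAX_SIZE units each.
         * Every packet emitted below is exactly 5 dwords (header plus the
         * low and high halves of both addresses), hence ncopy * 5. */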

        r600_need_dma_space(&rctx->b, ncopy * 5, rdst, rsrc);
        for (i = 0; i < ncopy; i++) {
                csize = size < EG_DMA_COPY_MAX_SIZE ? size : EG_DMA_COPY_MAX_SIZE;
                /* Emit the relocs before writing to the cs so that the cs is
                 * always in a consistent state. */
                radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ, 0);
                radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE, 0);
                radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize));
                radeon_emit(cs, dst_offset & 0xffffffff);
                radeon_emit(cs, src_offset & 0xffffffff);
                radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
                radeon_emit(cs, (src_offset >> 32UL) & 0xff);
                dst_offset += csize << shift;
                src_offset += csize << shift;
                size -= csize;
        }
}
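
/*
 * Illustrative example (not part of the original file): a 256-byte copy
 * between dword-aligned source and destination addresses takes the
 * dword-aligned path above, so size becomes 64 dwords and, assuming
 * EG_DMA_COPY_MAX_SIZE is at least 64, a single 5-dword packet is emitted:
 *
 *   evergreen_dma_copy_buffer(rctx, dst, src, 0, 0, 256);
 */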

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
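/* BYTE_COUNT occupies bits [20:0] of the packet's last dword, so the hard
 * limit is (1 << 21) - 1; rounding down to (1 << 21) - 8 presumably keeps
 * the per-packet byte count 8-byte aligned. */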

void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
                                   struct pipe_resource *dst, uint64_t offset,
                                   unsigned size, uint32_t clear_value,
                                   enum r600_coherency coher)
{
        struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;

        assert(size);
        assert(rctx->screen->b.has_cp_dma);

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(dst, &r600_resource(dst)->valid_buffer_range, offset,
                       offset + size);

        offset += r600_resource(dst)->gpu_address;

        /* Flush the cache where the resource is bound. */
        rctx->b.flags |= r600_get_flush_flags(coher) |
                         R600_CONTEXT_WAIT_3D_IDLE;

        while (size) {
                unsigned sync = 0;
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
                unsigned reloc;

                r600_need_cs_space(rctx,
                                   10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
                                   R600_MAX_PFP_SYNC_ME_DWORDS, FALSE, 0);
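                /* The fixed 10-dword term presumably budgets for the 6-dword
                 * CP_DMA packet and the 2-dword NOP relocation emitted below,
                 * plus headroom; the flush and PFP-sync terms are needed only
                 * when those packets may also be emitted. */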

                /* Flush the caches for the first copy only. */
                if (rctx->b.flags) {
                        r600_flush_emit(rctx);
                }

                /* Do the synchronization after the last copy, so that all
                 * data is written to memory. */
                if (size == byte_count) {
                        sync = PKT3_CP_DMA_CP_SYNC;
                }

                /* This must be done after r600_need_cs_space. */
                reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
                                                  (struct r600_resource*)dst, RADEON_USAGE_WRITE,
                                                  RADEON_PRIO_CP_DMA);

                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, clear_value);   /* DATA [31:0] */
                radeon_emit(cs, sync | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29] */
                radeon_emit(cs, offset);        /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (offset >> 32) & 0xff); /* DST_ADDR_HI [7:0] */
                radeon_emit(cs, byte_count);    /* COMMAND [29:22] | BYTE_COUNT [20:0] */

                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                radeon_emit(cs, reloc);

                size -= byte_count;
                offset += byte_count;
        }

        /* CP DMA is executed in ME, but index buffers are read by PFP.
         * This ensures that ME (CP DMA) is idle before PFP starts fetching
         * indices. If we wanted to execute CP DMA in PFP, this packet
         * should precede it.
         */
        if (coher == R600_COHERENCY_SHADER)
                r600_emit_pfp_sync_me(rctx);
}
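
/*
 * Illustrative example (not part of the original file): zero-filling the
 * first 4096 bytes of a buffer that shaders will read afterwards:
 *
 *   evergreen_cp_dma_clear_buffer(rctx, dst, 0, 4096, 0,
 *                                 R600_COHERENCY_SHADER);
 *
 * Since 4096 <= CP_DMA_MAX_BYTE_COUNT, the loop runs once with CP_SYNC
 * set, and R600_COHERENCY_SHADER makes the trailing r600_emit_pfp_sync_me()
 * keep PFP from racing ahead of the ME-executed clear.
 */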