GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/intel/vulkan/genX_gpu_memcpy.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/intel_l3_config.h"

/**
 * This file implements some lightweight memcpy/memset operations on the GPU
 * using a vertex buffer and streamout.
 */
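
/*
 * Rough usage sketch, assuming a valid anv_cmd_buffer and src/dst
 * anv_addresses that each cover at least `size` bytes, with `size` a
 * multiple of four:
 *
 *    genX(cmd_buffer_so_memcpy)(cmd_buffer, dst, src, size);
 *
 * The copy is expressed as a point-list draw: the source buffer is bound as
 * a vertex buffer, the vertex fetcher (VF) reads it, and streamout (SOL)
 * writes the fetched components out to the destination buffer.
 */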

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case, the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}
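
/* Worked example: gcd_pow2_u64(16, 24) computes a_log2 = ffsll(16) - 1 = 4
 * and b_log2 = ffsll(24) - 1 = 3, so it returns 1 << 3 = 8, the largest
 * power-of-two divisor common to both arguments.
 */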

void
genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, struct anv_address src,
                           uint32_t size)
{
   if (size == 0)
      return;

   /* The maximum copy block size is 4 32-bit components at a time. */
   assert(size % 4 == 0);
   unsigned bs = gcd_pow2_u64(16, size);

   enum isl_format format;
   switch (bs) {
   case 4:  format = ISL_FORMAT_R32_UINT;          break;
   case 8:  format = ISL_FORMAT_R32G32_UINT;       break;
   case 16: format = ISL_FORMAT_R32G32B32A32_UINT; break;
   default:
      unreachable("Invalid size");
   }
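
   /* For example, a 20-byte copy yields bs = gcd_pow2_u64(16, 20) = 4, so the
    * data is fetched as ISL_FORMAT_R32_UINT and the draw below emits
    * size / bs = 5 vertices; a 64-byte copy yields bs = 16 and 4 vertices of
    * ISL_FORMAT_R32G32B32A32_UINT.
    */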

   if (!cmd_buffer->state.current_l3_config) {
      const struct intel_l3_config *cfg =
         intel_get_default_l3_config(&cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(cmd_buffer, 32, src, size);
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   uint32_t *dw;
   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_VERTEX_BUFFERS));
   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferStartingAddress = src,
         .BufferPitch = bs,
         .MOCS = anv_mocs(cmd_buffer->device, src.bo, 0),
#if GFX_VER >= 12
         .L3BypassDisable = true,
#endif
#if (GFX_VER >= 8)
         .BufferSize = size,
#else
         .EndAddress = anv_address_add(src, size - 1),
#endif
      });

   dw = anv_batch_emitn(&cmd_buffer->batch, 3, GENX(3DSTATE_VERTEX_ELEMENTS));
   GENX(VERTEX_ELEMENT_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex = 32,
         .Valid = true,
         .SourceElementFormat = format,
         .SourceElementOffset = 0,
         .Component0Control = (bs >= 4) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component1Control = (bs >= 8) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = (bs >= 12) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = (bs >= 16) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
      });

#if GFX_VER >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
      vfi.InstancingEnable = false;
      vfi.VertexElementIndex = 0;
   }
#endif

#if GFX_VER >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_SGVS), sgvs);
#endif

   /* Disable all shader stages */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VS), vs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HS), hs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_TE), te);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DS), ds);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_GS), gs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_PS), ps);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SBE), sbe) {
      sbe.VertexURBEntryReadOffset = 1;
      sbe.NumberofSFOutputAttributes = 1;
      sbe.VertexURBEntryReadLength = 1;
#if GFX_VER >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GFX_VER >= 9
      for (unsigned i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
#endif
   }

   /* Emit URB setup.  We tell it that the VS is active because we want it to
    * allocate space for the VS.  Even though one isn't run, we need VUEs to
    * store the data that VF is going to pass to SOL.
    */
   const unsigned entry_size[4] = { DIV_ROUND_UP(32, 64), 1, 1, 1 };

   genX(emit_urb_setup)(cmd_buffer->device, &cmd_buffer->batch,
                        cmd_buffer->state.current_l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT, entry_size, NULL);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
#if GFX_VER < 12
      sob.SOBufferIndex = 0;
#else
      sob._3DCommandOpcode = 0;
      sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD;
#endif
      sob.MOCS = anv_mocs(cmd_buffer->device, dst.bo, 0);
      sob.SurfaceBaseAddress = dst;

#if GFX_VER >= 8
      sob.SOBufferEnable = true;
      sob.SurfaceSize = size / 4 - 1;
#else
      sob.SurfacePitch = bs;
      sob.SurfaceEndAddress = anv_address_add(dst, size);
#endif

#if GFX_VER >= 8
      /* As SOL writes out data, it updates the SO_WRITE_OFFSET registers with
       * the end position of the stream.  We need to reset this value to 0 at
       * the beginning of the run or else SOL will start at the offset from
       * the previous draw.
       */
      sob.StreamOffsetWriteEnable = true;
      sob.StreamOffset = 0;
#endif
   }

#if GFX_VER <= 7
   /* The hardware can do this for us on BDW+ (see above) */
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), load) {
      load.RegisterOffset = GENX(SO_WRITE_OFFSET0_num);
      load.DataDWord = 0;
   }
#endif

   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_SO_DECL_LIST),
                        .StreamtoBufferSelects0 = (1 << 0),
                        .NumEntries0 = 1);
   GENX(SO_DECL_ENTRY_pack)(&cmd_buffer->batch, dw + 3,
      &(struct GENX(SO_DECL_ENTRY)) {
         .Stream0Decl = {
            .OutputBufferSlot = 0,
            .RegisterIndex = 0,
            .ComponentMask = (1 << (bs / 4)) - 1,
         },
      });

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STREAMOUT), so) {
      so.SOFunctionEnable = true;
      so.RenderingDisable = true;
      so.Stream0VertexReadOffset = 0;
      so.Stream0VertexReadLength = DIV_ROUND_UP(32, 64);
#if GFX_VER >= 8
      so.Buffer0SurfacePitch = bs;
#else
      so.SOBufferEnable0 = true;
#endif
   }

#if GFX_VER >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
      topo.PrimitiveTopologyType = _3DPRIM_POINTLIST;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = false;
   }

#if GFX_VER >= 12
   /* Disable Primitive Replication. */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_PRIMITIVE_REPLICATION), pr);
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType = SEQUENTIAL;
      prim.PrimitiveTopologyType = _3DPRIM_POINTLIST;
      prim.VertexCountPerInstance = size / bs;
      prim.StartVertexLocation = 0;
      prim.InstanceCount = 1;
      prim.StartInstanceLocation = 0;
      prim.BaseVertexLocation = 0;
   }

   genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(cmd_buffer, SEQUENTIAL,
                                                       1ull << 32);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
}