Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/virgl/virgl_encode.c
4570 views
1
/*
2
* Copyright 2014, 2015 Red Hat.
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* on the rights to use, copy, modify, merge, publish, distribute, sub
8
* license, and/or sell copies of the Software, and to permit persons to whom
9
* the Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice (including the next
12
* paragraph) shall be included in all copies or substantial portions of the
13
* Software.
14
*
15
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21
* USE OR OTHER DEALINGS IN THE SOFTWARE.
22
*/
23
#include <stdint.h>
24
#include <assert.h>
25
#include <string.h>
26
27
#include "util/format/u_format.h"
28
#include "util/u_memory.h"
29
#include "util/u_math.h"
30
#include "pipe/p_state.h"
31
#include "tgsi/tgsi_dump.h"
32
#include "tgsi/tgsi_parse.h"
33
34
#include "virgl_context.h"
35
#include "virgl_encode.h"
36
#include "virtio-gpu/virgl_protocol.h"
37
#include "virgl_resource.h"
38
#include "virgl_screen.h"
39
40
/* Upper bound on the dwords a single encoded command may occupy: limited both
 * by the command buffer capacity and by the 16-bit length field of a command
 * header (VIRGL_CMD0). */
#define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)

/* Expands to a designated initializer mapping one gallium format to the
 * identically-named virgl wire format. */
#define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,

/* Translation table from gallium PIPE_FORMAT_* to the virgl protocol format
 * enum.  Formats without an entry stay zero-initialized; pipe_to_virgl_format()
 * reports those as missing. */
static const enum virgl_formats virgl_formats_conv_table[PIPE_FORMAT_COUNT] = {
   CONV_FORMAT(B8G8R8A8_UNORM)
   CONV_FORMAT(B8G8R8X8_UNORM)
   CONV_FORMAT(A8R8G8B8_UNORM)
   CONV_FORMAT(X8R8G8B8_UNORM)
   CONV_FORMAT(B5G5R5A1_UNORM)
   CONV_FORMAT(B4G4R4A4_UNORM)
   CONV_FORMAT(B5G6R5_UNORM)
   CONV_FORMAT(R10G10B10A2_UNORM)
   CONV_FORMAT(L8_UNORM)
   CONV_FORMAT(A8_UNORM)
   CONV_FORMAT(L8A8_UNORM)
   CONV_FORMAT(L16_UNORM)
   CONV_FORMAT(Z16_UNORM)
   CONV_FORMAT(Z32_UNORM)
   CONV_FORMAT(Z32_FLOAT)
   CONV_FORMAT(Z24_UNORM_S8_UINT)
   CONV_FORMAT(S8_UINT_Z24_UNORM)
   CONV_FORMAT(Z24X8_UNORM)
   CONV_FORMAT(X8Z24_UNORM)
   CONV_FORMAT(S8_UINT)
   CONV_FORMAT(R64_FLOAT)
   CONV_FORMAT(R64G64_FLOAT)
   CONV_FORMAT(R64G64B64_FLOAT)
   CONV_FORMAT(R64G64B64A64_FLOAT)
   CONV_FORMAT(R32_FLOAT)
   CONV_FORMAT(R32G32_FLOAT)
   CONV_FORMAT(R32G32B32_FLOAT)
   CONV_FORMAT(R32G32B32A32_FLOAT)
   CONV_FORMAT(R32_UNORM)
   CONV_FORMAT(R32G32_UNORM)
   CONV_FORMAT(R32G32B32_UNORM)
   CONV_FORMAT(R32G32B32A32_UNORM)
   CONV_FORMAT(R32_USCALED)
   CONV_FORMAT(R32G32_USCALED)
   CONV_FORMAT(R32G32B32_USCALED)
   CONV_FORMAT(R32G32B32A32_USCALED)
   CONV_FORMAT(R32_SNORM)
   CONV_FORMAT(R32G32_SNORM)
   CONV_FORMAT(R32G32B32_SNORM)
   CONV_FORMAT(R32G32B32A32_SNORM)
   CONV_FORMAT(R32_SSCALED)
   CONV_FORMAT(R32G32_SSCALED)
   CONV_FORMAT(R32G32B32_SSCALED)
   CONV_FORMAT(R32G32B32A32_SSCALED)
   CONV_FORMAT(R16_UNORM)
   CONV_FORMAT(R16G16_UNORM)
   CONV_FORMAT(R16G16B16_UNORM)
   CONV_FORMAT(R16G16B16A16_UNORM)
   CONV_FORMAT(R16_USCALED)
   CONV_FORMAT(R16G16_USCALED)
   CONV_FORMAT(R16G16B16_USCALED)
   CONV_FORMAT(R16G16B16A16_USCALED)
   CONV_FORMAT(R16_SNORM)
   CONV_FORMAT(R16G16_SNORM)
   CONV_FORMAT(R16G16B16_SNORM)
   CONV_FORMAT(R16G16B16A16_SNORM)
   CONV_FORMAT(R16_SSCALED)
   CONV_FORMAT(R16G16_SSCALED)
   CONV_FORMAT(R16G16B16_SSCALED)
   CONV_FORMAT(R16G16B16A16_SSCALED)
   CONV_FORMAT(R8_UNORM)
   CONV_FORMAT(R8G8_UNORM)
   CONV_FORMAT(R8G8B8_UNORM)
   CONV_FORMAT(R8G8B8A8_UNORM)
   CONV_FORMAT(R8_USCALED)
   CONV_FORMAT(R8G8_USCALED)
   CONV_FORMAT(R8G8B8_USCALED)
   CONV_FORMAT(R8G8B8A8_USCALED)
   CONV_FORMAT(R8_SNORM)
   CONV_FORMAT(R8G8_SNORM)
   CONV_FORMAT(R8G8B8_SNORM)
   CONV_FORMAT(R8G8B8A8_SNORM)
   CONV_FORMAT(R8_SSCALED)
   CONV_FORMAT(R8G8_SSCALED)
   CONV_FORMAT(R8G8B8_SSCALED)
   CONV_FORMAT(R8G8B8A8_SSCALED)
   CONV_FORMAT(R16_FLOAT)
   CONV_FORMAT(R16G16_FLOAT)
   CONV_FORMAT(R16G16B16_FLOAT)
   CONV_FORMAT(R16G16B16A16_FLOAT)
   CONV_FORMAT(L8_SRGB)
   CONV_FORMAT(L8A8_SRGB)
   CONV_FORMAT(R8G8B8_SRGB)
   CONV_FORMAT(A8B8G8R8_SRGB)
   CONV_FORMAT(X8B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8A8_SRGB)
   CONV_FORMAT(B8G8R8X8_SRGB)
   CONV_FORMAT(A8R8G8B8_SRGB)
   CONV_FORMAT(X8R8G8B8_SRGB)
   CONV_FORMAT(R8G8B8A8_SRGB)
   CONV_FORMAT(DXT1_RGB)
   CONV_FORMAT(DXT1_RGBA)
   CONV_FORMAT(DXT3_RGBA)
   CONV_FORMAT(DXT5_RGBA)
   CONV_FORMAT(DXT1_SRGB)
   CONV_FORMAT(DXT1_SRGBA)
   CONV_FORMAT(DXT3_SRGBA)
   CONV_FORMAT(DXT5_SRGBA)
   CONV_FORMAT(RGTC1_UNORM)
   CONV_FORMAT(RGTC1_SNORM)
   CONV_FORMAT(RGTC2_UNORM)
   CONV_FORMAT(RGTC2_SNORM)
   CONV_FORMAT(A8B8G8R8_UNORM)
   CONV_FORMAT(B5G5R5X1_UNORM)
   CONV_FORMAT(R10G10B10A2_USCALED)
   CONV_FORMAT(R11G11B10_FLOAT)
   CONV_FORMAT(R9G9B9E5_FLOAT)
   CONV_FORMAT(Z32_FLOAT_S8X24_UINT)
   CONV_FORMAT(B10G10R10A2_UNORM)
   CONV_FORMAT(R8G8B8X8_UNORM)
   CONV_FORMAT(B4G4R4X4_UNORM)
   CONV_FORMAT(X24S8_UINT)
   CONV_FORMAT(S8X24_UINT)
   CONV_FORMAT(X32_S8X24_UINT)
   CONV_FORMAT(B2G3R3_UNORM)
   CONV_FORMAT(L16A16_UNORM)
   CONV_FORMAT(A16_UNORM)
   CONV_FORMAT(I16_UNORM)
   CONV_FORMAT(LATC1_UNORM)
   CONV_FORMAT(LATC1_SNORM)
   CONV_FORMAT(LATC2_UNORM)
   CONV_FORMAT(LATC2_SNORM)
   CONV_FORMAT(A8_SNORM)
   CONV_FORMAT(L8_SNORM)
   CONV_FORMAT(L8A8_SNORM)
   CONV_FORMAT(A16_SNORM)
   CONV_FORMAT(L16_SNORM)
   CONV_FORMAT(L16A16_SNORM)
   CONV_FORMAT(A16_FLOAT)
   CONV_FORMAT(L16_FLOAT)
   CONV_FORMAT(L16A16_FLOAT)
   CONV_FORMAT(A32_FLOAT)
   CONV_FORMAT(L32_FLOAT)
   CONV_FORMAT(L32A32_FLOAT)
   CONV_FORMAT(YV12)
   CONV_FORMAT(YV16)
   CONV_FORMAT(IYUV)
   CONV_FORMAT(NV12)
   CONV_FORMAT(NV21)
   CONV_FORMAT(R8_UINT)
   CONV_FORMAT(R8G8_UINT)
   CONV_FORMAT(R8G8B8_UINT)
   CONV_FORMAT(R8G8B8A8_UINT)
   CONV_FORMAT(R8_SINT)
   CONV_FORMAT(R8G8_SINT)
   CONV_FORMAT(R8G8B8_SINT)
   CONV_FORMAT(R8G8B8A8_SINT)
   CONV_FORMAT(R16_UINT)
   CONV_FORMAT(R16G16_UINT)
   CONV_FORMAT(R16G16B16_UINT)
   CONV_FORMAT(R16G16B16A16_UINT)
   CONV_FORMAT(R16_SINT)
   CONV_FORMAT(R16G16_SINT)
   CONV_FORMAT(R16G16B16_SINT)
   CONV_FORMAT(R16G16B16A16_SINT)
   CONV_FORMAT(R32_UINT)
   CONV_FORMAT(R32G32_UINT)
   CONV_FORMAT(R32G32B32_UINT)
   CONV_FORMAT(R32G32B32A32_UINT)
   CONV_FORMAT(R32_SINT)
   CONV_FORMAT(R32G32_SINT)
   CONV_FORMAT(R32G32B32_SINT)
   CONV_FORMAT(R32G32B32A32_SINT)
   CONV_FORMAT(A8_UINT)
   CONV_FORMAT(L8_UINT)
   CONV_FORMAT(L8A8_UINT)
   CONV_FORMAT(A8_SINT)
   CONV_FORMAT(L8_SINT)
   CONV_FORMAT(L8A8_SINT)
   CONV_FORMAT(A16_UINT)
   CONV_FORMAT(L16_UINT)
   CONV_FORMAT(L16A16_UINT)
   CONV_FORMAT(A16_SINT)
   CONV_FORMAT(L16_SINT)
   CONV_FORMAT(L16A16_SINT)
   CONV_FORMAT(A32_UINT)
   CONV_FORMAT(L32_UINT)
   CONV_FORMAT(L32A32_UINT)
   CONV_FORMAT(A32_SINT)
   CONV_FORMAT(L32_SINT)
   CONV_FORMAT(L32A32_SINT)
   CONV_FORMAT(R10G10B10A2_SSCALED)
   CONV_FORMAT(R10G10B10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_UINT)
   CONV_FORMAT(R8G8B8X8_SNORM)
   CONV_FORMAT(R8G8B8X8_SRGB)
   CONV_FORMAT(R8G8B8X8_UINT)
   CONV_FORMAT(R8G8B8X8_SINT)
   CONV_FORMAT(B10G10R10X2_UNORM)
   CONV_FORMAT(R16G16B16X16_UNORM)
   CONV_FORMAT(R16G16B16X16_SNORM)
   CONV_FORMAT(R16G16B16X16_FLOAT)
   CONV_FORMAT(R16G16B16X16_UINT)
   CONV_FORMAT(R16G16B16X16_SINT)
   CONV_FORMAT(R32G32B32X32_FLOAT)
   CONV_FORMAT(R32G32B32X32_UINT)
   CONV_FORMAT(R32G32B32X32_SINT)
   CONV_FORMAT(R10G10B10A2_UINT)
   CONV_FORMAT(BPTC_RGBA_UNORM)
   CONV_FORMAT(BPTC_SRGBA)
   CONV_FORMAT(BPTC_RGB_FLOAT)
   CONV_FORMAT(BPTC_RGB_UFLOAT)
   CONV_FORMAT(R10G10B10X2_UNORM)
   CONV_FORMAT(A4B4G4R4_UNORM)
   CONV_FORMAT(R8_SRGB)
   CONV_FORMAT(R8G8_SRGB)
   CONV_FORMAT(ETC1_RGB8)
   CONV_FORMAT(ETC2_RGB8)
   CONV_FORMAT(ETC2_SRGB8)
   CONV_FORMAT(ETC2_RGB8A1)
   CONV_FORMAT(ETC2_SRGB8A1)
   CONV_FORMAT(ETC2_RGBA8)
   CONV_FORMAT(ETC2_SRGBA8)
   CONV_FORMAT(ETC2_R11_UNORM)
   CONV_FORMAT(ETC2_R11_SNORM)
   CONV_FORMAT(ETC2_RG11_UNORM)
   CONV_FORMAT(ETC2_RG11_SNORM)
   CONV_FORMAT(ASTC_4x4)
   CONV_FORMAT(ASTC_5x4)
   CONV_FORMAT(ASTC_5x5)
   CONV_FORMAT(ASTC_6x5)
   CONV_FORMAT(ASTC_6x6)
   CONV_FORMAT(ASTC_8x5)
   CONV_FORMAT(ASTC_8x6)
   CONV_FORMAT(ASTC_8x8)
   CONV_FORMAT(ASTC_10x5)
   CONV_FORMAT(ASTC_10x6)
   CONV_FORMAT(ASTC_10x8)
   CONV_FORMAT(ASTC_10x10)
   CONV_FORMAT(ASTC_12x10)
   CONV_FORMAT(ASTC_12x12)
   CONV_FORMAT(ASTC_4x4_SRGB)
   CONV_FORMAT(ASTC_5x4_SRGB)
   CONV_FORMAT(ASTC_5x5_SRGB)
   CONV_FORMAT(ASTC_6x5_SRGB)
   CONV_FORMAT(ASTC_6x6_SRGB)
   CONV_FORMAT(ASTC_8x5_SRGB)
   CONV_FORMAT(ASTC_8x6_SRGB)
   CONV_FORMAT(ASTC_8x8_SRGB )
   CONV_FORMAT(ASTC_10x5_SRGB)
   CONV_FORMAT(ASTC_10x6_SRGB)
   CONV_FORMAT(ASTC_10x8_SRGB)
   CONV_FORMAT(ASTC_10x10_SRGB)
   CONV_FORMAT(ASTC_12x10_SRGB)
   CONV_FORMAT(ASTC_12x12_SRGB)
};
292
293
enum virgl_formats pipe_to_virgl_format(enum pipe_format format)
294
{
295
enum virgl_formats vformat = virgl_formats_conv_table[format];
296
if (format != PIPE_FORMAT_NONE && !vformat)
297
debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format));
298
return vformat;
299
}
300
301
static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
302
uint32_t dword)
303
{
304
int len = (dword >> 16);
305
306
if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
307
ctx->base.flush(&ctx->base, NULL, 0);
308
309
virgl_encoder_write_dword(ctx->cbuf, dword);
310
return 0;
311
}
312
313
static void virgl_encoder_emit_resource(struct virgl_screen *vs,
314
struct virgl_cmd_buf *buf,
315
struct virgl_resource *res)
316
{
317
struct virgl_winsys *vws = vs->vws;
318
if (res && res->hw_res)
319
vws->emit_res(vws, buf, res->hw_res, TRUE);
320
else {
321
virgl_encoder_write_dword(buf, 0);
322
}
323
}
324
325
static void virgl_encoder_write_res(struct virgl_context *ctx,
326
struct virgl_resource *res)
327
{
328
struct virgl_screen *vs = virgl_screen(ctx->base.screen);
329
virgl_encoder_emit_resource(vs, ctx->cbuf, res);
330
}
331
332
int virgl_encode_bind_object(struct virgl_context *ctx,
333
uint32_t handle, uint32_t object)
334
{
335
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
336
virgl_encoder_write_dword(ctx->cbuf, handle);
337
return 0;
338
}
339
340
int virgl_encode_delete_object(struct virgl_context *ctx,
341
uint32_t handle, uint32_t object)
342
{
343
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
344
virgl_encoder_write_dword(ctx->cbuf, handle);
345
return 0;
346
}
347
348
/* Encode a CREATE_OBJECT(BLEND) command.  The dword layout (S0 header bits,
 * S1 logic op, then one S2 dword per color buffer) is fixed by the virgl
 * protocol; do not reorder the writes. */
int virgl_encode_blend_state(struct virgl_context *ctx,
                             uint32_t handle,
                             const struct pipe_blend_state *blend_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: global enable bits. */
   tmp =
      VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
      VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
      VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);

   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1: logic op function. */
   tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S2: one dword of per-render-target blend state. */
   for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
      /* We use alpha src factor to pass the advanced blend equation value
       * to the host. By doing so, we don't have to change the protocol.
       */
      uint32_t alpha = (i == 0 && blend_state->advanced_blend_func)
                       ? blend_state->advanced_blend_func
                       : blend_state->rt[i].alpha_src_factor;
      tmp =
         VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(alpha) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }
   return 0;
}
390
391
/* Encode a CREATE_OBJECT(DSA) command.  Wire layout: S0 depth/alpha bits,
 * two S1 dwords (front and back stencil), then the alpha reference value
 * as raw float bits.  Order is fixed by the protocol. */
int virgl_encode_dsa_state(struct virgl_context *ctx,
                           uint32_t handle,
                           const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: depth test and alpha test state. */
   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth_enabled) |
      VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth_writemask) |
      VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth_func) |
      VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha_enabled) |
      VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   /* S1: stencil state, i=0 front face, i=1 back face. */
   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
         VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
         VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
         VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
         VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   /* Alpha reference value, transmitted as the float's bit pattern. */
   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha_ref_value));
   return 0;
}
421
/* Encode a CREATE_OBJECT(RASTERIZER) command.  The eight state dwords
 * (S0..S7) follow the fixed virgl protocol layout; do not reorder. */
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   /* S0: all single-bit/small-field rasterizer flags packed into one dword. */
   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
      VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
      VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
      VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
      VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
      VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
      VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
      VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
      VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
      VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
      VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
      VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
      VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
      VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
      VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
      VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
      VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
      VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
      VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
      VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
      VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
      VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
      VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
      VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
      VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
      VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
      VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
      VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
      VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   /* S3: line stipple and clip plane state packed into one dword. */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
      VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
      VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   /* S4..S7: float parameters sent as raw bit patterns. */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}
473
474
/* Emit the common header of a CREATE_OBJECT(SHADER) command: handle, shader
 * type, offset/length word (offlen) and the TGSI token count.  `len` is the
 * total payload length of this command in dwords. */
static void virgl_emit_shader_header(struct virgl_context *ctx,
                                     uint32_t handle, uint32_t len,
                                     uint32_t type, uint32_t offlen,
                                     uint32_t num_tokens)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   virgl_encoder_write_dword(ctx->cbuf, offlen);
   virgl_encoder_write_dword(ctx->cbuf, num_tokens);
}
485
486
static void virgl_emit_shader_streamout(struct virgl_context *ctx,
487
const struct pipe_stream_output_info *so_info)
488
{
489
int num_outputs = 0;
490
int i;
491
uint32_t tmp;
492
493
if (so_info)
494
num_outputs = so_info->num_outputs;
495
496
virgl_encoder_write_dword(ctx->cbuf, num_outputs);
497
if (num_outputs) {
498
for (i = 0; i < 4; i++)
499
virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
500
501
for (i = 0; i < so_info->num_outputs; i++) {
502
tmp =
503
VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
504
VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
505
VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
506
VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
507
VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
508
virgl_encoder_write_dword(ctx->cbuf, tmp);
509
virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
510
}
511
}
512
}
513
514
int virgl_encode_shader_state(struct virgl_context *ctx,
515
uint32_t handle,
516
uint32_t type,
517
const struct pipe_stream_output_info *so_info,
518
uint32_t cs_req_local_mem,
519
const struct tgsi_token *tokens)
520
{
521
char *str, *sptr;
522
uint32_t shader_len, len;
523
bool bret;
524
int num_tokens = tgsi_num_tokens(tokens);
525
int str_total_size = 65536;
526
int retry_size = 1;
527
uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
528
bool first_pass;
529
str = CALLOC(1, str_total_size);
530
if (!str)
531
return -1;
532
533
do {
534
int old_size;
535
536
bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
537
if (bret == false) {
538
if (virgl_debug & VIRGL_DEBUG_VERBOSE)
539
debug_printf("Failed to translate shader in available space - trying again\n");
540
old_size = str_total_size;
541
str_total_size = 65536 * retry_size;
542
retry_size *= 2;
543
str = REALLOC(str, old_size, str_total_size);
544
if (!str)
545
return -1;
546
}
547
} while (bret == false && retry_size < 1024);
548
549
if (bret == false)
550
return -1;
551
552
if (virgl_debug & VIRGL_DEBUG_TGSI)
553
debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
554
555
shader_len = strlen(str) + 1;
556
557
left_bytes = shader_len;
558
559
base_hdr_size = 5;
560
strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
561
first_pass = true;
562
sptr = str;
563
while (left_bytes) {
564
uint32_t length, offlen;
565
int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
566
if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
567
ctx->base.flush(&ctx->base, NULL, 0);
568
569
thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
570
571
length = MIN2(thispass, left_bytes);
572
len = ((length + 3) / 4) + hdr_len;
573
574
if (first_pass)
575
offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
576
else
577
offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
578
579
virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
580
581
if (type == PIPE_SHADER_COMPUTE)
582
virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
583
else
584
virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
585
586
virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
587
588
sptr += length;
589
first_pass = false;
590
left_bytes -= length;
591
}
592
593
FREE(str);
594
return 0;
595
}
596
597
598
int virgl_encode_clear(struct virgl_context *ctx,
599
unsigned buffers,
600
const union pipe_color_union *color,
601
double depth, unsigned stencil)
602
{
603
int i;
604
uint64_t qword;
605
606
STATIC_ASSERT(sizeof(qword) == sizeof(depth));
607
memcpy(&qword, &depth, sizeof(qword));
608
609
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
610
virgl_encoder_write_dword(ctx->cbuf, buffers);
611
for (i = 0; i < 4; i++)
612
virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
613
virgl_encoder_write_qword(ctx->cbuf, qword);
614
virgl_encoder_write_dword(ctx->cbuf, stencil);
615
return 0;
616
}
617
618
int virgl_encode_clear_texture(struct virgl_context *ctx,
619
struct virgl_resource *res,
620
unsigned int level,
621
const struct pipe_box *box,
622
const void *data)
623
{
624
const struct util_format_description *desc = util_format_description(res->b.format);
625
unsigned block_bits = desc->block.bits;
626
uint32_t arr[4] = {0};
627
/* The spec describe <data> as a pointer to an array of between one
628
* and four components of texel data that will be used as the source
629
* for the constant fill value.
630
* Here, we are just copying the memory into <arr>. We do not try to
631
* re-create the data array. The host part will take care of interpreting
632
* the memory and applying the correct format to the clear call.
633
*/
634
memcpy(&arr, data, block_bits / 8);
635
636
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE, 0, VIRGL_CLEAR_TEXTURE_SIZE));
637
virgl_encoder_write_res(ctx, res);
638
virgl_encoder_write_dword(ctx->cbuf, level);
639
virgl_encoder_write_dword(ctx->cbuf, box->x);
640
virgl_encoder_write_dword(ctx->cbuf, box->y);
641
virgl_encoder_write_dword(ctx->cbuf, box->z);
642
virgl_encoder_write_dword(ctx->cbuf, box->width);
643
virgl_encoder_write_dword(ctx->cbuf, box->height);
644
virgl_encoder_write_dword(ctx->cbuf, box->depth);
645
for (unsigned i = 0; i < 4; i++)
646
virgl_encoder_write_dword(ctx->cbuf, arr[i]);
647
return 0;
648
}
649
650
int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
651
const struct pipe_framebuffer_state *state)
652
{
653
struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
654
int i;
655
656
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
657
virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
658
virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
659
for (i = 0; i < state->nr_cbufs; i++) {
660
struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
661
virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
662
}
663
664
struct virgl_screen *rs = virgl_screen(ctx->base.screen);
665
if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
666
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
667
virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
668
virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
669
}
670
return 0;
671
}
672
673
int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
674
int start_slot,
675
int num_viewports,
676
const struct pipe_viewport_state *states)
677
{
678
int i,v;
679
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
680
virgl_encoder_write_dword(ctx->cbuf, start_slot);
681
for (v = 0; v < num_viewports; v++) {
682
for (i = 0; i < 3; i++)
683
virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
684
for (i = 0; i < 3; i++)
685
virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
686
}
687
return 0;
688
}
689
690
int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
691
uint32_t handle,
692
unsigned num_elements,
693
const struct pipe_vertex_element *element)
694
{
695
int i;
696
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
697
virgl_encoder_write_dword(ctx->cbuf, handle);
698
for (i = 0; i < num_elements; i++) {
699
virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
700
virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
701
virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
702
virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(element[i].src_format));
703
}
704
return 0;
705
}
706
707
int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
708
unsigned num_buffers,
709
const struct pipe_vertex_buffer *buffers)
710
{
711
int i;
712
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
713
for (i = 0; i < num_buffers; i++) {
714
struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
715
virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
716
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
717
virgl_encoder_write_res(ctx, res);
718
}
719
return 0;
720
}
721
722
int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
723
const struct virgl_indexbuf *ib)
724
{
725
int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
726
struct virgl_resource *res = NULL;
727
if (ib)
728
res = virgl_resource(ib->buffer);
729
730
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
731
virgl_encoder_write_res(ctx, res);
732
if (ib) {
733
virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
734
virgl_encoder_write_dword(ctx->cbuf, ib->offset);
735
}
736
return 0;
737
}
738
739
/* Encode a DRAW_VBO command.  The command has three sizes: the base draw,
 * the tessellation variant (adds vertices-per-patch and draw id), and the
 * indirect variant (additionally adds the indirect buffer parameters).
 * The dword order is fixed by the protocol. */
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draw)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   if (info->mode == PIPE_PRIM_PATCHES)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   if (indirect && indirect->buffer)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, draw->start);
   virgl_encoder_write_dword(ctx->cbuf, draw->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   /* index_bias only applies to indexed draws. */
   virgl_encoder_write_dword(ctx->cbuf, info->index_size ? draw->index_bias : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart ? info->restart_index : 0);
   /* Without valid bounds, send the widest possible index range. */
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->min_index : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->max_index : ~0);
   if (indirect && indirect->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, indirect->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, info->vertices_per_patch); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, drawid_offset); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, indirect->stride); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, indirect->draw_count); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, indirect->indirect_draw_count_offset); /* indirect draw count offset */
      if (indirect->indirect_draw_count)
         virgl_encoder_write_res(ctx, virgl_resource(indirect->indirect_draw_count));
      else
         virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}
783
784
static int virgl_encoder_create_surface_common(struct virgl_context *ctx,
785
uint32_t handle,
786
struct virgl_resource *res,
787
const struct pipe_surface *templat)
788
{
789
virgl_encoder_write_dword(ctx->cbuf, handle);
790
virgl_encoder_write_res(ctx, res);
791
virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(templat->format));
792
793
assert(templat->texture->target != PIPE_BUFFER);
794
virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
795
virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
796
797
return 0;
798
}
799
800
int virgl_encoder_create_surface(struct virgl_context *ctx,
801
uint32_t handle,
802
struct virgl_resource *res,
803
const struct pipe_surface *templat)
804
{
805
if (templat->nr_samples > 0) {
806
ASSERTED struct virgl_screen *rs = virgl_screen(ctx->base.screen);
807
assert(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_IMPLICIT_MSAA);
808
809
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_MSAA_SURFACE, VIRGL_OBJ_MSAA_SURFACE_SIZE));
810
virgl_encoder_create_surface_common(ctx, handle, res, templat);
811
virgl_encoder_write_dword(ctx->cbuf, templat->nr_samples);
812
} else {
813
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
814
virgl_encoder_create_surface_common(ctx, handle, res, templat);
815
}
816
817
return 0;
818
}
819
820
int virgl_encoder_create_so_target(struct virgl_context *ctx,
821
uint32_t handle,
822
struct virgl_resource *res,
823
unsigned buffer_offset,
824
unsigned buffer_size)
825
{
826
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
827
virgl_encoder_write_dword(ctx->cbuf, handle);
828
virgl_encoder_write_res(ctx, res);
829
virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
830
virgl_encoder_write_dword(ctx->cbuf, buffer_size);
831
return 0;
832
}
833
834
/* Selects how strides are transmitted in TRANSFER3D-style commands. */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
842
843
static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
844
struct virgl_cmd_buf *buf,
845
struct virgl_transfer *xfer,
846
enum virgl_transfer3d_encode_stride encode_stride)
847
848
{
849
struct pipe_transfer *transfer = &xfer->base;
850
unsigned stride;
851
unsigned layer_stride;
852
853
if (encode_stride == virgl_transfer3d_explicit_stride) {
854
stride = transfer->stride;
855
layer_stride = transfer->layer_stride;
856
} else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
857
stride = 0;
858
layer_stride = 0;
859
} else {
860
assert(!"Invalid virgl_transfer3d_encode_stride value");
861
}
862
863
/* We cannot use virgl_encoder_emit_resource with transfer->resource here
864
* because transfer->resource might have a different virgl_hw_res than what
865
* this transfer targets, which is saved in xfer->hw_res.
866
*/
867
vs->vws->emit_res(vs->vws, buf, xfer->hw_res, TRUE);
868
virgl_encoder_write_dword(buf, transfer->level);
869
virgl_encoder_write_dword(buf, transfer->usage);
870
virgl_encoder_write_dword(buf, stride);
871
virgl_encoder_write_dword(buf, layer_stride);
872
virgl_encoder_write_dword(buf, transfer->box.x);
873
virgl_encoder_write_dword(buf, transfer->box.y);
874
virgl_encoder_write_dword(buf, transfer->box.z);
875
virgl_encoder_write_dword(buf, transfer->box.width);
876
virgl_encoder_write_dword(buf, transfer->box.height);
877
virgl_encoder_write_dword(buf, transfer->box.depth);
878
}
879
880
int virgl_encoder_inline_write(struct virgl_context *ctx,
881
struct virgl_resource *res,
882
unsigned level, unsigned usage,
883
const struct pipe_box *box,
884
const void *data, unsigned stride,
885
unsigned layer_stride)
886
{
887
uint32_t size = (stride ? stride : box->width) * box->height;
888
uint32_t length, thispass, left_bytes;
889
struct virgl_transfer transfer;
890
struct virgl_screen *vs = virgl_screen(ctx->base.screen);
891
892
transfer.base.resource = &res->b;
893
transfer.hw_res = res->hw_res;
894
transfer.base.level = level;
895
transfer.base.usage = usage;
896
transfer.base.box = *box;
897
898
length = 11 + (size + 3) / 4;
899
if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) {
900
if (box->height > 1 || box->depth > 1) {
901
debug_printf("inline transfer failed due to multi dimensions and too large\n");
902
assert(0);
903
}
904
}
905
906
left_bytes = size;
907
while (left_bytes) {
908
if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS)
909
ctx->base.flush(&ctx->base, NULL, 0);
910
911
thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4;
912
913
length = MIN2(thispass, left_bytes);
914
915
transfer.base.box.width = length;
916
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
917
virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer,
918
virgl_transfer3d_host_inferred_stride);
919
virgl_encoder_write_block(ctx->cbuf, data, length);
920
left_bytes -= length;
921
transfer.base.box.x += length;
922
data += length;
923
}
924
return 0;
925
}
926
927
/* Intentionally a no-op: the FLUSH_FRONTBUFFER command is not encoded. */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}

/* Encode creation of a sampler-state object: a packed S0 dword of
 * wrap/filter/compare modes, the LOD parameters, then the border color.
 */
int virgl_encode_sampler_state(struct virgl_context *ctx,
                               uint32_t handle,
                               const struct pipe_sampler_state *state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
      VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map);

   virgl_encoder_write_dword(ctx->cbuf, tmp);
   /* Floats are sent as their IEEE bit patterns. */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
   return 0;
}

/* Encode creation of a sampler-view object for @res.
 * Buffer views send element offsets; texture views send layer and level
 * ranges (or the plane index for multi-planar resources), then the swizzle.
 */
int virgl_encode_sampler_view(struct virgl_context *ctx,
                              uint32_t handle,
                              struct virgl_resource *res,
                              const struct pipe_sampler_view *state)
{
   unsigned elem_size = util_format_get_blocksize(state->format);
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   uint32_t tmp;
   uint32_t dword_fmt_target = pipe_to_virgl_format(state->format);
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   /* Hosts with texture-view support take the view target in the top byte. */
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
      dword_fmt_target |= (state->target << 24);
   virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
   if (res->b.target == PIPE_BUFFER) {
      /* Buffer views are expressed in elements: first and last index. */
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
      virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
   } else {
      if (res->metadata.plane) {
         /* Multi-planar: the layer dword carries the plane index instead. */
         debug_assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
         virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      }
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}

/* Bind @num_views sampler-view handles for @shader_type starting at
 * @start_slot; NULL entries encode handle 0 (unbind).
 */
int virgl_encode_set_sampler_views(struct virgl_context *ctx,
                                   uint32_t shader_type,
                                   uint32_t start_slot,
                                   uint32_t num_views,
                                   struct virgl_sampler_view **views)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
   virgl_encoder_write_dword(ctx->cbuf, shader_type);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_views; i++) {
      uint32_t handle = views[i] ? views[i]->handle : 0;
      virgl_encoder_write_dword(ctx->cbuf, handle);
   }
   return 0;
}

/* Bind @num_handles sampler-state handles for @shader_type starting at
 * @start_slot.
 */
int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
                                     uint32_t shader_type,
                                     uint32_t start_slot,
                                     uint32_t num_handles,
                                     uint32_t *handles)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
   virgl_encoder_write_dword(ctx->cbuf, shader_type);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_handles; i++)
      virgl_encoder_write_dword(ctx->cbuf, handles[i]);
   return 0;
}

/* Upload an inline constant buffer of @size dwords for stage @shader,
 * slot @index. When @data is NULL only the header is sent.
 */
int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
                                        uint32_t shader,
                                        uint32_t index,
                                        uint32_t size,
                                        const void *data)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   if (data)
      /* @size is in dwords, write_block takes bytes. */
      virgl_encoder_write_block(ctx->cbuf, data, size * 4);
   return 0;
}

/* Bind a range [offset, offset + length) of @res as the uniform buffer for
 * stage @shader, slot @index.
 */
int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
                                     uint32_t shader,
                                     uint32_t index,
                                     uint32_t offset,
                                     uint32_t length,
                                     struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, length);
   virgl_encoder_write_res(ctx, res);
   return 0;
}

/* Encode the front/back stencil reference values packed into one dword. */
int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
   return 0;
}

/* Encode the blend color as four float bit patterns (RGBA). */
int virgl_encoder_set_blend_color(struct virgl_context *ctx,
                                  const struct pipe_blend_color *color)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
   return 0;
}

/* Encode @num_scissors scissor rectangles starting at @start_slot; each
 * rect is two dwords: (minx | miny << 16) and (maxx | maxy << 16).
 */
int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
                                    unsigned start_slot,
                                    int num_scissors,
                                    const struct pipe_scissor_state *ss)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_scissors; i++) {
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
   }
   return 0;
}

/* Encode the 32x32 polygon stipple pattern, one dword per row. */
void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
                                       const struct pipe_poly_stipple *ps)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
   for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
      virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
   }
}

/* Encode the multisample coverage mask. */
void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
                                   unsigned sample_mask)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, sample_mask);
}

/* Encode the minimum number of samples for fragment-shader execution. */
void virgl_encoder_set_min_samples(struct virgl_context *ctx,
                                   unsigned min_samples)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, min_samples);
}

/* Encode all user clip planes as float bit patterns (4 coefficients each). */
void virgl_encoder_set_clip_state(struct virgl_context *ctx,
                                  const struct pipe_clip_state *clip)
{
   int i, j;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
   for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
      for (j = 0; j < 4; j++) {
         virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
      }
   }
}

/* Encode a RESOURCE_COPY_REGION: destination resource/level/origin followed
 * by source resource/level/box.
 */
int virgl_encode_resource_copy_region(struct virgl_context *ctx,
                                      struct virgl_resource *dst_res,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct virgl_resource *src_res,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, dst_level);
   virgl_encoder_write_dword(ctx->cbuf, dstx);
   virgl_encoder_write_dword(ctx->cbuf, dsty);
   virgl_encoder_write_dword(ctx->cbuf, dstz);
   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, src_level);
   virgl_encoder_write_dword(ctx->cbuf, src_box->x);
   virgl_encoder_write_dword(ctx->cbuf, src_box->y);
   virgl_encoder_write_dword(ctx->cbuf, src_box->z);
   virgl_encoder_write_dword(ctx->cbuf, src_box->width);
   virgl_encoder_write_dword(ctx->cbuf, src_box->height);
   virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
   return 0;
}

/* Encode a BLIT command: packed flags dword, scissor rect, then the
 * destination and source resource/level/format/box groups.
 */
int virgl_encode_blit(struct virgl_context *ctx,
                      struct virgl_resource *dst_res,
                      struct virgl_resource *src_res,
                      const struct pipe_blit_info *blit)
{
   uint32_t tmp;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
      VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
      VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
      VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
      VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->dst.format));
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->src.format));
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}

/* Encode creation of a query object; type and index are packed into one
 * dword, and the result is written to @res at @offset.
 */
int virgl_encoder_create_query(struct virgl_context *ctx,
                               uint32_t handle,
                               uint query_type,
                               uint query_index,
                               struct virgl_resource *res,
                               uint32_t offset)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}

/* Encode BEGIN_QUERY for the query object @handle. */
int virgl_encoder_begin_query(struct virgl_context *ctx,
                             uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

/* Encode END_QUERY for the query object @handle. */
int virgl_encoder_end_query(struct virgl_context *ctx,
                           uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

/* Encode GET_QUERY_RESULT; @wait asks the host to block until the result
 * is available.
 */
int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                  uint32_t handle, boolean wait)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   return 0;
}

/* Encode SET_RENDER_CONDITION: conditional rendering on query @handle
 * with the given condition sense and wait mode.
 */
int virgl_encoder_render_condition(struct virgl_context *ctx,
                                  uint32_t handle, boolean condition,
                                  enum pipe_render_cond_flag mode)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, condition);
   virgl_encoder_write_dword(ctx->cbuf, mode);
   return 0;
}

/* Bind @num_targets stream-output targets; NULL entries encode handle 0.
 * @append_bitmask marks targets that append rather than restart.
 */
int virgl_encoder_set_so_targets(struct virgl_context *ctx,
                                 unsigned num_targets,
                                 struct pipe_stream_output_target **targets,
                                 unsigned append_bitmask)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
   virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
   for (i = 0; i < num_targets; i++) {
      struct virgl_so_target *tg = virgl_so_target(targets[i]);
      virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
   }
   return 0;
}

/* Switch the host to sub-context @sub_ctx_id. */
int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

/* Create host sub-context @sub_ctx_id. */
int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

/* Destroy host sub-context @sub_ctx_id. */
int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

/* Bind the shader object @handle to pipeline stage @type. */
int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle, uint32_t type)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   return 0;
}

/* Encode the default tessellation levels: 4 outer then 2 inner, as
 * float bit patterns.
 */
int virgl_encode_set_tess_state(struct virgl_context *ctx,
                                const float outer[4],
                                const float inner[2])
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
   for (i = 0; i < 2; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
   return 0;
}

/* Bind @count shader storage buffers for stage @shader starting at
 * @start_slot. Each slot encodes offset, size and resource; empty slots
 * encode three zero dwords. Bound ranges are marked valid and dirty.
 */
int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_shader_buffer *buffers)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));

   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < count; i++) {
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
         virgl_encoder_write_res(ctx, res);

         /* The shader may write through this binding. */
         util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
               buffers[i].buffer_offset + buffers[i].buffer_size);
         virgl_resource_dirty(res, 0);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
   return 0;
}

/* Bind @count hardware atomic-counter buffers starting at @start_slot.
 * Same per-slot layout as shader buffers (offset, size, resource; zeros
 * for empty slots); bound ranges are marked valid and dirty.
 */
int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
                                       unsigned start_slot, unsigned count,
                                       const struct pipe_shader_buffer *buffers)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));

   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < count; i++) {
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
         virgl_encoder_write_res(ctx, res);

         /* Atomic counters are read-write. */
         util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
               buffers[i].buffer_offset + buffers[i].buffer_size);
         virgl_resource_dirty(res, 0);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
   return 0;
}

/* Bind @count shader images for stage @shader starting at @start_slot.
 * Each slot encodes format, access, buffer offset/size (the union also
 * carries texture level/layers) and the resource; empty slots encode five
 * zero dwords.
 */
int virgl_encode_set_shader_images(struct virgl_context *ctx,
                                   enum pipe_shader_type shader,
                                   unsigned start_slot, unsigned count,
                                   const struct pipe_image_view *images)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));

   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < count; i++) {
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(images[i].format));
         virgl_encoder_write_dword(ctx->cbuf, images[i].access);
         virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
         virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
         virgl_encoder_write_res(ctx, res);

         /* Only buffer images have a byte range to validate. */
         if (res->b.target == PIPE_BUFFER) {
            util_range_add(&res->b, &res->valid_buffer_range, images[i].u.buf.offset,
                  images[i].u.buf.offset + images[i].u.buf.size);
         }
         virgl_resource_dirty(res, images[i].u.tex.level);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
   return 0;
}

/* Encode a MEMORY_BARRIER with the given barrier flag bits. */
int virgl_encode_memory_barrier(struct virgl_context *ctx,
                                unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}

/* Encode a compute LAUNCH_GRID: block and grid dimensions, then either the
 * indirect-parameter resource or a zero handle, then the indirect offset.
 */
int virgl_encode_launch_grid(struct virgl_context *ctx,
                             const struct pipe_grid_info *grid_info)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
   if (grid_info->indirect) {
      struct virgl_resource *res = virgl_resource(grid_info->indirect);
      virgl_encoder_write_res(ctx, res);
   } else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
   return 0;
}

/* Encode a TEXTURE_BARRIER with the given flag bits. */
int virgl_encode_texture_barrier(struct virgl_context *ctx,
                                 unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}

/* Send a NUL-terminated debug-flag string to the host, truncated to the
 * maximum 16-bit dword payload.
 */
int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
                                       const char *flagstring)
{
   unsigned long slen = strlen(flagstring) + 1;
   uint32_t sslen;
   uint32_t string_length;

   /* NOTE(review): slen includes the NUL so it is never 0; this check is dead. */
   if (!slen)
      return 0;

   /* Cap at the maximum payload expressible in the 16-bit length field. */
   if (slen > 4 * 0xffff) {
      debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
      slen = 4 * 0xffff;
   }

   sslen = (uint32_t )(slen + 3) / 4;
   string_length = (uint32_t)MIN2(sslen * 4, slen);

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
   virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
   return 0;
}

/* Encode a SET_TWEAKS command: tweak id and its value. */
int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, tweak);
   virgl_encoder_write_dword(ctx->cbuf, value);
   return 0;
}

/* Encode GET_QUERY_RESULT_QBO: write the result of query @handle into
 * buffer @res at @offset, with the given result type and index.
 */
int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
                                      uint32_t handle,
                                      struct virgl_resource *res, boolean wait,
                                      uint32_t result_type,
                                      uint32_t offset,
                                      uint32_t index)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   virgl_encoder_write_dword(ctx->cbuf, result_type);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, index);
   return 0;
}

/* Encode a TRANSFER3D command for @trans in the given @direction.
 * Guest-backed blob 2D level-0 transfers send explicit strides; all other
 * cases let the host infer them.
 */
void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
                           struct virgl_transfer *trans, uint32_t direction)
{
   uint32_t command;
   struct virgl_resource *vres = virgl_resource(trans->base.resource);
   enum virgl_transfer3d_encode_stride stride_type =
        virgl_transfer3d_host_inferred_stride;

   if (trans->base.box.depth == 1 && trans->base.level == 0 &&
       trans->base.resource->target == PIPE_TEXTURE_2D &&
       vres->blob_mem == VIRGL_BLOB_MEM_HOST3D_GUEST)
      stride_type = virgl_transfer3d_explicit_stride;

   command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
   virgl_encoder_write_dword(buf, command);
   virgl_encoder_transfer3d_common(vs, buf, trans, stride_type);
   virgl_encoder_write_dword(buf, trans->offset);
   virgl_encoder_write_dword(buf, direction);
}

/* Encode a COPY_TRANSFER3D: transfer into @trans's target from its staging
 * source resource (copy_src_hw_res) at copy_src_offset.
 */
void virgl_encode_copy_transfer(struct virgl_context *ctx,
                                struct virgl_transfer *trans)
{
   uint32_t command;
   struct virgl_screen *vs = virgl_screen(ctx->base.screen);

   assert(trans->copy_src_hw_res);

   command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
   virgl_encoder_write_cmd_dword(ctx, command);
   /* Copy transfers need to explicitly specify the stride, since it may differ
    * from the image stride.
    */
   virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
   vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, TRUE);
   virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
   /* At the moment all copy transfers are synchronized. */
   virgl_encoder_write_dword(ctx->cbuf, 1);
}

/* Pad the transfer command buffer up to VIRGL_MAX_TBUF_DWORDS with an
 * END_TRANSFERS command whose length covers the remaining space.
 */
void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
{
   uint32_t command, diff;
   diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
   if (diff) {
      /* diff - 1: the command header dword itself fills one slot. */
      command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
      virgl_encoder_write_dword(buf, command);
   }
}

/* Ask the host to write its memory info into @res. */
void virgl_encode_get_memory_info(struct virgl_context *ctx, struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_MEMORY_INFO, 0, 1));
   virgl_encoder_write_res(ctx, res);
}

void virgl_encode_emit_string_marker(struct virgl_context *ctx,
1537
const char *message, int len)
1538
{
1539
if (!len)
1540
return;
1541
1542
if (len > 4 * 0xffff) {
1543
debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1544
len = 4 * 0xffff;
1545
}
1546
1547
uint32_t buf_len = (uint32_t )(len + 3) / 4 + 1;
1548
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_EMIT_STRING_MARKER, 0, buf_len));
1549
virgl_encoder_write_dword(ctx->cbuf, len);
1550
virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)message, len);
1551
}
1552
1553