/*
 * Source: Mesa (21.2-virgl branch), src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c
 * (web-viewer navigation chrome removed from this scraped copy)
 */
#include "util/format/u_format.h"
3
4
#include "nvc0/nvc0_context.h"
5
6
/* Per-mapping bookkeeping for miptree transfers: rect[0] describes the
 * texture region being accessed, rect[1] the linear staging bo used for
 * the (de)tiling copies (unused for direct mappings). */
struct nvc0_transfer {
   struct pipe_transfer base;
   struct nv50_m2mf_rect rect[2]; /* [0] = texture, [1] = staging buffer */
   uint32_t nblocksx;             /* copy extent, in format blocks */
   uint16_t nblocksy;
   uint16_t nlayers;              /* depth of the mapped box */
};
13
14
/* Copy a rectangle of nblocksx x nblocksy blocks between two surfaces using
 * the Fermi M2MF engine.  Either side may be tiled (its bo has a memtype) or
 * pitch-linear; tiling geometry or a folded byte offset is programmed
 * accordingly.  The copy is issued in slices of at most 2047 lines, the
 * hardware limit on LINE_COUNT per EXEC.
 */
static void
nvc0_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   const int cpp = dst->cpp;
   uint32_t src_ofst = src->base;
   uint32_t dst_ofst = dst->base;
   uint32_t height = nblocksy;
   uint32_t sy = src->y;
   uint32_t dy = dst->y;
   uint32_t exec = (1 << 20); /* NOTE(review): presumably an always-set EXEC mode bit -- confirm against envytools rnndb */

   assert(dst->cpp == src->cpp);

   /* reference both buffers for this submission and validate */
   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   if (nouveau_bo_memtype(src->bo)) {
      /* tiled source: program its tiling geometry */
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_IN), 5);
      PUSH_DATA (push, src->tile_mode);
      PUSH_DATA (push, src->width * cpp);
      PUSH_DATA (push, src->height);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
   } else {
      /* linear source: fold x/y into the byte offset */
      src_ofst += src->y * src->pitch + src->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_IN), 1);
      PUSH_DATA (push, src->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_IN;
   }

   if (nouveau_bo_memtype(dst->bo)) {
      /* tiled destination */
      BEGIN_NVC0(push, NVC0_M2MF(TILING_MODE_OUT), 5);
      PUSH_DATA (push, dst->tile_mode);
      PUSH_DATA (push, dst->width * cpp);
      PUSH_DATA (push, dst->height);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
   } else {
      /* linear destination */
      dst_ofst += dst->y * dst->pitch + dst->x * cpp;

      BEGIN_NVC0(push, NVC0_M2MF(PITCH_OUT), 1);
      PUSH_DATA (push, dst->width * cpp);

      exec |= NVC0_M2MF_EXEC_LINEAR_OUT;
   }

   while (height) {
      int line_count = height > 2047 ? 2047 : height; /* hw max per EXEC */

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->bo->offset + src_ofst);
      PUSH_DATA (push, src->bo->offset + src_ofst);

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->bo->offset + dst_ofst);
      PUSH_DATA (push, dst->bo->offset + dst_ofst);

      if (!(exec & NVC0_M2MF_EXEC_LINEAR_IN)) {
         /* tiled side advances via the y coordinate ... */
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_IN_X), 2);
         PUSH_DATA (push, src->x * cpp);
         PUSH_DATA (push, sy);
      } else {
         /* ... linear side via the byte offset */
         src_ofst += line_count * src->pitch;
      }
      if (!(exec & NVC0_M2MF_EXEC_LINEAR_OUT)) {
         BEGIN_NVC0(push, NVC0_M2MF(TILING_POSITION_OUT_X), 2);
         PUSH_DATA (push, dst->x * cpp);
         PUSH_DATA (push, dy);
      } else {
         dst_ofst += line_count * dst->pitch;
      }

      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, nblocksx * cpp);
      PUSH_DATA (push, line_count);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, exec);

      height -= line_count;
      sy += line_count;
      dy += line_count;
   }

   nouveau_bufctx_reset(bctx, 0);
}
108
109
/* Kepler counterpart of nvc0_m2mf_transfer_rect: uses the dedicated COPY
 * engine, which processes the whole rectangle with a single EXEC instead of
 * slicing by line count.
 */
static void
nve4_m2mf_transfer_rect(struct nvc0_context *nvc0,
                        const struct nv50_m2mf_rect *dst,
                        const struct nv50_m2mf_rect *src,
                        uint32_t nblocksx, uint32_t nblocksy)
{
   /* Component size in bytes (cs) and component count (nc) for the copy
    * engine's swizzle setup, indexed by bytes per pixel.  Unlisted cpp
    * values have cs == 0 and are rejected by the assert below. */
   static const struct {
      int cs;
      int nc;
   } cpbs[] = {
      [ 1] = { 1, 1 },
      [ 2] = { 1, 2 },
      [ 3] = { 1, 3 },
      [ 4] = { 1, 4 },
      [ 6] = { 2, 3 },
      [ 8] = { 2, 4 },
      [ 9] = { 3, 3 },
      [12] = { 3, 4 },
      [16] = { 4, 4 },
   };
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bufctx *bctx = nvc0->bufctx;
   uint32_t exec;
   uint32_t src_base = src->base;
   uint32_t dst_base = dst->base;

   assert(dst->cpp < ARRAY_SIZE(cpbs) && cpbs[dst->cpp].cs);
   assert(dst->cpp == src->cpp);

   nouveau_bufctx_refn(bctx, 0, dst->bo, dst->domain | NOUVEAU_BO_WR);
   nouveau_bufctx_refn(bctx, 0, src->bo, src->domain | NOUVEAU_BO_RD);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   exec = NVE4_COPY_EXEC_SWIZZLE_ENABLE | NVE4_COPY_EXEC_2D_ENABLE | NVE4_COPY_EXEC_FLUSH | NVE4_COPY_EXEC_COPY_MODE_NON_PIPELINED;

   /* identity component mapping, sized per the cpp table above */
   BEGIN_NVC0(push, NVE4_COPY(SWIZZLE), 1);
   PUSH_DATA (push, (cpbs[dst->cpp].nc - 1) << 24 |
                    (cpbs[src->cpp].nc - 1) << 20 |
                    (cpbs[src->cpp].cs - 1) << 16 |
                    3 << 12 /* DST_W = SRC_W */ |
                    2 << 8 /* DST_Z = SRC_Z */ |
                    1 << 4 /* DST_Y = SRC_Y */ |
                    0 << 0 /* DST_X = SRC_X */);

   if (nouveau_bo_memtype(dst->bo)) {
      /* tiled destination: full block-linear geometry plus x/y origin */
      BEGIN_NVC0(push, NVE4_COPY(DST_BLOCK_DIMENSIONS), 6);
      PUSH_DATA (push, dst->tile_mode | NVE4_COPY_SRC_BLOCK_DIMENSIONS_GOB_HEIGHT_FERMI_8);
      PUSH_DATA (push, dst->width);
      PUSH_DATA (push, dst->height);
      PUSH_DATA (push, dst->depth);
      PUSH_DATA (push, dst->z);
      PUSH_DATA (push, (dst->y << 16) | dst->x);
   } else {
      assert(!dst->z);
      dst_base += dst->y * dst->pitch + dst->x * dst->cpp;
      /* NOTE(review): bit is set on the pitch-linear path despite the
       * BLOCKLINEAR name -- confirm field semantics in the class header */
      exec |= NVE4_COPY_EXEC_DST_LAYOUT_BLOCKLINEAR;
   }

   if (nouveau_bo_memtype(src->bo)) {
      BEGIN_NVC0(push, NVE4_COPY(SRC_BLOCK_DIMENSIONS), 6);
      PUSH_DATA (push, src->tile_mode | NVE4_COPY_SRC_BLOCK_DIMENSIONS_GOB_HEIGHT_FERMI_8);
      PUSH_DATA (push, src->width);
      PUSH_DATA (push, src->height);
      PUSH_DATA (push, src->depth);
      PUSH_DATA (push, src->z);
      PUSH_DATA (push, (src->y << 16) | src->x);
   } else {
      assert(!src->z);
      src_base += src->y * src->pitch + src->x * src->cpp;
      exec |= NVE4_COPY_EXEC_SRC_LAYOUT_BLOCKLINEAR;
   }

   /* addresses, pitches and extent in one contiguous method range */
   BEGIN_NVC0(push, NVE4_COPY(SRC_ADDRESS_HIGH), 8);
   PUSH_DATAh(push, src->bo->offset + src_base);
   PUSH_DATA (push, src->bo->offset + src_base);
   PUSH_DATAh(push, dst->bo->offset + dst_base);
   PUSH_DATA (push, dst->bo->offset + dst_base);
   PUSH_DATA (push, src->pitch);
   PUSH_DATA (push, dst->pitch);
   PUSH_DATA (push, nblocksx);
   PUSH_DATA (push, nblocksy);

   BEGIN_NVC0(push, NVE4_COPY(EXEC), 1);
   PUSH_DATA (push, exec);

   nouveau_bufctx_reset(bctx, 0);
}
197
198
/* Upload `size` bytes from `data` into `dst` at `offset` by embedding the
 * payload in the push buffer via the M2MF DATA method (CPU -> FIFO -> GPU).
 * The payload is pushed as whole 32-bit words; the last packet's byte
 * length is clamped to the remaining `size`. */
void
nvc0_m2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4; /* payload length in 32-bit words */

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      /* nr payload words + 9 words of setup methods */
      if (!PUSH_SPACE(push, nr + 9))
         break;

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4)); /* last chunk may be short */
      PUSH_DATA (push, 1);
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, 0x100111); /* NOTE(review): magic EXEC value -- confirm bit meaning against rnndb */

      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_NIC0(push, NVC0_M2MF(DATA), nr);
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}
239
240
/* Kepler variant of nvc0_m2mf_push_linear: uploads inline data through the
 * P2MF UPLOAD methods instead of the M2MF object.  Same chunking scheme,
 * except the EXEC word shares the payload packet (hence the -1 below). */
void
nve4_p2mf_push_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned offset, unsigned domain,
                      unsigned size, const void *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nouveau_pushbuf *push = nv->pushbuf;
   uint32_t *src = (uint32_t *)data;
   unsigned count = (size + 3) / 4; /* payload length in 32-bit words */

   nouveau_bufctx_refn(nvc0->bufctx, 0, dst, domain | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, nvc0->bufctx);
   nouveau_pushbuf_validate(push);

   while (count) {
      /* one packet slot is taken by the UPLOAD_EXEC word */
      unsigned nr = MIN2(count, (NV04_PFIFO_MAX_PACKET_LEN - 1));

      if (!PUSH_SPACE(push, nr + 10))
         break;

      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, dst->offset + offset);
      PUSH_DATA (push, dst->offset + offset);
      BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, MIN2(size, nr * 4)); /* last chunk may be short */
      PUSH_DATA (push, 1);
      /* must not be interrupted (trap on QUERY fence, 0x50 works however) */
      BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), nr + 1);
      PUSH_DATA (push, 0x1001); /* NOTE(review): magic EXEC value -- confirm against class docs */
      PUSH_DATAp(push, src, nr);

      count -= nr;
      src += nr;
      offset += nr * 4;
      size -= nr * 4;
   }

   nouveau_bufctx_reset(nvc0->bufctx, 0);
}
279
280
/* GPU-side linear buffer-to-buffer copy of `size` bytes via M2MF, issued in
 * chunks of at most 128 KiB (1 << 17) per EXEC. */
static void
nvc0_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   while (size) {
      unsigned bytes = MIN2(size, 1 << 17); /* per-EXEC chunk limit */

      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_OUT_HIGH), 2);
      PUSH_DATAh(push, dst->offset + dstoff);
      PUSH_DATA (push, dst->offset + dstoff);
      BEGIN_NVC0(push, NVC0_M2MF(OFFSET_IN_HIGH), 2);
      PUSH_DATAh(push, src->offset + srcoff);
      PUSH_DATA (push, src->offset + srcoff);
      BEGIN_NVC0(push, NVC0_M2MF(LINE_LENGTH_IN), 2);
      PUSH_DATA (push, bytes);
      PUSH_DATA (push, 1); /* single line: purely linear copy */
      BEGIN_NVC0(push, NVC0_M2MF(EXEC), 1);
      PUSH_DATA (push, NVC0_M2MF_EXEC_QUERY_SHORT |
                 NVC0_M2MF_EXEC_LINEAR_IN | NVC0_M2MF_EXEC_LINEAR_OUT);

      srcoff += bytes;
      dstoff += bytes;
      size -= bytes;
   }

   nouveau_bufctx_reset(bctx, 0);
}
317
318
/* Kepler copy-engine version of nvc0_m2mf_copy_linear: the engine takes the
 * full byte count in a single EXEC, no chunking loop needed. */
static void
nve4_m2mf_copy_linear(struct nouveau_context *nv,
                      struct nouveau_bo *dst, unsigned dstoff, unsigned dstdom,
                      struct nouveau_bo *src, unsigned srcoff, unsigned srcdom,
                      unsigned size)
{
   struct nouveau_pushbuf *push = nv->pushbuf;
   struct nouveau_bufctx *bctx = nvc0_context(&nv->pipe)->bufctx;

   nouveau_bufctx_refn(bctx, 0, src, srcdom | NOUVEAU_BO_RD);
   nouveau_bufctx_refn(bctx, 0, dst, dstdom | NOUVEAU_BO_WR);
   nouveau_pushbuf_bufctx(push, bctx);
   nouveau_pushbuf_validate(push);

   BEGIN_NVC0(push, NVE4_COPY(SRC_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, src->offset + srcoff);
   PUSH_DATA (push, src->offset + srcoff);
   PUSH_DATAh(push, dst->offset + dstoff);
   PUSH_DATA (push, dst->offset + dstoff);
   BEGIN_NVC0(push, NVE4_COPY(X_COUNT), 1);
   PUSH_DATA (push, size);
   BEGIN_NVC0(push, NVE4_COPY(EXEC), 1);
   /* NOTE(review): both *_LAYOUT_BLOCKLINEAR bits set for a pitch-linear
    * copy, matching nve4_m2mf_transfer_rect's linear paths -- the define
    * name appears inverted relative to the hw field; confirm in rnndb */
   PUSH_DATA (push, NVE4_COPY_EXEC_COPY_MODE_NON_PIPELINED |
              NVE4_COPY_EXEC_FLUSH |
              NVE4_COPY_EXEC_SRC_LAYOUT_BLOCKLINEAR |
              NVE4_COPY_EXEC_DST_LAYOUT_BLOCKLINEAR);

   nouveau_bufctx_reset(bctx, 0);
}
347
348
349
static inline bool
350
nvc0_mt_transfer_can_map_directly(struct nv50_miptree *mt)
351
{
352
if (mt->base.domain == NOUVEAU_BO_VRAM)
353
return false;
354
if (mt->base.base.usage != PIPE_USAGE_STAGING)
355
return false;
356
return !nouveau_bo_memtype(mt->base.bo);
357
}
358
359
static inline bool
360
nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
361
{
362
if (!mt->base.mm) {
363
uint32_t access = (usage & PIPE_MAP_WRITE) ?
364
NOUVEAU_BO_WR : NOUVEAU_BO_RD;
365
return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
366
}
367
if (usage & PIPE_MAP_WRITE)
368
return !mt->base.fence || nouveau_fence_wait(mt->base.fence, &nvc0->base.debug);
369
return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
370
}
371
372
/* Map a miptree region for CPU access.
 *
 * Two paths:
 *  - direct map: linear non-VRAM staging resources are mapped in place
 *    after syncing with the GPU;
 *  - staging copy: otherwise a GART bounce buffer is allocated and, for
 *    read mappings, filled layer by layer with the m2mf/copy engine.
 *
 * Returns the CPU pointer and stores the transfer in *ptransfer, or NULL
 * on failure (also when PIPE_MAP_DIRECTLY was requested but impossible).
 */
void *
nvc0_miptree_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *res,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nouveau_device *dev = nvc0->screen->base.device;
   struct nv50_miptree *mt = nv50_miptree(res);
   struct nvc0_transfer *tx;
   uint32_t size;
   int ret;
   unsigned flags = 0;

   if (nvc0_mt_transfer_can_map_directly(mt)) {
      /* ret != 0 means the direct path failed at some step */
      ret = !nvc0_mt_sync(nvc0, mt, usage);
      if (!ret)
         ret = nouveau_bo_map(mt->base.bo, 0, NULL);
      if (ret &&
          (usage & PIPE_MAP_DIRECTLY))
         return NULL;
      if (!ret)
         usage |= PIPE_MAP_DIRECTLY;
   } else
   if (usage & PIPE_MAP_DIRECTLY)
      return NULL;

   tx = CALLOC_STRUCT(nvc0_transfer);
   if (!tx)
      return NULL;

   pipe_resource_reference(&tx->base.resource, res);

   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;

   /* for plain (non-compressed) formats the block counts also cover the
    * multisample grid */
   if (util_format_is_plain(res->format)) {
      tx->nblocksx = box->width << mt->ms_x;
      tx->nblocksy = box->height << mt->ms_y;
   } else {
      tx->nblocksx = util_format_get_nblocksx(res->format, box->width);
      tx->nblocksy = util_format_get_nblocksy(res->format, box->height);
   }
   tx->nlayers = box->depth;

   if (usage & PIPE_MAP_DIRECTLY) {
      /* direct path: compute the byte offset of the box inside the bo */
      tx->base.stride = mt->level[level].pitch;
      tx->base.layer_stride = mt->layer_stride;
      uint32_t offset = box->y * tx->base.stride +
         util_format_get_stride(res->format, box->x);
      if (!mt->layout_3d)
         offset += mt->layer_stride * box->z;
      else
         offset += nvc0_mt_zslice_offset(mt, level, box->z);
      *ptransfer = &tx->base;
      return mt->base.bo->map + mt->base.offset + offset;
   }

   /* staging path: tightly packed linear layout in the bounce buffer */
   tx->base.stride = tx->nblocksx * util_format_get_blocksize(res->format);
   tx->base.layer_stride = tx->nblocksy * tx->base.stride;

   nv50_m2mf_rect_setup(&tx->rect[0], res, level, box->x, box->y, box->z);

   size = tx->base.layer_stride;

   /* one layer_stride per array layer / depth slice */
   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        size * tx->nlayers, NULL, &tx->rect[1].bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->rect[1].cpp = tx->rect[0].cpp;
   tx->rect[1].width = tx->nblocksx;
   tx->rect[1].height = tx->nblocksy;
   tx->rect[1].depth = 1;
   tx->rect[1].pitch = tx->base.stride;
   tx->rect[1].domain = NOUVEAU_BO_GART;

   if (usage & PIPE_MAP_READ) {
      /* download texture -> staging, one layer at a time */
      unsigned base = tx->rect[0].base;
      unsigned z = tx->rect[0].z;
      unsigned i;
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[1], &tx->rect[0],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += size;
      }
      /* restore the rects so unmap can replay the same walk for writes */
      tx->rect[0].z = z;
      tx->rect[0].base = base;
      tx->rect[1].base = 0;
   }

   /* bo may already be persistently mapped */
   if (tx->rect[1].bo->map) {
      *ptransfer = &tx->base;
      return tx->rect[1].bo->map;
   }

   if (usage & PIPE_MAP_READ)
      flags = NOUVEAU_BO_RD;
   if (usage & PIPE_MAP_WRITE)
      flags |= NOUVEAU_BO_WR;

   ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->rect[1].bo->map;
}
494
495
/* Unmap a miptree transfer.  For write mappings the staging buffer is
 * uploaded back into the texture layer by layer; the staging bo is then
 * released (deferred behind a fence if copies were queued). */
void
nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
   struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
   unsigned i;

   if (tx->base.usage & PIPE_MAP_DIRECTLY) {
      /* direct mapping: nothing to copy back, no staging bo to free */
      pipe_resource_reference(&transfer->resource, NULL);

      FREE(tx);
      return;
   }

   if (tx->base.usage & PIPE_MAP_WRITE) {
      /* upload staging -> texture, mirroring the walk done in map() */
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += tx->nblocksy * tx->base.stride;
      }
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_wr, 1);

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nvc0->screen->base.fence.current,
                         nouveau_fence_unref_bo, tx->rect[1].bo);
   } else {
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
   }
   if (tx->base.usage & PIPE_MAP_READ)
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);

   pipe_resource_reference(&transfer->resource, NULL);

   FREE(tx);
}
536
537
/* This happens rather often with DTD9/st. */
/* Update a range of a buffer that may be bound as a constant buffer.  If a
 * current constbuf binding fully covers the range, route the update through
 * the 3D engine's CB_POS path (nvc0_cb_bo_push) so the bound view stays
 * coherent; otherwise fall back to a plain inline data push. */
static void
nvc0_cb_push(struct nouveau_context *nv,
             struct nv04_resource *res,
             unsigned offset, unsigned words, const uint32_t *data)
{
   struct nvc0_context *nvc0 = nvc0_context(&nv->pipe);
   struct nvc0_constbuf *cb = NULL;
   int s;

   /* Go through all the constbuf binding points of this buffer and try to
    * find one which contains the region to be updated.
    */
   for (s = 0; s < 6 && !cb; s++) {
      uint16_t bindings = res->cb_bindings[s];
      while (bindings) {
         int i = ffs(bindings) - 1; /* lowest set binding slot */
         uint32_t cb_offset = nvc0->constbuf[s][i].offset;

         bindings &= ~(1 << i);
         /* binding must cover [offset, offset + words*4) entirely */
         if (cb_offset <= offset &&
             cb_offset + nvc0->constbuf[s][i].size >= offset + words * 4) {
            cb = &nvc0->constbuf[s][i];
            break;
         }
      }
   }

   if (cb) {
      nvc0_cb_bo_push(nv, res->bo, res->domain,
                      res->offset + cb->offset, cb->size,
                      offset - cb->offset, words, data);
   } else {
      nv->push_data(nv, res->bo, res->offset + offset, res->domain,
                    words * 4, data);
   }
}
574
575
/* Upload `words` 32-bit words into a constant buffer via the 3D engine's
 * CB_POS inline path, after (re)binding CB_SIZE/CB_ADDRESS to the buffer.
 * `offset` is relative to `base` and must be 4-byte aligned; the range must
 * fit inside `size` (rounded up to the hw's 0x100 granularity). */
void
nvc0_cb_bo_push(struct nouveau_context *nv,
                struct nouveau_bo *bo, unsigned domain,
                unsigned base, unsigned size,
                unsigned offset, unsigned words, const uint32_t *data)
{
   struct nouveau_pushbuf *push = nv->pushbuf;

   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_count, 1);
   NOUVEAU_DRV_STAT(nv->screen, constbuf_upload_bytes, words * 4);

   assert(!(offset & 3));
   size = align(size, 0x100); /* CB size granularity */

   assert(offset < size);
   assert(offset + words * 4 <= size);

   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, size);
   PUSH_DATAh(push, bo->offset + base);
   PUSH_DATA (push, bo->offset + base);

   while (words) {
      /* one slot of each packet is taken by the CB_POS word */
      unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN - 1);

      PUSH_SPACE(push, nr + 2);
      PUSH_REFN (push, bo, NOUVEAU_BO_WR | domain);
      BEGIN_1IC0(push, NVC0_3D(CB_POS), nr + 1);
      PUSH_DATA (push, offset);
      PUSH_DATAp(push, data, nr);

      words -= nr;
      data += nr;
      offset += nr * 4;
   }
}
611
612
void
613
nvc0_init_transfer_functions(struct nvc0_context *nvc0)
614
{
615
if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
616
nvc0->m2mf_copy_rect = nve4_m2mf_transfer_rect;
617
nvc0->base.copy_data = nve4_m2mf_copy_linear;
618
nvc0->base.push_data = nve4_p2mf_push_linear;
619
} else {
620
nvc0->m2mf_copy_rect = nvc0_m2mf_transfer_rect;
621
nvc0->base.copy_data = nvc0_m2mf_copy_linear;
622
nvc0->base.push_data = nvc0_m2mf_push_linear;
623
}
624
nvc0->base.push_cb = nvc0_cb_push;
625
}
626
627