Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/drivers/i915/i915_prim_vbuf.c
4570 views
1
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
27
28
/**
 * \file
 * Build post-transformation, post-clipping vertex buffers and element
 * lists by hooking into the end of the primitive pipeline and
 * manipulating the vertex_id field in the vertex headers.
 *
 * XXX: work in progress
 *
 * \author José Fonseca <[email protected]>
 * \author Keith Whitwell <[email protected]>
 */
39
40
#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"

#include "util/u_debug.h"
#include "util/u_fifo.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "i915_batch.h"
#include "i915_context.h"
#include "i915_reg.h"
#include "i915_state.h"
53
#define VBUF_MAP_BUFFER
54
55
/**
56
* Primitive renderer for i915.
57
*/
58
struct i915_vbuf_render {
59
struct vbuf_render base;
60
61
struct i915_context *i915;
62
63
/** Vertex size in bytes */
64
size_t vertex_size;
65
66
/** Software primitive */
67
unsigned prim;
68
69
/** Hardware primitive */
70
unsigned hwprim;
71
72
/** Genereate a vertex list */
73
unsigned fallback;
74
75
/* Stuff for the vbo */
76
struct i915_winsys_buffer *vbo;
77
size_t vbo_size; /**< current size of allocated buffer */
78
size_t vbo_alloc_size; /**< minimum buffer size to allocate */
79
size_t vbo_hw_offset; /**< offset that we program the hardware with */
80
size_t vbo_sw_offset; /**< offset that we work with */
81
size_t vbo_index; /**< index offset to be added to all indices */
82
void *vbo_ptr;
83
size_t vbo_max_used;
84
size_t vbo_max_index; /**< index offset to be added to all indices */
85
86
#ifndef VBUF_MAP_BUFFER
87
size_t map_used_start;
88
size_t map_used_end;
89
size_t map_size;
90
#endif
91
};
92
93
/**
 * Downcast from the generic vbuf_render to our derived struct.
 * Safe because every vbuf_render handed to these callbacks was created
 * by i915_vbuf_render_create().
 */
static inline struct i915_vbuf_render *
i915_vbuf_render(struct vbuf_render *render)
{
   assert(render);
   return (struct i915_vbuf_render *)render;
}
102
103
/**
104
* If vbo state differs between renderer and context
105
* push state to the context. This function pushes
106
* hw_offset to i915->vbo_offset and vbo to i915->vbo.
107
*
108
* Side effects:
109
* May updates context vbo_offset and vbo fields.
110
*/
111
static void
112
i915_vbuf_update_vbo_state(struct vbuf_render *render)
113
{
114
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
115
struct i915_context *i915 = i915_render->i915;
116
117
if (i915->vbo != i915_render->vbo ||
118
i915->vbo_offset != i915_render->vbo_hw_offset) {
119
i915->vbo = i915_render->vbo;
120
i915->vbo_offset = i915_render->vbo_hw_offset;
121
i915->dirty |= I915_NEW_VBO;
122
}
123
}
124
125
/**
126
* Callback exported to the draw module.
127
* Returns the current vertex_info.
128
*
129
* Side effects:
130
* If state is dirty update derived state.
131
*/
132
static const struct vertex_info *
133
i915_vbuf_render_get_vertex_info(struct vbuf_render *render)
134
{
135
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
136
struct i915_context *i915 = i915_render->i915;
137
138
if (i915->dirty) {
139
/* make sure we have up to date vertex layout */
140
i915_update_derived(i915);
141
}
142
143
return &i915->current.vertex_info;
144
}
145
146
/**
147
* Reserve space in the vbo for vertices.
148
*
149
* Side effects:
150
* None.
151
*/
152
static bool
153
i915_vbuf_render_reserve(struct i915_vbuf_render *i915_render, size_t size)
154
{
155
struct i915_context *i915 = i915_render->i915;
156
157
if (i915_render->vbo_size < size + i915_render->vbo_sw_offset)
158
return false;
159
160
if (i915->vbo_flushed)
161
return false;
162
163
return true;
164
}
165
166
/**
167
* Allocate a new vbo buffer should there not be enough space for
168
* the requested number of vertices by the draw module.
169
*
170
* Side effects:
171
* Updates hw_offset, sw_offset, index and allocates a new buffer.
172
* Will set i915->vbo to null on buffer allocation.
173
*/
174
static void
175
i915_vbuf_render_new_buf(struct i915_vbuf_render *i915_render, size_t size)
176
{
177
struct i915_context *i915 = i915_render->i915;
178
struct i915_winsys *iws = i915->iws;
179
180
if (i915_render->vbo) {
181
iws->buffer_unmap(iws, i915_render->vbo);
182
iws->buffer_destroy(iws, i915_render->vbo);
183
/*
184
* XXX If buffers where referenced then this should be done in
185
* update_vbo_state but since they arn't and malloc likes to reuse
186
* memory we need to set it to null
187
*/
188
i915->vbo = NULL;
189
i915_render->vbo = NULL;
190
}
191
192
i915->vbo_flushed = 0;
193
194
i915_render->vbo_size = MAX2(size, i915_render->vbo_alloc_size);
195
i915_render->vbo_hw_offset = 0;
196
i915_render->vbo_sw_offset = 0;
197
i915_render->vbo_index = 0;
198
199
#ifndef VBUF_MAP_BUFFER
200
if (i915_render->vbo_size > i915_render->map_size) {
201
i915_render->map_size = i915_render->vbo_size;
202
FREE(i915_render->vbo_ptr);
203
i915_render->vbo_ptr = MALLOC(i915_render->map_size);
204
}
205
#endif
206
207
i915_render->vbo =
208
iws->buffer_create(iws, i915_render->vbo_size, I915_NEW_VERTEX);
209
i915_render->vbo_ptr = iws->buffer_map(iws, i915_render->vbo, true);
210
}
211
212
/**
213
* Callback exported to the draw module.
214
*
215
* Side effects:
216
* Updates hw_offset, sw_offset, index and may allocate
217
* a new buffer. Also updates may update the vbo state
218
* on the i915 context.
219
*/
220
static boolean
221
i915_vbuf_render_allocate_vertices(struct vbuf_render *render,
222
ushort vertex_size, ushort nr_vertices)
223
{
224
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
225
size_t size = (size_t)vertex_size * (size_t)nr_vertices;
226
size_t offset;
227
228
/*
229
* Align sw_offset with first multiple of vertex size from hw_offset.
230
* Set index to be the multiples from from hw_offset to sw_offset.
231
* i915_vbuf_render_new_buf will reset index, sw_offset, hw_offset
232
* when it allocates a new buffer this is correct.
233
*/
234
{
235
offset = i915_render->vbo_sw_offset - i915_render->vbo_hw_offset;
236
offset = util_align_npot(offset, vertex_size);
237
i915_render->vbo_sw_offset = i915_render->vbo_hw_offset + offset;
238
i915_render->vbo_index = offset / vertex_size;
239
}
240
241
if (!i915_vbuf_render_reserve(i915_render, size))
242
i915_vbuf_render_new_buf(i915_render, size);
243
244
/*
245
* If a new buffer has been alocated sw_offset,
246
* hw_offset & index will be reset by new_buf
247
*/
248
249
i915_render->vertex_size = vertex_size;
250
251
i915_vbuf_update_vbo_state(render);
252
253
if (!i915_render->vbo)
254
return FALSE;
255
return TRUE;
256
}
257
258
static void *
259
i915_vbuf_render_map_vertices(struct vbuf_render *render)
260
{
261
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
262
struct i915_context *i915 = i915_render->i915;
263
264
if (i915->vbo_flushed)
265
debug_printf("%s bad vbo flush occurred stalling on hw\n", __FUNCTION__);
266
267
#ifdef VBUF_MAP_BUFFER
268
return (unsigned char *)i915_render->vbo_ptr + i915_render->vbo_sw_offset;
269
#else
270
return (unsigned char *)i915_render->vbo_ptr;
271
#endif
272
}
273
274
static void
275
i915_vbuf_render_unmap_vertices(struct vbuf_render *render, ushort min_index,
276
ushort max_index)
277
{
278
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
279
struct i915_context *i915 = i915_render->i915;
280
struct i915_winsys *iws = i915->iws;
281
282
i915_render->vbo_max_index = max_index;
283
i915_render->vbo_max_used = MAX2(i915_render->vbo_max_used,
284
i915_render->vertex_size * (max_index + 1));
285
#ifdef VBUF_MAP_BUFFER
286
(void)iws;
287
#else
288
i915_render->map_used_start = i915_render->vertex_size * min_index;
289
i915_render->map_used_end = i915_render->vertex_size * (max_index + 1);
290
iws->buffer_write(
291
iws, i915_render->vbo,
292
i915_render->map_used_start + i915_render->vbo_sw_offset,
293
i915_render->map_used_end - i915_render->map_used_start,
294
(unsigned char *)i915_render->vbo_ptr + i915_render->map_used_start);
295
296
#endif
297
}
298
299
/**
300
* Ensure that the given max_index given is not larger ushort max.
301
* If it is larger then ushort max it advanced the hw_offset to the
302
* same position in the vbo as sw_offset and set index to zero.
303
*
304
* Side effects:
305
* On failure update hw_offset and index.
306
*/
307
static void
308
i915_vbuf_ensure_index_bounds(struct vbuf_render *render, unsigned max_index)
309
{
310
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
311
312
if (max_index + i915_render->vbo_index < ((1 << 17) - 1))
313
return;
314
315
i915_render->vbo_hw_offset = i915_render->vbo_sw_offset;
316
i915_render->vbo_index = 0;
317
318
i915_vbuf_update_vbo_state(render);
319
}
320
321
static void
322
i915_vbuf_render_set_primitive(struct vbuf_render *render,
323
enum pipe_prim_type prim)
324
{
325
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
326
i915_render->prim = prim;
327
328
switch (prim) {
329
case PIPE_PRIM_POINTS:
330
i915_render->hwprim = PRIM3D_POINTLIST;
331
i915_render->fallback = 0;
332
break;
333
case PIPE_PRIM_LINES:
334
i915_render->hwprim = PRIM3D_LINELIST;
335
i915_render->fallback = 0;
336
break;
337
case PIPE_PRIM_LINE_LOOP:
338
i915_render->hwprim = PRIM3D_LINELIST;
339
i915_render->fallback = PIPE_PRIM_LINE_LOOP;
340
break;
341
case PIPE_PRIM_LINE_STRIP:
342
i915_render->hwprim = PRIM3D_LINESTRIP;
343
i915_render->fallback = 0;
344
break;
345
case PIPE_PRIM_TRIANGLES:
346
i915_render->hwprim = PRIM3D_TRILIST;
347
i915_render->fallback = 0;
348
break;
349
case PIPE_PRIM_TRIANGLE_STRIP:
350
i915_render->hwprim = PRIM3D_TRISTRIP;
351
i915_render->fallback = 0;
352
break;
353
case PIPE_PRIM_TRIANGLE_FAN:
354
i915_render->hwprim = PRIM3D_TRIFAN;
355
i915_render->fallback = 0;
356
break;
357
case PIPE_PRIM_QUADS:
358
i915_render->hwprim = PRIM3D_TRILIST;
359
i915_render->fallback = PIPE_PRIM_QUADS;
360
break;
361
case PIPE_PRIM_QUAD_STRIP:
362
i915_render->hwprim = PRIM3D_TRILIST;
363
i915_render->fallback = PIPE_PRIM_QUAD_STRIP;
364
break;
365
case PIPE_PRIM_POLYGON:
366
i915_render->hwprim = PRIM3D_POLY;
367
i915_render->fallback = 0;
368
break;
369
default:
370
/* FIXME: Actually, can handle a lot more just fine... */
371
assert(0 && "unexpected prim in i915_vbuf_render_set_primitive()");
372
}
373
}
374
375
/**
376
* Used for fallbacks in draw_arrays
377
*/
378
static void
379
draw_arrays_generate_indices(struct vbuf_render *render, unsigned start,
380
uint32_t nr, unsigned type)
381
{
382
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
383
struct i915_context *i915 = i915_render->i915;
384
unsigned i;
385
unsigned end = start + nr + i915_render->vbo_index;
386
start += i915_render->vbo_index;
387
388
switch (type) {
389
case 0:
390
for (i = start; i + 1 < end; i += 2)
391
OUT_BATCH((i + 0) | (i + 1) << 16);
392
if (i < end)
393
OUT_BATCH(i);
394
break;
395
case PIPE_PRIM_LINE_LOOP:
396
if (nr >= 2) {
397
for (i = start + 1; i < end; i++)
398
OUT_BATCH((i - 1) | (i + 0) << 16);
399
OUT_BATCH((i - 1) | (start) << 16);
400
}
401
break;
402
case PIPE_PRIM_QUADS:
403
for (i = start; i + 3 < end; i += 4) {
404
OUT_BATCH((i + 0) | (i + 1) << 16);
405
OUT_BATCH((i + 3) | (i + 1) << 16);
406
OUT_BATCH((i + 2) | (i + 3) << 16);
407
}
408
break;
409
case PIPE_PRIM_QUAD_STRIP:
410
for (i = start; i + 3 < end; i += 2) {
411
OUT_BATCH((i + 0) | (i + 1) << 16);
412
OUT_BATCH((i + 3) | (i + 2) << 16);
413
OUT_BATCH((i + 0) | (i + 3) << 16);
414
}
415
break;
416
default:
417
assert(0);
418
}
419
}
420
421
static unsigned
422
draw_arrays_calc_nr_indices(uint32_t nr, unsigned type)
423
{
424
switch (type) {
425
case 0:
426
return nr;
427
case PIPE_PRIM_LINE_LOOP:
428
if (nr >= 2)
429
return nr * 2;
430
else
431
return 0;
432
case PIPE_PRIM_QUADS:
433
return (nr / 4) * 6;
434
case PIPE_PRIM_QUAD_STRIP:
435
return ((nr - 2) / 2) * 6;
436
default:
437
assert(0);
438
return 0;
439
}
440
}
441
442
static void
443
draw_arrays_fallback(struct vbuf_render *render, unsigned start, uint32_t nr)
444
{
445
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
446
struct i915_context *i915 = i915_render->i915;
447
unsigned nr_indices;
448
449
nr_indices = draw_arrays_calc_nr_indices(nr, i915_render->fallback);
450
if (!nr_indices)
451
return;
452
453
i915_vbuf_ensure_index_bounds(render, start + nr_indices);
454
455
if (i915->dirty)
456
i915_update_derived(i915);
457
458
if (i915->hardware_dirty)
459
i915_emit_hardware_state(i915);
460
461
if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
462
FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);
463
464
/* Make sure state is re-emitted after a flush:
465
*/
466
i915_emit_hardware_state(i915);
467
i915->vbo_flushed = 1;
468
469
if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
470
assert(0);
471
goto out;
472
}
473
}
474
475
OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | i915_render->hwprim |
476
PRIM_INDIRECT_ELTS | nr_indices);
477
478
draw_arrays_generate_indices(render, start, nr, i915_render->fallback);
479
480
out:
481
return;
482
}
483
484
static void
485
i915_vbuf_render_draw_arrays(struct vbuf_render *render, unsigned start,
486
uint32_t nr)
487
{
488
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
489
struct i915_context *i915 = i915_render->i915;
490
491
if (i915_render->fallback) {
492
draw_arrays_fallback(render, start, nr);
493
return;
494
}
495
496
i915_vbuf_ensure_index_bounds(render, start + nr);
497
start += i915_render->vbo_index;
498
499
if (i915->dirty)
500
i915_update_derived(i915);
501
502
if (i915->hardware_dirty)
503
i915_emit_hardware_state(i915);
504
505
if (!BEGIN_BATCH(2)) {
506
FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);
507
508
/* Make sure state is re-emitted after a flush:
509
*/
510
i915_emit_hardware_state(i915);
511
i915->vbo_flushed = 1;
512
513
if (!BEGIN_BATCH(2)) {
514
assert(0);
515
goto out;
516
}
517
}
518
519
OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | PRIM_INDIRECT_SEQUENTIAL |
520
i915_render->hwprim | nr);
521
OUT_BATCH(start); /* Beginning vertex index */
522
523
out:
524
return;
525
}
526
527
/**
528
* Used for normal and fallback emitting of indices
529
* If type is zero normal operation assumed.
530
*/
531
static void
532
draw_generate_indices(struct vbuf_render *render, const ushort *indices,
533
uint32_t nr_indices, unsigned type)
534
{
535
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
536
struct i915_context *i915 = i915_render->i915;
537
unsigned i;
538
unsigned o = i915_render->vbo_index;
539
540
switch (type) {
541
case 0:
542
for (i = 0; i + 1 < nr_indices; i += 2) {
543
OUT_BATCH((o + indices[i]) | (o + indices[i + 1]) << 16);
544
}
545
if (i < nr_indices) {
546
OUT_BATCH((o + indices[i]));
547
}
548
break;
549
case PIPE_PRIM_LINE_LOOP:
550
if (nr_indices >= 2) {
551
for (i = 1; i < nr_indices; i++)
552
OUT_BATCH((o + indices[i - 1]) | (o + indices[i]) << 16);
553
OUT_BATCH((o + indices[i - 1]) | (o + indices[0]) << 16);
554
}
555
break;
556
case PIPE_PRIM_QUADS:
557
for (i = 0; i + 3 < nr_indices; i += 4) {
558
OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 1]) << 16);
559
OUT_BATCH((o + indices[i + 3]) | (o + indices[i + 1]) << 16);
560
OUT_BATCH((o + indices[i + 2]) | (o + indices[i + 3]) << 16);
561
}
562
break;
563
case PIPE_PRIM_QUAD_STRIP:
564
for (i = 0; i + 3 < nr_indices; i += 2) {
565
OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 1]) << 16);
566
OUT_BATCH((o + indices[i + 3]) | (o + indices[i + 2]) << 16);
567
OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 3]) << 16);
568
}
569
break;
570
default:
571
assert(0);
572
break;
573
}
574
}
575
576
static unsigned
577
draw_calc_nr_indices(uint32_t nr_indices, unsigned type)
578
{
579
switch (type) {
580
case 0:
581
return nr_indices;
582
case PIPE_PRIM_LINE_LOOP:
583
if (nr_indices >= 2)
584
return nr_indices * 2;
585
else
586
return 0;
587
case PIPE_PRIM_QUADS:
588
return (nr_indices / 4) * 6;
589
case PIPE_PRIM_QUAD_STRIP:
590
return ((nr_indices - 2) / 2) * 6;
591
default:
592
assert(0);
593
return 0;
594
}
595
}
596
597
static void
598
i915_vbuf_render_draw_elements(struct vbuf_render *render,
599
const ushort *indices, uint32_t nr_indices)
600
{
601
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
602
struct i915_context *i915 = i915_render->i915;
603
unsigned save_nr_indices;
604
605
save_nr_indices = nr_indices;
606
607
nr_indices = draw_calc_nr_indices(nr_indices, i915_render->fallback);
608
if (!nr_indices)
609
return;
610
611
i915_vbuf_ensure_index_bounds(render, i915_render->vbo_max_index);
612
613
if (i915->dirty)
614
i915_update_derived(i915);
615
616
if (i915->hardware_dirty)
617
i915_emit_hardware_state(i915);
618
619
if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
620
FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);
621
622
/* Make sure state is re-emitted after a flush:
623
*/
624
i915_emit_hardware_state(i915);
625
i915->vbo_flushed = 1;
626
627
if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
628
assert(0);
629
goto out;
630
}
631
}
632
633
OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | i915_render->hwprim |
634
PRIM_INDIRECT_ELTS | nr_indices);
635
draw_generate_indices(render, indices, save_nr_indices,
636
i915_render->fallback);
637
638
out:
639
return;
640
}
641
642
static void
643
i915_vbuf_render_release_vertices(struct vbuf_render *render)
644
{
645
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
646
647
i915_render->vbo_sw_offset += i915_render->vbo_max_used;
648
i915_render->vbo_max_used = 0;
649
650
/*
651
* Micro optimization, by calling update here we the offset change
652
* will be picked up on the next pipe_context::draw_*.
653
*/
654
i915_vbuf_update_vbo_state(render);
655
}
656
657
static void
658
i915_vbuf_render_destroy(struct vbuf_render *render)
659
{
660
struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
661
struct i915_context *i915 = i915_render->i915;
662
struct i915_winsys *iws = i915->iws;
663
664
if (i915_render->vbo) {
665
i915->vbo = NULL;
666
iws->buffer_unmap(iws, i915_render->vbo);
667
iws->buffer_destroy(iws, i915_render->vbo);
668
}
669
670
FREE(i915_render);
671
}
672
673
/**
674
* Create a new primitive render.
675
*/
676
static struct vbuf_render *
677
i915_vbuf_render_create(struct i915_context *i915)
678
{
679
struct i915_vbuf_render *i915_render = CALLOC_STRUCT(i915_vbuf_render);
680
struct i915_winsys *iws = i915->iws;
681
int i;
682
683
i915_render->i915 = i915;
684
685
i915_render->base.max_vertex_buffer_bytes = 4 * 4096;
686
687
/* NOTE: it must be such that state and vertices indices fit in a single
688
* batch buffer. 4096 is one batch buffer and 430 is the max amount of
689
* state in dwords. The result is the number of 16-bit indices which can
690
* fit in a single batch buffer.
691
*/
692
i915_render->base.max_indices = (4096 - 430 * 4) / 2;
693
694
i915_render->base.get_vertex_info = i915_vbuf_render_get_vertex_info;
695
i915_render->base.allocate_vertices = i915_vbuf_render_allocate_vertices;
696
i915_render->base.map_vertices = i915_vbuf_render_map_vertices;
697
i915_render->base.unmap_vertices = i915_vbuf_render_unmap_vertices;
698
i915_render->base.set_primitive = i915_vbuf_render_set_primitive;
699
i915_render->base.draw_elements = i915_vbuf_render_draw_elements;
700
i915_render->base.draw_arrays = i915_vbuf_render_draw_arrays;
701
i915_render->base.release_vertices = i915_vbuf_render_release_vertices;
702
i915_render->base.destroy = i915_vbuf_render_destroy;
703
704
#ifndef VBUF_MAP_BUFFER
705
i915_render->map_size = 0;
706
i915_render->map_used_start = 0;
707
i915_render->map_used_end = 0;
708
#endif
709
710
i915_render->vbo = NULL;
711
i915_render->vbo_ptr = NULL;
712
i915_render->vbo_size = 0;
713
i915_render->vbo_hw_offset = 0;
714
i915_render->vbo_sw_offset = 0;
715
i915_render->vbo_alloc_size = i915_render->base.max_vertex_buffer_bytes * 4;
716
717
#ifdef VBUF_USE_POOL
718
i915_render->pool_used = false;
719
i915_render->pool_buffer_size = i915_render->vbo_alloc_size;
720
i915_render->pool_fifo = u_fifo_create(6);
721
for (i = 0; i < 6; i++)
722
u_fifo_add(i915_render->pool_fifo,
723
iws->buffer_create(iws, i915_render->pool_buffer_size,
724
I915_NEW_VERTEX));
725
#else
726
(void)i;
727
(void)iws;
728
#endif
729
730
return &i915_render->base;
731
}
732
733
/**
734
* Create a new primitive vbuf/render stage.
735
*/
736
struct draw_stage *
737
i915_draw_vbuf_stage(struct i915_context *i915)
738
{
739
struct vbuf_render *render;
740
struct draw_stage *stage;
741
742
render = i915_vbuf_render_create(i915);
743
if (!render)
744
return NULL;
745
746
stage = draw_vbuf_stage(i915->draw, render);
747
if (!stage) {
748
render->destroy(render);
749
return NULL;
750
}
751
/** TODO JB: this shouldn't be here */
752
draw_set_render(i915->draw, render);
753
754
return stage;
755
}
756
757