GitHub Repository: PojavLauncherTeam/mesa
Path: blob/21.2-virgl/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <math.h>
#include <assert.h>

#include "util/u_memory.h"
#include "util/u_sampler.h"
#include "util/u_surface.h"
#include "util/u_video.h"

#include "vl_mpeg12_decoder.h"
#include "vl_defines.h"

#define SCALE_FACTOR_SNORM (32768.0f / 256.0f)
#define SCALE_FACTOR_SSCALED (1.0f / 256.0f)

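/* A format_config names the texture formats used by the three decoding
 * stages (zig-zag scan source, IDCT source and MC source) together with the
 * scale factors handed to the IDCT and MC setup code.  find_format_config()
 * below picks the first entry the pipe_screen reports as supported.
 */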
struct format_config {
   enum pipe_format zscan_source_format;
   enum pipe_format idct_source_format;
   enum pipe_format mc_source_format;

   float idct_scale;
   float mc_scale;
};

static const struct format_config bitstream_format_config[] = {
// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_bitstream_format_configs =
   sizeof(bitstream_format_config) / sizeof(struct format_config);

static const struct format_config idct_format_config[] = {
// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_idct_format_configs =
   sizeof(idct_format_config) / sizeof(struct format_config);

static const struct format_config mc_format_config[] = {
   //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_mc_format_configs =
   sizeof(mc_format_config) / sizeof(struct format_config);

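/* Maps a (component, y, x) block position inside a 4:2:0 macroblock to its
 * bit in coded_block_pattern.  A 4:2:0 macroblock has six 8x8 blocks; in CBP
 * bit order (MSB first) these are Y0 Y1 Y2 Y3 Cb Cr, i.e. 0x20, 0x10, 0x08,
 * 0x04, 0x02 and 0x01.
 */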
static const unsigned const_empty_block_mask_420[3][2][2] = {
   { { 0x20, 0x10 }, { 0x08, 0x04 } },
   { { 0x02, 0x02 }, { 0x02, 0x02 } },
   { { 0x01, 0x01 }, { 0x01, 0x01 } }
};

struct video_buffer_private
{
   struct list_head list;
   struct pipe_video_buffer *video_buffer;

   struct pipe_sampler_view *sampler_view_planes[VL_NUM_COMPONENTS];
   struct pipe_surface *surfaces[VL_MAX_SURFACES];

   struct vl_mpeg12_buffer *buffer;
};

static void
vl_mpeg12_destroy_buffer(struct vl_mpeg12_buffer *buf);

static void
destroy_video_buffer_private(void *private)
{
   struct video_buffer_private *priv = private;
   unsigned i;

   list_del(&priv->list);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      pipe_sampler_view_reference(&priv->sampler_view_planes[i], NULL);

   for (i = 0; i < VL_MAX_SURFACES; ++i)
      pipe_surface_reference(&priv->surfaces[i], NULL);

   if (priv->buffer)
      vl_mpeg12_destroy_buffer(priv->buffer);

   FREE(priv);
}

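/* Look up (or lazily create) the decoder-side state attached to a
 * pipe_video_buffer: sampler views and surfaces re-created on the decoder's
 * private context, plus an optional per-target decode buffer used for
 * chunked decoding.  destroy_video_buffer_private() is registered as the
 * cleanup callback via vl_video_buffer_set_associated_data().
 */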
static struct video_buffer_private *
get_video_buffer_private(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *buf)
{
   struct pipe_context *pipe = dec->context;
   struct video_buffer_private *priv;
   struct pipe_sampler_view **sv;
   struct pipe_surface **surf;
   unsigned i;

   priv = vl_video_buffer_get_associated_data(buf, &dec->base);
   if (priv)
      return priv;

   priv = CALLOC_STRUCT(video_buffer_private);

   list_add(&priv->list, &dec->buffer_privates);
   priv->video_buffer = buf;

   sv = buf->get_sampler_view_planes(buf);
   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      if (sv[i])
         priv->sampler_view_planes[i] = pipe->create_sampler_view(pipe, sv[i]->texture, sv[i]);

   surf = buf->get_surfaces(buf);
   for (i = 0; i < VL_MAX_SURFACES; ++i)
      if (surf[i])
         priv->surfaces[i] = pipe->create_surface(pipe, surf[i]->texture, surf[i]);

   vl_video_buffer_set_associated_data(buf, &dec->base, priv, destroy_video_buffer_private);

   return priv;
}

static void
free_video_buffer_privates(struct vl_mpeg12_decoder *dec)
{
   struct video_buffer_private *priv, *next;

   LIST_FOR_EACH_ENTRY_SAFE(priv, next, &dec->buffer_privates, list) {
      struct pipe_video_buffer *buf = priv->video_buffer;

      vl_video_buffer_set_associated_data(buf, &dec->base, NULL, NULL);
   }
}

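/* Allocate the per-decode-buffer zig-zag scan state: a streaming texture
 * (dec->zscan_source_format) that receives the uploaded coefficients, one
 * 64-texel segment per 8x8 block and blocks_per_line blocks per texture row,
 * plus one vl_zscan buffer per component writing into either the IDCT source
 * or the MC source, depending on the configured entrypoint.
 */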
static bool
init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
   struct pipe_resource *res, res_tmpl;
   struct pipe_sampler_view sv_tmpl;
   struct pipe_surface **destination;

   unsigned i;

   assert(dec && buffer);

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_2D;
   res_tmpl.format = dec->zscan_source_format;
   res_tmpl.width0 = dec->blocks_per_line * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
   res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line;
   res_tmpl.depth0 = 1;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_STREAM;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = dec->context->screen->resource_create(dec->context->screen, &res_tmpl);
   if (!res)
      goto error_source;


   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_X;
   buffer->zscan_source = dec->context->create_sampler_view(dec->context, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!buffer->zscan_source)
      goto error_sampler;

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      destination = dec->idct_source->get_surfaces(dec->idct_source);
   else
      destination = dec->mc_source->get_surfaces(dec->mc_source);

   if (!destination)
      goto error_surface;

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c,
                                &buffer->zscan[i], buffer->zscan_source, destination[i]))
         goto error_plane;

   return true;

error_plane:
   for (; i > 0; --i)
      vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]);

error_surface:
error_sampler:
   pipe_sampler_view_reference(&buffer->zscan_source, NULL);

error_source:
   return false;
}

static void
cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer)
{
   unsigned i;

   assert(buffer);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      vl_zscan_cleanup_buffer(&buffer->zscan[i]);

   pipe_sampler_view_reference(&buffer->zscan_source, NULL);
}

static bool
init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
   struct pipe_sampler_view **idct_source_sv, **mc_source_sv;

   unsigned i;

   assert(dec && buffer);

   idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source);
   if (!idct_source_sv)
      goto error_source_sv;

   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
   if (!mc_source_sv)
      goto error_mc_source_sv;

   for (i = 0; i < 3; ++i)
      if (!vl_idct_init_buffer(i == 0 ? &dec->idct_y : &dec->idct_c,
                               &buffer->idct[i], idct_source_sv[i],
                               mc_source_sv[i]))
         goto error_plane;

   return true;

error_plane:
   for (; i > 0; --i)
      vl_idct_cleanup_buffer(&buffer->idct[i - 1]);

error_mc_source_sv:
error_source_sv:
   return false;
}

static void
cleanup_idct_buffer(struct vl_mpeg12_buffer *buf)
{
   unsigned i;

   assert(buf);

   for (i = 0; i < 3; ++i)
      vl_idct_cleanup_buffer(&buf->idct[i]);
}

static bool
init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf)
{
   assert(dec && buf);

   if (!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0]))
      goto error_mc_y;

   if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1]))
      goto error_mc_cb;

   if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2]))
      goto error_mc_cr;

   return true;

error_mc_cr:
   vl_mc_cleanup_buffer(&buf->mc[1]);

error_mc_cb:
   vl_mc_cleanup_buffer(&buf->mc[0]);

error_mc_y:
   return false;
}

static void
cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
{
   unsigned i;

   assert(buf);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      vl_mc_cleanup_buffer(&buf->mc[i]);
}

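/* Translate the macroblock_type motion flags into the forward/backward
 * motion-vector weights used by the MC stage: forward-only or backward-only
 * prediction puts the full weight on one reference, bidirectional prediction
 * averages both, intra blocks ignore both references, and a non-intra block
 * without motion flags simply copies the forward reference.
 */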
static inline void
MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
{
   assert(mb);

   switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
   case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
      weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      break;

   case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
      weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
      break;

   case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
      weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
      break;

   default:
      if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) {
         weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      } else {
         /* no motion vector, but also not intra mb ->
            just copy the old frame content */
         weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      }
      break;
   }
}

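/* Convert one of the macroblock's PMV motion vectors into the
 * vl_motionvector layout consumed by the vertex stream: frame prediction
 * duplicates PMV[0] for both fields, field prediction takes PMV[0]/PMV[1]
 * and the field parity from motion_vertical_field_select, and macroblocks
 * without motion flags get a zero vector.  Dual-prime and 16x8 motion are
 * not implemented (see the unreachable() below).
 */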
static inline struct vl_motionvector
MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
                   unsigned field_select_mask, unsigned weight)
{
   struct vl_motionvector mv;

   assert(mb);

   if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
      switch (mb->macroblock_modes.bits.frame_motion_type) {
      case PIPE_MPEG12_MO_TYPE_FRAME:
         mv.top.x = mb->PMV[0][vector][0];
         mv.top.y = mb->PMV[0][vector][1];
         mv.top.field_select = PIPE_VIDEO_FRAME;
         mv.top.weight = weight;

         mv.bottom.x = mb->PMV[0][vector][0];
         mv.bottom.y = mb->PMV[0][vector][1];
         mv.bottom.weight = weight;
         mv.bottom.field_select = PIPE_VIDEO_FRAME;
         break;

      case PIPE_MPEG12_MO_TYPE_FIELD:
         mv.top.x = mb->PMV[0][vector][0];
         mv.top.y = mb->PMV[0][vector][1];
         mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
         mv.top.weight = weight;

         mv.bottom.x = mb->PMV[1][vector][0];
         mv.bottom.y = mb->PMV[1][vector][1];
         mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
         mv.bottom.weight = weight;
         break;

      default:
         unreachable("TODO: Support DUALPRIME and 16x8");
      }
   } else {
      mv.top.x = mv.top.y = 0;
      mv.top.field_select = PIPE_VIDEO_FRAME;
      mv.top.weight = weight;

      mv.bottom.x = mv.bottom.y = 0;
      mv.bottom.field_select = PIPE_VIDEO_FRAME;
      mv.bottom.weight = weight;
   }
   return mv;
}

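/* Append the macroblock's coded 8x8 blocks to the decode buffer: for every
 * bit set in coded_block_pattern a vl_ycbcr_block entry is written to the
 * per-component vertex stream and the corresponding 64 16-bit coefficients
 * are copied into the mapped zscan source texture.  Only the 4:2:0 block
 * layout is handled here (see the TODO below for 4:2:2/4:4:4).
 */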
static inline void
UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
                  struct vl_mpeg12_buffer *buf,
                  const struct pipe_mpeg12_macroblock *mb)
{
   unsigned intra;
   unsigned tb, x, y, num_blocks = 0;

   assert(dec && buf);
   assert(mb);

   if (!mb->coded_block_pattern)
      return;

   intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0;

   for (y = 0; y < 2; ++y) {
      for (x = 0; x < 2; ++x) {
         if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {

            struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
            stream->x = mb->x * 2 + x;
            stream->y = mb->y * 2 + y;
            stream->intra = intra;
            stream->coding = mb->macroblock_modes.bits.dct_type;
            stream->block_num = buf->block_num++;

            buf->num_ycbcr_blocks[0]++;
            buf->ycbcr_stream[0]++;

            num_blocks++;
         }
      }
   }

   /* TODO: Implement 422, 444 */
   //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);

   for (tb = 1; tb < 3; ++tb) {
      if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {

         struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
         stream->x = mb->x;
         stream->y = mb->y;
         stream->intra = intra;
         stream->coding = 0;
         stream->block_num = buf->block_num++;

         buf->num_ycbcr_blocks[tb]++;
         buf->ycbcr_stream[tb]++;

         num_blocks++;
      }
   }

   memcpy(buf->texels, mb->blocks, 64 * sizeof(short) * num_blocks);
   buf->texels += 64 * num_blocks;
}

static void
vl_mpeg12_destroy_buffer(struct vl_mpeg12_buffer *buf)
{

   assert(buf);

   cleanup_zscan_buffer(buf);
   cleanup_idct_buffer(buf);
   cleanup_mc_buffer(buf);
   vl_vb_cleanup(&buf->vertex_stream);

   FREE(buf);
}

static void
vl_mpeg12_destroy(struct pipe_video_codec *decoder)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
   unsigned i;

   assert(decoder);

   free_video_buffer_privates(dec);

   /* Asserted in softpipe_delete_fs_state() for some reason */
   dec->context->bind_vs_state(dec->context, NULL);
   dec->context->bind_fs_state(dec->context, NULL);

   dec->context->delete_depth_stencil_alpha_state(dec->context, dec->dsa);
   dec->context->delete_sampler_state(dec->context, dec->sampler_ycbcr);

   vl_mc_cleanup(&dec->mc_y);
   vl_mc_cleanup(&dec->mc_c);
   dec->mc_source->destroy(dec->mc_source);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      vl_idct_cleanup(&dec->idct_y);
      vl_idct_cleanup(&dec->idct_c);
      dec->idct_source->destroy(dec->idct_source);
   }

   vl_zscan_cleanup(&dec->zscan_y);
   vl_zscan_cleanup(&dec->zscan_c);

   dec->context->delete_vertex_elements_state(dec->context, dec->ves_ycbcr);
   dec->context->delete_vertex_elements_state(dec->context, dec->ves_mv);

   pipe_resource_reference(&dec->quads.buffer.resource, NULL);
   pipe_resource_reference(&dec->pos.buffer.resource, NULL);

   pipe_sampler_view_reference(&dec->zscan_linear, NULL);
   pipe_sampler_view_reference(&dec->zscan_normal, NULL);
   pipe_sampler_view_reference(&dec->zscan_alternate, NULL);

   for (i = 0; i < 4; ++i)
      if (dec->dec_buffers[i])
         vl_mpeg12_destroy_buffer(dec->dec_buffers[i]);

   dec->context->destroy(dec->context);

   FREE(dec);
}

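/* Pick the decode buffer for this target.  With chunked decoding the buffer
 * is tied to the target's video_buffer_private so partially decoded frames
 * keep their state; otherwise one of the four round-robin dec_buffers is
 * (lazily) allocated.  The buffer holds the vertex stream plus the MC, IDCT
 * and zscan state, and the bitstream parser for the bitstream entrypoint.
 */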
static struct vl_mpeg12_buffer *
vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *target)
{
   struct video_buffer_private *priv;
   struct vl_mpeg12_buffer *buffer;

   assert(dec);

   priv = get_video_buffer_private(dec, target);
   if (priv->buffer)
      return priv->buffer;

   buffer = dec->dec_buffers[dec->current_buffer];
   if (buffer)
      return buffer;

   buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
   if (!buffer)
      return NULL;

   if (!vl_vb_init(&buffer->vertex_stream, dec->context,
                   dec->base.width / VL_MACROBLOCK_WIDTH,
                   dec->base.height / VL_MACROBLOCK_HEIGHT))
      goto error_vertex_buffer;

   if (!init_mc_buffer(dec, buffer))
      goto error_mc;

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      if (!init_idct_buffer(dec, buffer))
         goto error_idct;

   if (!init_zscan_buffer(dec, buffer))
      goto error_zscan;

   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      vl_mpg12_bs_init(&buffer->bs, &dec->base);

   if (dec->base.expect_chunked_decode)
      priv->buffer = buffer;
   else
      dec->dec_buffers[dec->current_buffer] = buffer;

   return buffer;

error_zscan:
   cleanup_idct_buffer(buffer);

error_idct:
   cleanup_mc_buffer(buffer);

error_mc:
   vl_vb_cleanup(&buffer->vertex_stream);

error_vertex_buffer:
   FREE(buffer);
   return NULL;
}

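/* Start a new frame: upload the intra/non-intra quantizer matrices (or
 * constant 0x10 matrices for the non-bitstream entrypoints), map the vertex
 * stream and the zscan source texture for writing, and reset the
 * per-component block counters and stream pointers.
 */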
static void
vl_mpeg12_begin_frame(struct pipe_video_codec *decoder,
                      struct pipe_video_buffer *target,
                      struct pipe_picture_desc *picture)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct vl_mpeg12_buffer *buf;

   struct pipe_resource *tex;
   struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };

   uint8_t intra_matrix[64];
   uint8_t non_intra_matrix[64];

   unsigned i;

   assert(dec && target && picture);

   buf = vl_mpeg12_get_decode_buffer(dec, target);
   assert(buf);

   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
      memcpy(intra_matrix, desc->intra_matrix, sizeof(intra_matrix));
      memcpy(non_intra_matrix, desc->non_intra_matrix, sizeof(non_intra_matrix));
      intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
   } else {
      memset(intra_matrix, 0x10, sizeof(intra_matrix));
      memset(non_intra_matrix, 0x10, sizeof(non_intra_matrix));
   }

   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      struct vl_zscan *zscan = i == 0 ? &dec->zscan_y : &dec->zscan_c;
      vl_zscan_upload_quant(zscan, &buf->zscan[i], intra_matrix, true);
      vl_zscan_upload_quant(zscan, &buf->zscan[i], non_intra_matrix, false);
   }

   vl_vb_map(&buf->vertex_stream, dec->context);

   tex = buf->zscan_source->texture;
   rect.width = tex->width0;
   rect.height = tex->height0;

   buf->texels =
      dec->context->texture_map(dec->context, tex, 0,
                                PIPE_MAP_WRITE |
                                PIPE_MAP_DISCARD_RANGE,
                                &rect, &buf->tex_transfer);

   buf->block_num = 0;

   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
      buf->num_ycbcr_blocks[i] = 0;
   }

   for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
      buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);

   if (dec->base.entrypoint >= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      for (i = 0; i < VL_NUM_COMPONENTS; ++i)
         vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
   }
}

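/* Entry point for state trackers that hand over already parsed macroblocks
 * (e.g. XvMC): coded blocks are uploaded, each macroblock's motion vectors
 * are converted into the per-reference vertex streams, and skipped
 * macroblocks are expanded as described in section 7.6.6 of the MPEG-2 spec
 * (zero forward vector in P pictures, reuse of the previous macroblock's
 * vectors in B pictures).
 */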
static void
vl_mpeg12_decode_macroblock(struct pipe_video_codec *decoder,
                            struct pipe_video_buffer *target,
                            struct pipe_picture_desc *picture,
                            const struct pipe_macroblock *macroblocks,
                            unsigned num_macroblocks)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct vl_mpeg12_buffer *buf;

   unsigned i, j, mv_weights[2];

   assert(dec && target && picture);
   assert(macroblocks && macroblocks->codec == PIPE_VIDEO_FORMAT_MPEG12);

   buf = vl_mpeg12_get_decode_buffer(dec, target);
   assert(buf);

   for (; num_macroblocks > 0; --num_macroblocks) {
      unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;

      if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA))
         UploadYcbcrBlocks(dec, buf, mb);

      MacroBlockTypeToPipeWeights(mb, mv_weights);

      for (i = 0; i < 2; ++i) {
         if (!desc->ref[i]) continue;

         buf->mv_stream[i][mb_addr] = MotionVectorToPipe
         (
            mb, i,
            i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
            mv_weights[i]
         );
      }

      /* see section 7.6.6 of the spec */
      if (mb->num_skipped_macroblocks > 0) {
         struct vl_motionvector skipped_mv[2];

         if (desc->ref[0] && !desc->ref[1]) {
            skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
            skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
         } else {
            skipped_mv[0] = buf->mv_stream[0][mb_addr];
            skipped_mv[1] = buf->mv_stream[1][mb_addr];
         }
         skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME;
         skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME;

         skipped_mv[0].bottom = skipped_mv[0].top;
         skipped_mv[1].bottom = skipped_mv[1].top;

         ++mb_addr;
         for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
            for (j = 0; j < 2; ++j) {
               if (!desc->ref[j]) continue;
               buf->mv_stream[j][mb_addr] = skipped_mv[j];
            }
         }
      }

      ++mb;
   }
}

static void
vl_mpeg12_decode_bitstream(struct pipe_video_codec *decoder,
                           struct pipe_video_buffer *target,
                           struct pipe_picture_desc *picture,
                           unsigned num_buffers,
                           const void * const *buffers,
                           const unsigned *sizes)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct vl_mpeg12_buffer *buf;

   unsigned i;

   assert(dec && target && picture);

   buf = vl_mpeg12_get_decode_buffer(dec, target);
   assert(buf);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      vl_zscan_set_layout(&buf->zscan[i], desc->alternate_scan ?
                          dec->zscan_alternate : dec->zscan_normal);

   vl_mpg12_bs_decode(&buf->bs, target, desc, num_buffers, buffers, sizes);
}

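/* Flush everything queued for this frame to the GPU: unmap the streams,
 * render the motion compensated prediction from the reference frames, run
 * the inverse zig-zag scan (and the IDCT stages, if enabled) over the
 * uploaded coefficient blocks, and render the resulting blocks into each
 * plane of the target surface before advancing to the next round-robin
 * buffer.
 */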
static void
vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
                    struct pipe_video_buffer *target,
                    struct pipe_picture_desc *picture)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct pipe_sampler_view **ref_frames[2];
   struct pipe_sampler_view **mc_source_sv;
   struct pipe_surface **target_surfaces;
   struct pipe_vertex_buffer vb[3];
   struct vl_mpeg12_buffer *buf;

   const unsigned *plane_order;
   unsigned i, j, component;
   unsigned nr_components;

   assert(dec && target && picture);
   assert(!target->interlaced);

   buf = vl_mpeg12_get_decode_buffer(dec, target);

   vl_vb_unmap(&buf->vertex_stream, dec->context);

   if (buf->tex_transfer)
      dec->context->texture_unmap(dec->context, buf->tex_transfer);

   vb[0] = dec->quads;
   vb[1] = dec->pos;

   target_surfaces = get_video_buffer_private(dec, target)->surfaces;

   for (i = 0; i < VL_MAX_REF_FRAMES; ++i) {
      if (desc->ref[i])
         ref_frames[i] = get_video_buffer_private(dec, desc->ref[i])->sampler_view_planes;
      else
         ref_frames[i] = NULL;
   }

   dec->context->bind_vertex_elements_state(dec->context, dec->ves_mv);
   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      if (!target_surfaces[i]) continue;

      vl_mc_set_surface(&buf->mc[i], target_surfaces[i]);

      for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
         if (!ref_frames[j] || !ref_frames[j][i]) continue;

         vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
         dec->context->set_vertex_buffers(dec->context, 0, 3, 0, false, vb);

         vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
      }
   }

   dec->context->bind_vertex_elements_state(dec->context, dec->ves_ycbcr);
   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      if (!buf->num_ycbcr_blocks[i]) continue;

      vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
      dec->context->set_vertex_buffers(dec->context, 0, 2, 0, false, vb);

      vl_zscan_render(i ? &dec->zscan_c : &dec->zscan_y, &buf->zscan[i], buf->num_ycbcr_blocks[i]);

      if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
         vl_idct_flush(i ? &dec->idct_c : &dec->idct_y, &buf->idct[i], buf->num_ycbcr_blocks[i]);
   }

   plane_order = vl_video_buffer_plane_order(target->buffer_format);
   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
   for (i = 0, component = 0; component < VL_NUM_COMPONENTS; ++i) {
      if (!target_surfaces[i]) continue;

      nr_components = util_format_get_nr_components(target_surfaces[i]->texture->format);
      for (j = 0; j < nr_components; ++j, ++component) {
         unsigned plane = plane_order[component];
         if (!buf->num_ycbcr_blocks[plane]) continue;

         vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane);
         dec->context->set_vertex_buffers(dec->context, 0, 2, 0, false, vb);

         if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
            vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]);
         else {
            dec->context->set_sampler_views(dec->context,
                                            PIPE_SHADER_FRAGMENT, 0, 1, 0,
                                            &mc_source_sv[plane]);
            dec->context->bind_sampler_states(dec->context,
                                              PIPE_SHADER_FRAGMENT,
                                              0, 1, &dec->sampler_ycbcr);
         }
         vl_mc_render_ycbcr(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], j, buf->num_ycbcr_blocks[plane]);
      }
   }
   dec->context->flush(dec->context, NULL, 0);
   ++dec->current_buffer;
   dec->current_buffer %= 4;
}

static void
vl_mpeg12_flush(struct pipe_video_codec *decoder)
{
   assert(decoder);

   // Noop, for shaders it is much faster to flush everything in end_frame
}

static bool
init_pipe_state(struct vl_mpeg12_decoder *dec)
{
   struct pipe_depth_stencil_alpha_state dsa;
   struct pipe_sampler_state sampler;
   unsigned i;

   assert(dec);

   memset(&dsa, 0, sizeof dsa);
   dsa.depth_enabled = 0;
   dsa.depth_writemask = 0;
   dsa.depth_func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha_enabled = 0;
   dsa.alpha_func = PIPE_FUNC_ALWAYS;
   dsa.alpha_ref_value = 0;
   dec->dsa = dec->context->create_depth_stencil_alpha_state(dec->context, &dsa);
   dec->context->bind_depth_stencil_alpha_state(dec->context, dec->dsa);

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;
   dec->sampler_ycbcr = dec->context->create_sampler_state(dec->context, &sampler);
   if (!dec->sampler_ycbcr)
      return false;

   return true;
}

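/* Walk a format_config table and return the first entry whose formats the
 * screen can sample from (zscan source) and render to (IDCT/MC sources).
 * The IDCT source is only checked when the entry actually uses an IDCT
 * stage.  Returns NULL if nothing is supported.
 */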
static const struct format_config*
find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs)
{
   struct pipe_screen *screen;
   unsigned i;

   assert(dec);

   screen = dec->context->screen;

   for (i = 0; i < num_configs; ++i) {
      if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
                                       1, 1, PIPE_BIND_SAMPLER_VIEW))
         continue;

      if (configs[i].idct_source_format != PIPE_FORMAT_NONE) {
         if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D,
                                          1, 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;

         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D,
                                          1, 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;
      } else {
         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D,
                                          1, 1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;
      }
      return &configs[i];
   }

   return NULL;
}

static bool
init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   unsigned num_channels;

   assert(dec);

   dec->zscan_source_format = format_config->zscan_source_format;
   dec->zscan_linear = vl_zscan_layout(dec->context, vl_zscan_linear, dec->blocks_per_line);
   dec->zscan_normal = vl_zscan_layout(dec->context, vl_zscan_normal, dec->blocks_per_line);
   dec->zscan_alternate = vl_zscan_layout(dec->context, vl_zscan_alternate, dec->blocks_per_line);

   num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;

   if (!vl_zscan_init(&dec->zscan_y, dec->context, dec->base.width, dec->base.height,
                      dec->blocks_per_line, dec->num_blocks, num_channels))
      return false;

   if (!vl_zscan_init(&dec->zscan_c, dec->context, dec->chroma_width, dec->chroma_height,
                      dec->blocks_per_line, dec->num_blocks, num_channels))
      return false;

   return true;
}

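/* Set up the two-stage shader IDCT: decide how many render targets the first
 * pass may use, allocate the intermediate idct_source and mc_source video
 * buffers sized for that layout, upload the IDCT matrix once, and initialize
 * one vl_idct instance for luma and one shared instance for both chroma
 * planes.
 */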
static bool
init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   unsigned nr_of_idct_render_targets, max_inst;
   enum pipe_format formats[3];
   struct pipe_video_buffer templat;

   struct pipe_sampler_view *matrix = NULL;

   nr_of_idct_render_targets = dec->context->screen->get_param
   (
      dec->context->screen, PIPE_CAP_MAX_RENDER_TARGETS
   );

   max_inst = dec->context->screen->get_shader_param
   (
      dec->context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
   );

   // Just assume we need 32 inst per render target, not 100% true, but should work in most cases
   if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
      // more than 4 render targets usually doesn't make any sense
      nr_of_idct_render_targets = 4;
   else
      nr_of_idct_render_targets = 1;

   formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
   memset(&templat, 0, sizeof(templat));
   templat.width = dec->base.width / 4;
   templat.height = dec->base.height;
   dec->idct_source = vl_video_buffer_create_ex
   (
      dec->context, &templat,
      formats, 1, 1, PIPE_USAGE_DEFAULT,
      PIPE_VIDEO_CHROMA_FORMAT_420
   );

   if (!dec->idct_source)
      goto error_idct_source;

   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
   memset(&templat, 0, sizeof(templat));
   templat.width = dec->base.width / nr_of_idct_render_targets;
   templat.height = dec->base.height / 4;
   dec->mc_source = vl_video_buffer_create_ex
   (
      dec->context, &templat,
      formats, nr_of_idct_render_targets, 1, PIPE_USAGE_DEFAULT,
      PIPE_VIDEO_CHROMA_FORMAT_420
   );

   if (!dec->mc_source)
      goto error_mc_source;

   if (!(matrix = vl_idct_upload_matrix(dec->context, format_config->idct_scale)))
      goto error_matrix;

   if (!vl_idct_init(&dec->idct_y, dec->context, dec->base.width, dec->base.height,
                     nr_of_idct_render_targets, matrix, matrix))
      goto error_y;

   if (!vl_idct_init(&dec->idct_c, dec->context, dec->chroma_width, dec->chroma_height,
                     nr_of_idct_render_targets, matrix, matrix))
      goto error_c;

   pipe_sampler_view_reference(&matrix, NULL);

   return true;

error_c:
   vl_idct_cleanup(&dec->idct_y);

error_y:
   pipe_sampler_view_reference(&matrix, NULL);

error_matrix:
   dec->mc_source->destroy(dec->mc_source);

error_mc_source:
   dec->idct_source->destroy(dec->idct_source);

error_idct_source:
   return false;
}

static bool
init_mc_source_widthout_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   enum pipe_format formats[3];
   struct pipe_video_buffer templat;

   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
   assert(pipe_format_to_chroma_format(formats[0]) == dec->base.chroma_format);
   memset(&templat, 0, sizeof(templat));
   templat.width = dec->base.width;
   templat.height = dec->base.height;
   dec->mc_source = vl_video_buffer_create_ex
   (
      dec->context, &templat,
      formats, 1, 1, PIPE_USAGE_DEFAULT,
      PIPE_VIDEO_CHROMA_FORMAT_420
   );

   return dec->mc_source != NULL;
}

static void
mc_vert_shader_callback(void *priv, struct vl_mc *mc,
                        struct ureg_program *shader,
                        unsigned first_output,
                        struct ureg_dst tex)
{
   struct vl_mpeg12_decoder *dec = priv;
   struct ureg_dst o_vtex;

   assert(priv && mc);
   assert(shader);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
      vl_idct_stage2_vert_shader(idct, shader, first_output, tex);
   } else {
      o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output);
      ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex));
   }
}

static void
mc_frag_shader_callback(void *priv, struct vl_mc *mc,
                        struct ureg_program *shader,
                        unsigned first_input,
                        struct ureg_dst dst)
{
   struct vl_mpeg12_decoder *dec = priv;
   struct ureg_src src, sampler;

   assert(priv && mc);
   assert(shader);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
      vl_idct_stage2_frag_shader(idct, shader, first_input, dst);
   } else {
      src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR);
      sampler = ureg_DECL_sampler(shader, 0);
      ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler);
   }
}

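/* Create a shader-based MPEG-1/2 decoder on top of an arbitrary Gallium
 * context.  The decoder runs on its own multimedia context and supports the
 * bitstream, IDCT and MC entrypoints; which stages run on the GPU depends on
 * templat->entrypoint.
 *
 * Minimal usage sketch (illustrative only, not taken from this file; assumes
 * a valid pipe_context "pipe" and 4:2:0 MPEG-2 main profile content):
 *
 *    struct pipe_video_codec templ = {0};
 *    templ.profile = PIPE_VIDEO_PROFILE_MPEG2_MAIN;
 *    templ.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
 *    templ.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
 *    templ.width = 720;
 *    templ.height = 576;
 *    struct pipe_video_codec *codec = vl_create_mpeg12_decoder(pipe, &templ);
 *
 * The caller then drives codec->begin_frame(), decode_bitstream() or
 * decode_macroblock(), and end_frame() per picture, and codec->destroy()
 * when done.
 */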
struct pipe_video_codec *
vl_create_mpeg12_decoder(struct pipe_context *context,
                         const struct pipe_video_codec *templat)
{
   const unsigned block_size_pixels = VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
   const struct format_config *format_config;
   struct vl_mpeg12_decoder *dec;

   assert(u_reduce_video_profile(templat->profile) == PIPE_VIDEO_FORMAT_MPEG12);

   dec = CALLOC_STRUCT(vl_mpeg12_decoder);

   if (!dec)
      return NULL;

   dec->base = *templat;
   dec->base.context = context;
   dec->context = pipe_create_multimedia_context(context->screen);

   dec->base.destroy = vl_mpeg12_destroy;
   dec->base.begin_frame = vl_mpeg12_begin_frame;
   dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
   dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
   dec->base.end_frame = vl_mpeg12_end_frame;
   dec->base.flush = vl_mpeg12_flush;

   dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
   dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
   dec->width_in_macroblocks = align(dec->base.width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;

   /* TODO: Implement 422, 444 */
   assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);

   if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
      dec->chroma_width = dec->base.width / 2;
      dec->chroma_height = dec->base.height / 2;
      dec->num_blocks = dec->num_blocks * 2;
   } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
      dec->chroma_width = dec->base.width / 2;
      dec->chroma_height = dec->base.height;
      dec->num_blocks = dec->num_blocks * 2 + dec->num_blocks;
   } else {
      dec->chroma_width = dec->base.width;
      dec->chroma_height = dec->base.height;
      dec->num_blocks = dec->num_blocks * 3;
   }

   dec->quads = vl_vb_upload_quads(dec->context);
   dec->pos = vl_vb_upload_pos(
      dec->context,
      dec->base.width / VL_MACROBLOCK_WIDTH,
      dec->base.height / VL_MACROBLOCK_HEIGHT
   );

   dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->context);
   dec->ves_mv = vl_vb_get_ves_mv(dec->context);

   switch (templat->entrypoint) {
   case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
      format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs);
      break;

   case PIPE_VIDEO_ENTRYPOINT_IDCT:
      format_config = find_format_config(dec, idct_format_config, num_idct_format_configs);
      break;

   case PIPE_VIDEO_ENTRYPOINT_MC:
      format_config = find_format_config(dec, mc_format_config, num_mc_format_configs);
      break;

   default:
      assert(0);
      FREE(dec);
      return NULL;
   }

   if (!format_config) {
      FREE(dec);
      return NULL;
   }

   if (!init_zscan(dec, format_config))
      goto error_zscan;

   if (templat->entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      if (!init_idct(dec, format_config))
         goto error_sources;
   } else {
      if (!init_mc_source_widthout_idct(dec, format_config))
         goto error_sources;
   }

   if (!vl_mc_init(&dec->mc_y, dec->context, dec->base.width, dec->base.height,
                   VL_MACROBLOCK_HEIGHT, format_config->mc_scale,
                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
      goto error_mc_y;

   // TODO
   if (!vl_mc_init(&dec->mc_c, dec->context, dec->base.width, dec->base.height,
                   VL_BLOCK_HEIGHT, format_config->mc_scale,
                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
      goto error_mc_c;

   if (!init_pipe_state(dec))
      goto error_pipe_state;

   list_inithead(&dec->buffer_privates);

   return &dec->base;

error_pipe_state:
   vl_mc_cleanup(&dec->mc_c);

error_mc_c:
   vl_mc_cleanup(&dec->mc_y);

error_mc_y:
   if (templat->entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      vl_idct_cleanup(&dec->idct_y);
      vl_idct_cleanup(&dec->idct_c);
      dec->idct_source->destroy(dec->idct_source);
   }
   dec->mc_source->destroy(dec->mc_source);

error_sources:
   vl_zscan_cleanup(&dec->zscan_y);
   vl_zscan_cleanup(&dec->zscan_c);

error_zscan:
   FREE(dec);
   return NULL;
}