Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
hrydgard
GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/HW/MediaEngine.cpp
5654 views
1
// Copyright (c) 2012- PPSSPP Project.
2
3
// This program is free software: you can redistribute it and/or modify
4
// it under the terms of the GNU General Public License as published by
5
// the Free Software Foundation, version 2.0 or later versions.
6
7
// This program is distributed in the hope that it will be useful,
8
// but WITHOUT ANY WARRANTY; without even the implied warranty of
9
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
// GNU General Public License 2.0 for more details.
11
12
// A copy of the GPL 2.0 should have been included with the program.
13
// If not, see http://www.gnu.org/licenses/
14
15
// Official git repository and contact information can be found at
16
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
17
18
#include "Common/Serialize/SerializeFuncs.h"
19
#include "Common/Math/SIMDHeaders.h"
20
#include "Common/StringUtils.h"
21
#include "Core/System.h"
22
#include "Core/Debugger/MemBlockInfo.h"
23
#include "Core/HW/MediaEngine.h"
24
#include "Core/MemMap.h"
25
#include "Core/Reporting.h"
26
#include "GPU/GPUState.h" // Used by TextureDecoder.h when templates get instanced
27
#include "GPU/Common/TextureDecoder.h"
28
#include "Core/HW/SimpleAudioDec.h"
29
30
#include <algorithm>
31
32
#ifdef USE_FFMPEG
33
34
extern "C" {
35
36
#include "libavcodec/avcodec.h"
37
#include "libavformat/avformat.h"
38
#include "libavutil/imgutils.h"
39
#include "libswscale/swscale.h"
40
41
#if LIBAVFORMAT_VERSION_MAJOR >= 59
42
// private libavformat api (see demux.h in ffmpeg src tree)
43
void avpriv_stream_set_need_parsing(AVStream *st, enum AVStreamParseType type);
44
#endif
45
}
46
#endif // USE_FFMPEG
47
48
#ifdef USE_FFMPEG
49
50
#include "Core/FFMPEGCompat.h"
51
52
// Maps a PSP framebuffer pixel mode (GE_CMODE_*) to the matching
// little-endian FFmpeg pixel format used as the sws_scale destination.
// Returns (AVPixelFormat)0 and logs an error for unknown modes.
static AVPixelFormat getSwsFormat(int pspFormat) {
	switch (pspFormat) {
	case GE_CMODE_16BIT_BGR5650:  return AV_PIX_FMT_BGR565LE;
	case GE_CMODE_16BIT_ABGR5551: return AV_PIX_FMT_BGR555LE;
	case GE_CMODE_16BIT_ABGR4444: return AV_PIX_FMT_BGR444LE;
	case GE_CMODE_32BIT_ABGR8888: return AV_PIX_FMT_RGBA;
	default:
		ERROR_LOG(Log::ME, "Unknown pixel format");
		return (AVPixelFormat)0;
	}
}
69
70
void ffmpeg_logger(void *, int level, const char *format, va_list va_args) {
71
// We're still called even if the level doesn't match.
72
if (level > av_log_get_level())
73
return;
74
75
char tmp[1024];
76
vsnprintf(tmp, sizeof(tmp), format, va_args);
77
tmp[sizeof(tmp) - 1] = '\0';
78
79
// Strip off any trailing newline.
80
size_t len = strlen(tmp);
81
if (tmp[len - 1] == '\n')
82
tmp[len - 1] = '\0';
83
84
if (!strcmp(tmp, "GHA Phase shifting")) {
85
Reporting::ReportMessage("Atrac3+: GHA phase shifting");
86
}
87
88
// Let's color the log line appropriately.
89
if (level <= AV_LOG_PANIC) {
90
ERROR_LOG(Log::ME, "FF: %s", tmp);
91
} else if (level >= AV_LOG_VERBOSE) {
92
DEBUG_LOG(Log::ME, "FF: %s", tmp);
93
} else {
94
// Downgrade some log messages we don't care about
95
if (startsWith(tmp, "No accelerated colorspace") || startsWith(tmp, "SEI type 1 size 40 truncated at 36")) {
96
VERBOSE_LOG(Log::ME, "FF: %s", tmp);
97
} else {
98
INFO_LOG(Log::ME, "FF: %s", tmp);
99
}
100
}
101
}
102
103
bool InitFFmpeg() {
104
#ifdef _DEBUG
105
av_log_set_level(AV_LOG_VERBOSE);
106
#else
107
av_log_set_level(AV_LOG_WARNING);
108
#endif
109
av_log_set_callback(&ffmpeg_logger);
110
111
return true;
112
}
113
#endif
114
115
static int getPixelFormatBytes(int pspFormat)
116
{
117
switch (pspFormat)
118
{
119
case GE_CMODE_16BIT_BGR5650:
120
case GE_CMODE_16BIT_ABGR5551:
121
case GE_CMODE_16BIT_ABGR4444:
122
return 2;
123
case GE_CMODE_32BIT_ABGR8888:
124
return 4;
125
126
default:
127
ERROR_LOG(Log::ME, "Unknown pixel format");
128
return 4;
129
}
130
}
131
132
// Sets defaults; the engine is not usable until loadStream() is called.
MediaEngine::MediaEngine() {
	// Default AVIO buffer size; may grow in openContext() to fit the header.
	m_bufSize = 0x2000;

	m_mpegheaderSize = sizeof(m_mpegheader);
	m_audioType = PSP_CODEC_AT3PLUS; // in movie, we use only AT3+ audio
}
138
139
// Releases all FFmpeg state, buffers and the demuxer via closeMedia().
MediaEngine::~MediaEngine() {
	closeMedia();
}
142
143
// Tears down everything owned by the engine: FFmpeg contexts/frames
// (closeContext), the ringbuffer, the PSMF demuxer and the audio decoder.
// Safe to call repeatedly; pointers are nulled after deletion.
void MediaEngine::closeMedia() {
	closeContext();
	delete m_pdata;
	delete m_demux;
	m_pdata = nullptr;
	m_demux = nullptr;
	AudioClose(&m_audioContext);
	m_isVideoEnd = false;
}
152
153
// Savestate (de)serialization. Section versions 1..7; newer fields are
// guarded by `s >= N` checks with defaults for older states.
// The field order below is the on-disk layout — do not reorder.
void MediaEngine::DoState(PointerWrap &p) {
	auto s = p.Section("MediaEngine", 1, 7);
	if (!s)
		return;

	Do(p, m_videoStream);
	Do(p, m_audioStream);

	DoArray(p, m_mpegheader, sizeof(m_mpegheader));
	// v4+: actual header size; older states assumed the full buffer.
	if (s >= 4) {
		Do(p, m_mpegheaderSize);
	} else {
		m_mpegheaderSize = sizeof(m_mpegheader);
	}
	// v5+: how far MpegReadbuffer() has consumed the cached header.
	if (s >= 5) {
		Do(p, m_mpegheaderReadPos);
	} else {
		m_mpegheaderReadPos = m_mpegheaderSize;
	}
	// v6+: number of video streams SetupStreams() should create.
	if (s >= 6) {
		Do(p, m_expectedVideoStreams);
	} else {
		m_expectedVideoStreams = 0;
	}

	Do(p, m_ringbuffersize);

	// Recreate the ringbuffer/demuxer from the cached header on load.
	u32 hasloadStream = m_pdata != nullptr;
	Do(p, hasloadStream);
	if (hasloadStream && p.mode == p.MODE_READ)
		reloadStream();
#ifdef USE_FFMPEG
	u32 hasopencontext = m_pFormatCtx != nullptr;
#else
	u32 hasopencontext = false;
#endif
	Do(p, hasopencontext);
	if (m_pdata)
		m_pdata->DoState(p);
	if (m_demux)
		m_demux->DoState(p);

	Do(p, m_videopts);
	// v7+: last concrete pts seen, used by stepVideo()'s pts heuristics.
	if (s >= 7) {
		Do(p, m_lastPts);
	} else {
		m_lastPts = m_videopts;
	}
	Do(p, m_audiopts);

	if (s >= 2) {
		Do(p, m_firstTimeStamp);
		Do(p, m_lastTimeStamp);
	}

	// Reopen the FFmpeg context at the saved header read position.
	if (hasopencontext && p.mode == p.MODE_READ) {
		openContext(true);
	}

	Do(p, m_isVideoEnd);
	// Placeholder for a removed field — kept so the savestate layout
	// stays compatible; the value is ignored.
	bool noAudioDataRemoved;
	Do(p, noAudioDataRemoved);
	if (s >= 3) {
		Do(p, m_audioType);
	} else {
		m_audioType = PSP_CODEC_AT3PLUS;
	}
}
221
222
// AVIO read callback handed to avio_alloc_context() in openContext().
// Serves the cached PSMF header first (so FFmpeg can probe it), then
// streams data popped from the game-fed ringbuffer.
// Returns the number of bytes produced (ringbuffer may return <= 0).
int MediaEngine::MpegReadbuffer(void *opaque, uint8_t *buf, int buf_size) {
	MediaEngine *mpeg = (MediaEngine *)opaque;

	int size = buf_size;
	if (mpeg->m_mpegheaderReadPos < mpeg->m_mpegheaderSize) {
		// Still inside the cached header: copy from there, never past its end.
		size = std::min(buf_size, mpeg->m_mpegheaderSize - mpeg->m_mpegheaderReadPos);
		memcpy(buf, mpeg->m_mpegheader + mpeg->m_mpegheaderReadPos, size);
		mpeg->m_mpegheaderReadPos += size;
	} else {
		size = mpeg->m_pdata->pop_front(buf, buf_size);
		// Track bytes handed to the decoder but not yet consumed by the game,
		// used by getRemainSize().
		if (size > 0)
			mpeg->m_decodingsize = size;
	}
	return size;
}
237
238
// Parses the PSMF header's stream table directly and registers the video
// streams with FFmpeg, avoiding a full avformat_find_stream_info() probe.
// Returns false when the header looks invalid so the caller can fall back
// to FFmpeg's own probing.
bool MediaEngine::SetupStreams() {
#ifdef USE_FFMPEG
	const u32 magic = *(u32_le *)&m_mpegheader[0];
	if (magic != PSMF_MAGIC) {
		WARN_LOG_REPORT(Log::ME, "Could not setup streams, bad magic: %08x", magic);
		return false;
	}
	// Stream count lives at offset 0x80 (big-endian) in the PSMF header.
	int numStreams = *(u16_be *)&m_mpegheader[0x80];
	if (numStreams <= 0 || numStreams > 8) {
		// Looks crazy. Let's bail out and let FFmpeg handle it.
		WARN_LOG_REPORT(Log::ME, "Could not setup streams, unexpected stream count: %d", numStreams);
		return false;
	}

	// Looking good. Let's add those streams.
	// Stream table entries are 16 bytes each, starting at 0x82.
	int videoStreamNum = -1;
	for (int i = 0; i < numStreams; i++) {
		const u8 *const currentStreamAddr = m_mpegheader + 0x82 + i * 16;
		int streamId = currentStreamAddr[0];

		// We only set video streams. We demux the audio stream separately.
		if ((streamId & PSMF_VIDEO_STREAM_ID) == PSMF_VIDEO_STREAM_ID) {
			++videoStreamNum;
			addVideoStream(videoStreamNum, streamId);
		}
	}
	// Add the streams to meet the expectation.
	// (m_expectedVideoStreams may come from a savestate — see DoState.)
	for (int i = videoStreamNum + 1; i < m_expectedVideoStreams; i++) {
		addVideoStream(i);
	}
#endif

	return true;
}
272
273
// Creates the FFmpeg demuxer/decoder pipeline over our custom AVIO reader.
// keepReadPos preserves m_mpegheaderReadPos (used when restoring a
// savestate); otherwise probing restarts from the top of the header.
// Returns false if a context already exists, there is no data yet, or any
// FFmpeg setup step fails.
bool MediaEngine::openContext(bool keepReadPos) {
#ifdef USE_FFMPEG
	InitFFmpeg();

	if (m_pFormatCtx || !m_pdata)
		return false;
	if (!keepReadPos) {
		m_mpegheaderReadPos = 0;
	}
	m_decodingsize = 0;

	// The AVIO buffer must be big enough to hold the whole cached header.
	m_bufSize = std::max(m_bufSize, m_mpegheaderSize);
	u8 *tempbuf = (u8*)av_malloc(m_bufSize);

	m_pFormatCtx = avformat_alloc_context();
	// tempbuf ownership passes to m_pIOContext; it's freed via
	// m_pIOContext->buffer in closeContext().
	m_pIOContext = avio_alloc_context(tempbuf, m_bufSize, 0, (void*)this, &MpegReadbuffer, nullptr, nullptr);
	m_pFormatCtx->pb = m_pIOContext;

	// Open video file
	// Limit probing to the header so the ringbuffer isn't drained early.
	AVDictionary *open_opt = nullptr;
	av_dict_set_int(&open_opt, "probesize", m_mpegheaderSize, 0);
	if (avformat_open_input((AVFormatContext**)&m_pFormatCtx, nullptr, nullptr, &open_opt) != 0) {
		// NOTE(review): m_pIOContext (and its buffer) appear to survive this
		// failure path until the next closeContext() — confirm no leak.
		av_dict_free(&open_opt);
		return false;
	}
	av_dict_free(&open_opt);

	bool usedFFMPEGFindStreamInfo = false;
	if (!SetupStreams() || PSP_CoreParameter().compat.flags().UseFFMPEGFindStreamInfo) {
		// Fallback to old behavior. Reads too much and corrupts when game doesn't read fast enough.
		// SetupStreams sometimes work for newer FFmpeg 3.1+ now, but sometimes framerate is missing.
		WARN_LOG_REPORT_ONCE(setupStreams, Log::ME, "Failed to read valid video stream data from header");
		if (avformat_find_stream_info(m_pFormatCtx, nullptr) < 0) {
			closeContext();
			return false;
		}
		usedFFMPEGFindStreamInfo = true;
	}

	// A stale stream index (e.g. from a savestate) gets rediscovered below.
	if (m_videoStream >= (int)m_pFormatCtx->nb_streams) {
		WARN_LOG_REPORT(Log::ME, "Bad video stream %d", m_videoStream);
		m_videoStream = -1;
	}

	if (m_videoStream == -1) {
		// Find the first video stream
		for (int i = 0; i < (int)m_pFormatCtx->nb_streams; i++) {
			const AVStream *s = m_pFormatCtx->streams[i];
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
			AVMediaType type = s->codecpar->codec_type;
#else
			AVMediaType type = s->codec->codec_type;
#endif
			if (type == AVMEDIA_TYPE_VIDEO) {
				m_videoStream = i;
				break;
			}
		}
		if (m_videoStream == -1)
			return false;
	}

	// Here it shouldn't be possible for m_videoStream to be invalid.

	if (!setVideoStream(m_videoStream, true))
		return false;

	setVideoDim();
	m_audioContext = CreateAudioDecoder((PSPAudioType)m_audioType);
	m_isVideoEnd = false;

	// Compensate for find_stream_info's probing before decoding starts.
	if (PSP_CoreParameter().compat.flags().UseFFMPEGFindStreamInfo && usedFFMPEGFindStreamInfo) {
		m_mpegheaderReadPos++;
		av_seek_frame(m_pFormatCtx, m_videoStream, 0, 0);
	}
#endif // USE_FFMPEG
	return true;
}
351
352
// Frees every FFmpeg resource in reverse order of creation: RGB buffer and
// frames, the AVIO context (and the buffer it owns), per-stream codec
// contexts, the format context, and the swscale context.
// Idempotent: everything is null-checked and nulled afterwards.
void MediaEngine::closeContext() {
#ifdef USE_FFMPEG
	if (m_buffer)
		av_free(m_buffer);
	if (m_pFrameRGB)
		av_frame_free(&m_pFrameRGB);
	if (m_pFrame)
		av_frame_free(&m_pFrame);
	// The AVIO buffer was handed to avio_alloc_context() in openContext().
	if (m_pIOContext && m_pIOContext->buffer)
		av_free(m_pIOContext->buffer);
	if (m_pIOContext)
		av_free(m_pIOContext);
	for (auto &it : m_pCodecCtxs) {
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
		avcodec_free_context(&it.second);
#else
		avcodec_close(it.second);
#endif
	}
	m_pCodecCtxs.clear();
	// These are streams allocated from avformat_new_stream.
	for (auto &it : m_codecsToClose) {
#if LIBAVCODEC_VERSION_MAJOR >= 62
		avcodec_free_context(&it);
#else
		avcodec_close(it);
#endif
	}
	m_codecsToClose.clear();
	if (m_pFormatCtx)
		avformat_close_input(&m_pFormatCtx);
	sws_freeContext(m_sws_ctx);
	m_sws_ctx = nullptr;
	m_pIOContext = nullptr;
#endif
	m_buffer = nullptr;
}
389
390
// Initializes playback from the first chunk of a PSMF stream.
// buffer/readSize: initial data (must include the PSMF header with its
// timestamps and the MPEG offset at byte 8); RingbufferSize: the guest
// ringbuffer capacity, mirrored for both the raw queue and the demuxer.
// Any previous media state is discarded first.
bool MediaEngine::loadStream(const u8 *buffer, int readSize, int RingbufferSize)
{
	closeMedia();

	m_videopts = 0;
	m_lastPts = -1;
	m_audiopts = 0;
	m_ringbuffersize = RingbufferSize;
	// +2048 gives headroom for one extra sector beyond the ring size.
	m_pdata = new BufferQueue(RingbufferSize + 2048);
	m_pdata->push(buffer, readSize);
	m_firstTimeStamp = getMpegTimeStamp(buffer + PSMF_FIRST_TIMESTAMP_OFFSET);
	m_lastTimeStamp = getMpegTimeStamp(buffer + PSMF_LAST_TIMESTAMP_OFFSET);
	// Offset of MPEG data within the PSMF, stored big-endian at byte 8.
	int mpegoffset = (int)(*(s32_be*)(buffer + 8));
	m_demux = new MpegDemux(RingbufferSize + 2048, mpegoffset);
	m_demux->addStreamData(buffer, readSize);
	return true;
}
407
408
// Re-creates the stream state from the cached 2048-byte PSMF header,
// keeping the previous ringbuffer size. Called from DoState() on load.
bool MediaEngine::reloadStream()
{
	return loadStream(m_mpegheader, 2048, m_ringbuffersize);
}
412
413
// Registers an H.264 video stream with the FFmpeg format context, as
// SetupStreams() discovers them in the PSMF header (streamId == -1 derives
// the id from streamNum). Also bumps m_expectedVideoStreams so savestate
// restores re-create the same number of streams.
// Returns true if the stream exists or was created.
bool MediaEngine::addVideoStream(int streamNum, int streamId) {
#ifdef USE_FFMPEG
	if (m_pFormatCtx) {
		// no need to add an existing stream.
		if ((u32)streamNum < m_pFormatCtx->nb_streams)
			return true;
		AVCodec *h264_codec = avcodec_find_decoder(AV_CODEC_ID_H264);
		if (!h264_codec)
			return false;
		AVStream *stream = avformat_new_stream(m_pFormatCtx, h264_codec);
		if (stream) {
			// Reference ISO/IEC 13818-1.
			if (streamId == -1)
				streamId = PSMF_VIDEO_STREAM_ID | streamNum;

			// MPEG program stream id: 0x1E0..0x1EF for video.
			stream->id = 0x00000100 | streamId;
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
			stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
			stream->codecpar->codec_id = AV_CODEC_ID_H264;
#endif
			// Force full parsing since we feed raw PES data without probing.
#if LIBAVFORMAT_VERSION_MAJOR >= 59
			avpriv_stream_set_need_parsing(stream, AVSTREAM_PARSE_FULL);
#else
			stream->request_probe = 0;
			stream->need_parsing = AVSTREAM_PARSE_FULL;
#endif
			// We could set the width here, but we don't need to.
			if (streamNum >= m_expectedVideoStreams) {
				++m_expectedVideoStreams;
			}

			// Track the codec context so closeContext() can release it.
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(59, 16, 100)
			AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
			AVCodecContext *codecCtx = avcodec_alloc_context3(codec);
#else
			AVCodecContext *codecCtx = stream->codec;
#endif
			m_codecsToClose.push_back(codecCtx);
			return true;
		}
	}
#endif
	// Even on failure, remember that this stream index was expected.
	if (streamNum >= m_expectedVideoStreams) {
		++m_expectedVideoStreams;
	}
	return false;
}
460
461
// Accepts more stream data from the game. Data goes into both the raw
// ringbuffer (for FFmpeg's reader) and the audio demuxer. Once at least one
// full 2048-byte sector is queued and no FFmpeg context exists yet, the
// header is captured and the context opened.
// Returns the number of bytes accepted (0 if the ringbuffer was full).
int MediaEngine::addStreamData(const u8 *buffer, int addSize) {
	int size = addSize;
	if (size > 0 && m_pdata) {
		if (!m_pdata->push(buffer, size))
			size = 0;
		if (m_demux) {
			m_demux->addStreamData(buffer, addSize);
		}
#ifdef USE_FFMPEG
		if (!m_pFormatCtx && m_pdata->getQueueSize() >= 2048) {
			// Snapshot the header, trim it to the real stream offset (byte 8,
			// big-endian), consume it from the queue and start FFmpeg.
			m_mpegheaderSize = m_pdata->get_front(m_mpegheader, sizeof(m_mpegheader));
			int streamOffset = (int)(*(s32_be *)(m_mpegheader + 8));
			if (streamOffset <= m_mpegheaderSize) {
				m_mpegheaderSize = streamOffset;
				m_pdata->pop_front(0, m_mpegheaderSize);
				openContext();
			}
		}
#endif // USE_FFMPEG

		// We added data, so... not the end anymore?
		m_isVideoEnd = false;
	}
	return size;
}
486
487
// Seeks by decoding (and discarding) frames until video reaches `timestamp`,
// keeping audio within ~2 frames (4180 ticks each) behind video so they stay
// in sync. The shared `timeout` counter bounds total work; hitting it
// returns true (give up quietly) while a decode failure returns false.
bool MediaEngine::seekTo(s64 timestamp, int videoPixelMode) {
	if (timestamp <= 0) {
		return true;
	}

	// Just doing it the not so great way to be sure audio is in sync.
	int timeout = 1000;
	// 3003 = one NTSC frame at 90kHz; stop one frame short of the target.
	while (getVideoTimeStamp() < timestamp - 3003) {
		if (getAudioTimeStamp() < getVideoTimeStamp() - 4180 * 2) {
			getNextAudioFrame(NULL, NULL, NULL);
		}
		if (!stepVideo(videoPixelMode, true)) {
			return false;
		}
		if (--timeout <= 0) {
			return true;
		}
	}

	// Catch audio up to the final video position.
	while (getAudioTimeStamp() < getVideoTimeStamp() - 4180 * 2) {
		if (getNextAudioFrame(NULL, NULL, NULL) == 0) {
			return false;
		}
		if (--timeout <= 0) {
			return true;
		}
	}

	return true;
}
517
518
bool MediaEngine::setVideoStream(int streamNum, bool force) {
519
if (m_videoStream == streamNum && !force) {
520
// Yay, nothing to do.
521
return true;
522
}
523
524
#ifdef USE_FFMPEG
525
if (m_pFormatCtx && m_pCodecCtxs.find(streamNum) == m_pCodecCtxs.end()) {
526
// Get a pointer to the codec context for the video stream
527
if ((u32)streamNum >= m_pFormatCtx->nb_streams) {
528
return false;
529
}
530
531
AVStream *stream = m_pFormatCtx->streams[streamNum];
532
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
533
AVCodec *pCodec = avcodec_find_decoder(stream->codecpar->codec_id);
534
if (!pCodec) {
535
WARN_LOG_REPORT(Log::ME, "Could not find decoder for %d", (int)stream->codecpar->codec_id);
536
return false;
537
}
538
AVCodecContext *m_pCodecCtx = avcodec_alloc_context3(pCodec);
539
int paramResult = avcodec_parameters_to_context(m_pCodecCtx, stream->codecpar);
540
if (paramResult < 0) {
541
WARN_LOG_REPORT(Log::ME, "Failed to prepare context parameters: %08x", paramResult);
542
return false;
543
}
544
#else
545
AVCodecContext *m_pCodecCtx = stream->codec;
546
// Find the decoder for the video stream
547
AVCodec *pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id);
548
if (pCodec == nullptr) {
549
return false;
550
}
551
#endif
552
553
m_pCodecCtx->flags |= AV_CODEC_FLAG_OUTPUT_CORRUPT | AV_CODEC_FLAG_LOW_DELAY;
554
555
AVDictionary *opt = nullptr;
556
// Allow ffmpeg to use any number of threads it wants. Without this, it doesn't use threads.
557
av_dict_set(&opt, "threads", "0", 0);
558
int openResult = avcodec_open2(m_pCodecCtx, pCodec, &opt);
559
av_dict_free(&opt);
560
if (openResult < 0) {
561
return false;
562
}
563
564
m_pCodecCtxs[streamNum] = m_pCodecCtx;
565
}
566
#endif
567
m_videoStream = streamNum;
568
569
return true;
570
}
571
572
// Sets the output frame size (0,0 = use the codec's native size) and
// (re)creates the decode frame, swscale context and RGB output buffer.
// Returns false if no codec context exists yet or the size is still unknown
// (e.g. before the first frame has been parsed).
bool MediaEngine::setVideoDim(int width, int height)
{
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	if (codecIter == m_pCodecCtxs.end())
		return false;
	AVCodecContext *m_pCodecCtx = codecIter->second;

	if (width == 0 && height == 0)
	{
		// use the orignal video size
		m_desWidth = m_pCodecCtx->width;
		m_desHeight = m_pCodecCtx->height;
	}
	else
	{
		m_desWidth = width;
		m_desHeight = height;
	}

	// Allocate video frame
	if (!m_pFrame) {
		m_pFrame = av_frame_alloc();
	}

	// Reset sws so updateSwsFormat() below rebuilds it for the new size.
	sws_freeContext(m_sws_ctx);
	m_sws_ctx = nullptr;
	m_sws_fmt = -1;

	if (m_desWidth == 0 || m_desHeight == 0) {
		// Can't setup SWS yet, so stop for now.
		return false;
	}

	updateSwsFormat(GE_CMODE_32BIT_ABGR8888);

	// Allocate video frame for RGB24
	// NOTE(review): a second successful call re-allocates m_pFrameRGB and
	// m_buffer without freeing the old ones — confirm callers only reach
	// this once per openContext()/closeContext() cycle.
	m_pFrameRGB = av_frame_alloc();
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
	int numBytes = av_image_get_buffer_size((AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight, 1);
#else
	int numBytes = avpicture_get_size((AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
#endif
	m_buffer = (u8*)av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in m_pFrameRGB
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
	av_image_fill_arrays(m_pFrameRGB->data, m_pFrameRGB->linesize, m_buffer, (AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight, 1);
#else
	avpicture_fill((AVPicture *)m_pFrameRGB, m_buffer, (AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
#endif
#endif // USE_FFMPEG
	return true;
}
626
627
// Ensures the swscale context converts to the pixel format the game asked
// for; rebuilds it only when the desired format actually changed.
// Also forces full-range ("JPEG") off on both ends of the conversion.
void MediaEngine::updateSwsFormat(int videoPixelMode) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	AVPixelFormat swsDesired = getSwsFormat(videoPixelMode);
	if (swsDesired != m_sws_fmt && m_pCodecCtx != 0) {
		m_sws_fmt = swsDesired;
		// sws_getCachedContext reuses m_sws_ctx when parameters match.
		m_sws_ctx = sws_getCachedContext
			(
				m_sws_ctx,
				m_pCodecCtx->width,
				m_pCodecCtx->height,
				m_pCodecCtx->pix_fmt,
				m_desWidth,
				m_desHeight,
				(AVPixelFormat)m_sws_fmt,
				SWS_BILINEAR,
				NULL,
				NULL,
				NULL
			);

		int *inv_coefficients;
		int *coefficients;
		int srcRange, dstRange;
		int brightness, contrast, saturation;

		// Pin both source and destination to limited range (0), keeping the
		// existing coefficients and levels.
		if (sws_getColorspaceDetails(m_sws_ctx, &inv_coefficients, &srcRange, &coefficients, &dstRange, &brightness, &contrast, &saturation) != -1) {
			srcRange = 0;
			dstRange = 0;
			sws_setColorspaceDetails(m_sws_ctx, inv_coefficients, srcRange, coefficients, dstRange, brightness, contrast, saturation);
		}
	}
#endif
}
663
664
// Decodes the next video frame. When skipFrame is false the decoded frame is
// also converted (sws_scale) into m_pFrameRGB for writeVideoImage(). Updates
// m_videopts using the frame's best-effort pts/duration, falling back to a
// fixed 3003-tick step (29.97fps at 90kHz) when timing info is missing.
// Returns true if a frame was produced; sets m_isVideoEnd at stream end.
bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if (!m_pFrame)
		return false;

	AVPacket packet;
	av_init_packet(&packet);
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2() / avcodec_receive_frame().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() / avcodec_send_packet() gives us the re-ordered frames with a NULL packet.
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
			if (dataEnd)
				av_packet_unref(&packet);
#else
			if (dataEnd)
				av_free_packet(&packet);
#endif

			// New API: send the packet (if any), then try to receive a frame.
			// `result` mirrors the old decode_video2 convention: >0 means a
			// frame was consumed/produced, 0 means "try again".
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 48, 101)
			if (packet.size != 0)
				avcodec_send_packet(m_pCodecCtx, &packet);
			int result = avcodec_receive_frame(m_pCodecCtx, m_pFrame);
			if (result == 0) {
				result = 1;
				frameFinished = 1;
			} else if (result == AVERROR(EAGAIN)) {
				result = 0;
				frameFinished = 0;
			} else {
				frameFinished = 0;
			}
#else
			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
#endif
			if (frameFinished) {
				// First frame: the codec now knows its size, set up output.
				if (!m_pFrameRGB) {
					setVideoDim();
				}
				if (m_pFrameRGB && !skipFrame) {
					updateSwsFormat(videoPixelMode);
					// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
					// Update the linesize for the new format too. We started with the largest size, so it should fit.
					m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

					sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
						m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
				}

				// Field names for pts/duration changed across FFmpeg majors.
#if LIBAVUTIL_VERSION_MAJOR >= 59
				int64_t bestPts = m_pFrame->best_effort_timestamp;
				int64_t ptsDuration = m_pFrame->duration;
#elif LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(55, 58, 100)
				int64_t bestPts = m_pFrame->best_effort_timestamp;
				int64_t ptsDuration = m_pFrame->pkt_duration;
#else
				int64_t bestPts = av_frame_get_best_effort_timestamp(m_pFrame);
				int64_t ptsDuration = av_frame_get_pkt_duration(m_pFrame);
#endif
				if (ptsDuration == 0) {
					// No duration: advance by one NTSC frame unless the pts
					// itself moved since last time.
					if (m_lastPts == bestPts - m_firstTimeStamp || bestPts == AV_NOPTS_VALUE) {
						// TODO: Assuming 29.97 if missing.
						m_videopts += 3003;
					} else {
						m_videopts = bestPts - m_firstTimeStamp;
						m_lastPts = m_videopts;
					}
				} else if (bestPts != AV_NOPTS_VALUE) {
					m_videopts = bestPts + ptsDuration - m_firstTimeStamp;
					m_lastPts = m_videopts;
				} else {
					m_videopts += ptsDuration;
					m_lastPts = m_videopts;
				}
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
		av_packet_unref(&packet);
#else
		av_free_packet(&packet);
#endif
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
773
774
// Helpers that null out alpha (which seems to be the case on the PSP.)
775
// Some games depend on this, for example Sword Art Online (doesn't clear A's from buffer.)
776
// Copies one row of 32-bit pixels, clearing the alpha byte of each (the
// PSP outputs video with A = 0; e.g. Sword Art Online depends on it).
// SSE2/NEON paths handle 8 pixels per iteration; the scalar tail loop
// finishes the remainder (and the whole row on other architectures).
inline void writeVideoLineRGBA(void *destp, const void *srcp, int width) {
	// TODO: Use SSE/NEON, investigate why AV_PIX_FMT_RGB0 does not work.
	u32_le *dest = (u32_le *)destp;
	const u32_le *src = (u32_le *)srcp;

	int count = width;

#if PPSSPP_ARCH(SSE2)
	__m128i mask = _mm_set1_epi32(0x00FFFFFF);
	while (count >= 8) {
		__m128i pixels1 = _mm_and_si128(_mm_loadu_si128((const __m128i *)src), mask);
		__m128i pixels2 = _mm_and_si128(_mm_loadu_si128((const __m128i *)src + 1), mask);
		_mm_storeu_si128((__m128i *)dest, pixels1);
		_mm_storeu_si128((__m128i *)dest + 1, pixels2);
		src += 8;
		dest += 8;
		count -= 8;
	}
#elif PPSSPP_ARCH(ARM_NEON)
	uint32x4_t mask = vdupq_n_u32(0x00FFFFFF);
	while (count >= 8) {
		uint32x4_t pixels1 = vandq_u32(vld1q_u32(src), mask);
		uint32x4_t pixels2 = vandq_u32(vld1q_u32(src + 4), mask);
		vst1q_u32(dest, pixels1);
		vst1q_u32(dest + 4, pixels2);
		src += 8;
		dest += 8;
		count -= 8;
	}
#endif
	const u32 mask32 = 0x00FFFFFF;
	DO_NOT_VECTORIZE_LOOP
	while (count--) {
		*dest++ = *src++ & mask32;
	}
}
812
813
inline void writeVideoLineABGR5650(void *destp, const void *srcp, int width) {
814
memcpy(destp, srcp, width * sizeof(u16));
815
}
816
817
inline void writeVideoLineABGR5551(void *destp, const void *srcp, int width) {
818
// TODO: Use SSE/NEON.
819
u16_le *dest = (u16_le *)destp;
820
const u16_le *src = (u16_le *)srcp;
821
822
const u16 mask = 0x7FFF;
823
for (int i = 0; i < width; ++i) {
824
dest[i] = src[i] & mask;
825
}
826
}
827
828
inline void writeVideoLineABGR4444(void *destp, const void *srcp, int width) {
829
// TODO: Use SSE/NEON.
830
u16_le *dest = (u16_le *)destp;
831
const u16_le *src = (u16_le *)srcp;
832
833
const u16 mask = 0x0FFF;
834
for (int i = 0; i < width; ++i) {
835
dest[i] = src[i] & mask;
836
}
837
}
838
839
// Writes the current decoded frame into guest memory at bufferPtr, with
// frameWidth pixels per output row in the requested pixel format, nulling
// alpha as the PSP does. Swizzles on the fly when targeting the swizzled
// VRAM mirror. Returns the number of bytes written (0 on error).
int MediaEngine::writeVideoImage(u32 bufferPtr, int frameWidth, int videoPixelMode) {
	int videoLineSize = 0;
	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		videoLineSize = frameWidth * sizeof(u32);
		break;
	case GE_CMODE_16BIT_BGR5650:
	case GE_CMODE_16BIT_ABGR5551:
	case GE_CMODE_16BIT_ABGR4444:
		videoLineSize = frameWidth * sizeof(u16);
		break;
	}

	int videoImageSize = videoLineSize * m_desHeight;

	if (!Memory::IsValidRange(bufferPtr, videoImageSize) || frameWidth > 2048) {
		// Clearly invalid values. Let's just not.
		ERROR_LOG_REPORT(Log::ME, "Ignoring invalid video decode address %08x/%x", bufferPtr, frameWidth);
		return 0;
	}

	u8 *buffer = Memory::GetPointerWriteUnchecked(bufferPtr);

#ifdef USE_FFMPEG
	if (!m_pFrame || !m_pFrameRGB)
		return 0;

	// lock the image size
	int height = m_desHeight;
	int width = m_desWidth;
	u8 *imgbuf = buffer;
	const u8 *data = m_pFrameRGB->data[0];

	// Bit 0x00200000 selects the swizzled VRAM mirror; stage through a
	// temporary buffer and swizzle into place afterwards.
	bool swizzle = Memory::IsVRAMAddress(bufferPtr) && (bufferPtr & 0x00200000) == 0x00200000;
	if (swizzle) {
		imgbuf = new u8[videoImageSize];
	}

	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		for (int y = 0; y < height; y++) {
			writeVideoLineRGBA(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u32);
		}
		break;

	case GE_CMODE_16BIT_BGR5650:
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5650(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u16);
		}
		break;

	case GE_CMODE_16BIT_ABGR5551:
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5551(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u16);
		}
		break;

	case GE_CMODE_16BIT_ABGR4444:
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR4444(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u16);
		}
		break;

	default:
		ERROR_LOG_REPORT(Log::ME, "Unsupported video pixel format %d", videoPixelMode);
		break;
	}

	if (swizzle) {
		// Swizzle operates on 16-byte-wide, 8-row blocks.
		const int bxc = videoLineSize / 16;
		int byc = (height + 7) / 8;
		if (byc == 0)
			byc = 1;

		DoSwizzleTex16((const u32 *)imgbuf, buffer, bxc, byc, videoLineSize);
		delete [] imgbuf;
	}

	NotifyMemInfo(MemBlockFlags::WRITE, bufferPtr, videoImageSize, "VideoDecode");

	return videoImageSize;
#endif // USE_FFMPEG
	return 0;
}
927
928
// Like writeVideoImage(), but copies only the (xpos, ypos, width, height)
// sub-rectangle of the decoded frame, clamped to the frame's dimensions.
// Returns the number of bytes written (videoLineSize * the requested
// height), or 0 on error.
int MediaEngine::writeVideoImageWithRange(u32 bufferPtr, int frameWidth, int videoPixelMode,
	                             int xpos, int ypos, int width, int height) {
	int videoLineSize = 0;
	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		videoLineSize = frameWidth * sizeof(u32);
		break;
	case GE_CMODE_16BIT_BGR5650:
	case GE_CMODE_16BIT_ABGR5551:
	case GE_CMODE_16BIT_ABGR4444:
		videoLineSize = frameWidth * sizeof(u16);
		break;
	}
	int videoImageSize = videoLineSize * height;

	if (!Memory::IsValidRange(bufferPtr, videoImageSize) || frameWidth > 2048) {
		// Clearly invalid values. Let's just not.
		ERROR_LOG_REPORT(Log::ME, "Ignoring invalid video decode address %08x/%x", bufferPtr, frameWidth);
		return 0;
	}

	u8 *buffer = Memory::GetPointerWriteUnchecked(bufferPtr);

#ifdef USE_FFMPEG
	if (!m_pFrame || !m_pFrameRGB)
		return 0;

	// lock the image size
	u8 *imgbuf = buffer;
	const u8 *data = m_pFrameRGB->data[0];

	// Bit 0x00200000 selects the swizzled VRAM mirror; stage through a
	// temporary buffer and swizzle into place afterwards.
	bool swizzle = Memory::IsVRAMAddress(bufferPtr) && (bufferPtr & 0x00200000) == 0x00200000;
	if (swizzle) {
		imgbuf = new u8[videoImageSize];
	}

	// Clamp the rectangle to what the frame actually contains.
	if (width > m_desWidth - xpos)
		width = m_desWidth - xpos;
	if (height > m_desHeight - ypos)
		height = m_desHeight - ypos;

	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		data += (ypos * m_desWidth + xpos) * sizeof(u32);
		for (int y = 0; y < height; y++) {
			writeVideoLineRGBA(imgbuf, data, width);
			data += m_desWidth * sizeof(u32);
			imgbuf += videoLineSize;
		}
		break;

	case GE_CMODE_16BIT_BGR5650:
		data += (ypos * m_desWidth + xpos) * sizeof(u16);
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5650(imgbuf, data, width);
			data += m_desWidth * sizeof(u16);
			imgbuf += videoLineSize;
		}
		break;

	case GE_CMODE_16BIT_ABGR5551:
		data += (ypos * m_desWidth + xpos) * sizeof(u16);
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5551(imgbuf, data, width);
			data += m_desWidth * sizeof(u16);
			imgbuf += videoLineSize;
		}
		break;

	case GE_CMODE_16BIT_ABGR4444:
		data += (ypos * m_desWidth + xpos) * sizeof(u16);
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR4444(imgbuf, data, width);
			data += m_desWidth * sizeof(u16);
			imgbuf += videoLineSize;
		}
		break;

	default:
		ERROR_LOG_REPORT(Log::ME, "Unsupported video pixel format %d", videoPixelMode);
		break;
	}

	if (swizzle) {
		WARN_LOG_REPORT_ONCE(vidswizzle, Log::ME, "Swizzling Video with range");

		// Swizzle operates on 16-byte-wide, 8-row blocks.
		const int bxc = videoLineSize / 16;
		int byc = (height + 7) / 8;
		if (byc == 0)
			byc = 1;

		// NOTE(review): imgbuf was advanced by the copy loops above, so this
		// swizzles from past the staged data — confirm intended source pointer.
		DoSwizzleTex16((const u32 *)imgbuf, buffer, bxc, byc, videoLineSize);
		delete [] imgbuf;
	}
	NotifyMemInfo(MemBlockFlags::WRITE, bufferPtr, videoImageSize, "VideoDecodeRange");

	return videoImageSize;
#endif // USE_FFMPEG
	return 0;
}
1028
1029
// Returns a pointer to the current frame's RGB pixel data, or nullptr when
// no frame buffer exists (FFmpeg disabled, or setVideoDim() not yet run).
u8 *MediaEngine::getFrameImage() {
#ifdef USE_FFMPEG
	// Fix: m_pFrameRGB is null until the first frame is decoded — don't
	// dereference it blindly.
	return m_pFrameRGB ? m_pFrameRGB->data[0] : nullptr;
#else
	return nullptr;
#endif
}
1036
1037
int MediaEngine::getRemainSize() {
1038
if (!m_pdata)
1039
return 0;
1040
return std::max(m_pdata->getRemainSize() - m_decodingsize - 2048, 0);
1041
}
1042
1043
int MediaEngine::getAudioRemainSize() {
1044
if (!m_demux) {
1045
// No audio, so it can't be full, return video instead.
1046
return getRemainSize();
1047
}
1048
1049
return m_demux->getRemainSize();
1050
}
1051
1052
// Pulls the next demuxed audio frame (optionally returning its data pointer
// and header codes) and advances m_audiopts by one frame (4180 ticks),
// resyncing to the demuxer's pts when one is available.
// Returns the frame size, or 0 when no frame (or no demuxer) exists.
int MediaEngine::getNextAudioFrame(u8 **buf, int *headerCode1, int *headerCode2) {
	// Fix: seekTo() can call this before loadStream() created the demuxer;
	// guard instead of dereferencing a null m_demux.
	if (!m_demux)
		return 0;

	// When getting a frame, increment pts
	m_audiopts += 4180;

	// Demux now (rather than on add data) so that we select the right stream.
	m_demux->demux(m_audioStream);

	s64 pts = 0;
	int result = m_demux->getNextAudioFrame(buf, headerCode1, headerCode2, &pts);
	if (pts != 0) {
		// m_audiopts is supposed to be after the returned frame.
		m_audiopts = pts - m_firstTimeStamp + 4180;
	}
	return result;
}
1067
1068
// Decodes one audio frame into guest memory at bufferPtr (up to 8192 bytes
// of stereo s16 samples). Returns 0 when there's no demuxer/frame, and a
// fixed 0x2000 otherwise, matching what the PSP API reports.
int MediaEngine::getAudioSamples(u32 bufferPtr) {
	int16_t *buffer = (int16_t *)Memory::GetPointerWriteRange(bufferPtr, 8192);
	if (buffer == nullptr) {
		ERROR_LOG_REPORT(Log::ME, "Ignoring bad audio decode address %08x during video playback", bufferPtr);
	}
	// NOTE(review): execution continues even when buffer == nullptr, so
	// Decode() below may receive a null output pointer — confirm the audio
	// decoder tolerates that.
	if (!m_demux) {
		return 0;
	}

	u8 *audioFrame = nullptr;
	int headerCode1, headerCode2;
	int frameSize = getNextAudioFrame(&audioFrame, &headerCode1, &headerCode2);
	if (frameSize == 0) {
		return 0;
	}
	int outSamples = 0;

	if (m_audioContext != nullptr) {
		if (headerCode1 == 0x24) {
			// This means mono audio - tell the decoder to expect it before the first frame.
			// Note that it will always send us back stereo audio.
			m_audioContext->SetChannels(1);
		}

		int inbytesConsumed = 0;
		if (!m_audioContext->Decode(audioFrame, frameSize, &inbytesConsumed, 2, buffer, &outSamples)) {
			ERROR_LOG(Log::ME, "Audio (%s) decode failed during video playback", GetCodecName(m_audioType));
		}
		// Stereo s16 output: 2 channels * 2 bytes per sample.
		int outBytes = outSamples * sizeof(int16_t) * 2;

		NotifyMemInfo(MemBlockFlags::WRITE, bufferPtr, outBytes, "VideoDecodeAudio");
	}

	return 0x2000;
}
1103
1104
bool MediaEngine::IsNoAudioData() {
1105
if (!m_demux) {
1106
return true;
1107
}
1108
1109
// Let's double check. Here should be a safe enough place to demux.
1110
m_demux->demux(m_audioStream);
1111
return !m_demux->hasNextAudioFrame(NULL, NULL, NULL, NULL);
1112
}
1113
1114
bool MediaEngine::IsActuallyPlayingAudio() {
1115
return getAudioTimeStamp() >= 0;
1116
}
1117
1118
// Current video pts, relative to the stream's first timestamp.
s64 MediaEngine::getVideoTimeStamp() {
	return m_videopts;
}
1121
1122
// Pts of the most recently returned audio frame (m_audiopts points one
// frame, 4180 ticks, past it), or -1 when no demuxer exists.
s64 MediaEngine::getAudioTimeStamp() {
	if (!m_demux)
		return -1;
	return m_audiopts - 4180;
}
1125
1126
// Stream duration: last header timestamp relative to the first one.
// Returns 0 before loadStream() has run.
s64 MediaEngine::getLastTimeStamp() {
	return m_pdata ? m_lastTimeStamp - m_firstTimeStamp : 0;
}
1131
1132