CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
hrydgard

CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!

GitHub Repository: hrydgard/ppsspp
Path: blob/master/Core/HW/MediaEngine.cpp
Views: 1401
1
// Copyright (c) 2012- PPSSPP Project.
2
3
// This program is free software: you can redistribute it and/or modify
4
// it under the terms of the GNU General Public License as published by
5
// the Free Software Foundation, version 2.0 or later versions.
6
7
// This program is distributed in the hope that it will be useful,
8
// but WITHOUT ANY WARRANTY; without even the implied warranty of
9
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
// GNU General Public License 2.0 for more details.
11
12
// A copy of the GPL 2.0 should have been included with the program.
13
// If not, see http://www.gnu.org/licenses/
14
15
// Official git repository and contact information can be found at
16
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
17
18
#include "Common/Serialize/SerializeFuncs.h"
19
#include "Common/Math/CrossSIMD.h"
20
#include "Core/Config.h"
21
#include "Core/Core.h"
22
#include "Core/Debugger/MemBlockInfo.h"
23
#include "Core/HW/MediaEngine.h"
24
#include "Core/MemMap.h"
25
#include "Core/MIPS/MIPS.h"
26
#include "Core/Reporting.h"
27
#include "GPU/GPUState.h" // Used by TextureDecoder.h when templates get instanced
28
#include "GPU/Common/TextureDecoder.h"
29
#include "GPU/GPUInterface.h"
30
#include "Core/HW/SimpleAudioDec.h"
31
32
#include <algorithm>
33
34
#ifdef _M_SSE
35
#include <emmintrin.h>
36
#endif
37
38
#if PPSSPP_ARCH(ARM_NEON)
39
#if defined(_MSC_VER) && PPSSPP_ARCH(ARM64)
40
#include <arm64_neon.h>
41
#else
42
#include <arm_neon.h>
43
#endif
44
#endif
45
46
#ifdef USE_FFMPEG
47
48
extern "C" {
49
50
#include "libavcodec/avcodec.h"
51
#include "libavformat/avformat.h"
52
#include "libavutil/imgutils.h"
53
#include "libswscale/swscale.h"
54
55
}
56
#endif // USE_FFMPEG
57
58
#ifdef USE_FFMPEG
59
60
#include "Core/FFMPEGCompat.h"
61
62
// Maps a PSP GE pixel mode onto the matching little-endian FFmpeg pixel
// format for swscale output. Returns (AVPixelFormat)0 for unknown modes.
static AVPixelFormat getSwsFormat(int pspFormat)
{
	switch (pspFormat)
	{
	case GE_CMODE_16BIT_BGR5650:  return AV_PIX_FMT_BGR565LE;
	case GE_CMODE_16BIT_ABGR5551: return AV_PIX_FMT_BGR555LE;
	case GE_CMODE_16BIT_ABGR4444: return AV_PIX_FMT_BGR444LE;
	case GE_CMODE_32BIT_ABGR8888: return AV_PIX_FMT_RGBA;
	default:
		ERROR_LOG(Log::ME, "Unknown pixel format");
		return (AVPixelFormat)0;
	}
}
79
80
void ffmpeg_logger(void *, int level, const char *format, va_list va_args) {
81
// We're still called even if the level doesn't match.
82
if (level > av_log_get_level())
83
return;
84
85
char tmp[1024];
86
vsnprintf(tmp, sizeof(tmp), format, va_args);
87
tmp[sizeof(tmp) - 1] = '\0';
88
89
// Strip off any trailing newline.
90
size_t len = strlen(tmp);
91
if (tmp[len - 1] == '\n')
92
tmp[len - 1] = '\0';
93
94
if (!strcmp(tmp, "GHA Phase shifting")) {
95
Reporting::ReportMessage("Atrac3+: GHA phase shifting");
96
}
97
98
// Let's color the log line appropriately.
99
if (level <= AV_LOG_PANIC) {
100
ERROR_LOG(Log::ME, "FF: %s", tmp);
101
} else if (level >= AV_LOG_VERBOSE) {
102
DEBUG_LOG(Log::ME, "FF: %s", tmp);
103
} else {
104
INFO_LOG(Log::ME, "FF: %s", tmp);
105
}
106
}
107
108
bool InitFFmpeg() {
109
#ifdef _DEBUG
110
av_log_set_level(AV_LOG_VERBOSE);
111
#else
112
av_log_set_level(AV_LOG_WARNING);
113
#endif
114
av_log_set_callback(&ffmpeg_logger);
115
116
return true;
117
}
118
#endif
119
120
static int getPixelFormatBytes(int pspFormat)
121
{
122
switch (pspFormat)
123
{
124
case GE_CMODE_16BIT_BGR5650:
125
case GE_CMODE_16BIT_ABGR5551:
126
case GE_CMODE_16BIT_ABGR4444:
127
return 2;
128
case GE_CMODE_32BIT_ABGR8888:
129
return 4;
130
131
default:
132
ERROR_LOG(Log::ME, "Unknown pixel format");
133
return 4;
134
}
135
}
136
137
// Sets up defaults only; real stream state is created by loadStream().
MediaEngine::MediaEngine() {
	m_bufSize = 0x2000;
	m_mpegheaderSize = sizeof(m_mpegheader);
	// Movies only ever carry AT3+ audio.
	m_audioType = PSP_CODEC_AT3PLUS;
}
143
144
// Releases all decode/demux state via closeMedia().
MediaEngine::~MediaEngine() {
	closeMedia();
}
147
148
void MediaEngine::closeMedia() {
149
closeContext();
150
delete m_pdata;
151
delete m_demux;
152
m_pdata = nullptr;
153
m_demux = nullptr;
154
AudioClose(&m_audioContext);
155
m_isVideoEnd = false;
156
}
157
158
// Savestate serialization. Section versions gate fields added over time:
// v4 adds m_mpegheaderSize, v5 m_mpegheaderReadPos, v6 m_expectedVideoStreams,
// v7 m_lastPts. On load, the stream/demuxer and the FFmpeg context are
// rebuilt (reloadStream / openContext) rather than serialized directly.
void MediaEngine::DoState(PointerWrap &p) {
	auto s = p.Section("MediaEngine", 1, 7);
	if (!s)
		return;

	Do(p, m_videoStream);
	Do(p, m_audioStream);

	DoArray(p, m_mpegheader, sizeof(m_mpegheader));
	if (s >= 4) {
		Do(p, m_mpegheaderSize);
	} else {
		// Older states: assume the full header was cached.
		m_mpegheaderSize = sizeof(m_mpegheader);
	}
	if (s >= 5) {
		Do(p, m_mpegheaderReadPos);
	} else {
		// Older states: treat the header as fully consumed.
		m_mpegheaderReadPos = m_mpegheaderSize;
	}
	if (s >= 6) {
		Do(p, m_expectedVideoStreams);
	} else {
		m_expectedVideoStreams = 0;
	}

	Do(p, m_ringbuffersize);

	u32 hasloadStream = m_pdata != nullptr;
	Do(p, hasloadStream);
	// On load, rebuild the ring buffer and demuxer from the cached header.
	if (hasloadStream && p.mode == p.MODE_READ)
		reloadStream();
#ifdef USE_FFMPEG
	u32 hasopencontext = m_pFormatCtx != nullptr;
#else
	u32 hasopencontext = false;
#endif
	Do(p, hasopencontext);
	if (m_pdata)
		m_pdata->DoState(p);
	if (m_demux)
		m_demux->DoState(p);

	Do(p, m_videopts);
	if (s >= 7) {
		Do(p, m_lastPts);
	} else {
		m_lastPts = m_videopts;
	}
	Do(p, m_audiopts);

	if (s >= 2) {
		Do(p, m_firstTimeStamp);
		Do(p, m_lastTimeStamp);
	}

	// Recreate the FFmpeg context only after the buffers above are restored;
	// keepReadPos=true preserves the header cursor we just loaded.
	if (hasopencontext && p.mode == p.MODE_READ) {
		openContext(true);
	}

	Do(p, m_isVideoEnd);
	// Removed field, kept only to preserve the savestate layout.
	bool noAudioDataRemoved;
	Do(p, noAudioDataRemoved);
	if (s >= 3) {
		Do(p, m_audioType);
	} else {
		m_audioType = PSP_CODEC_AT3PLUS;
	}
}
226
227
int MediaEngine::MpegReadbuffer(void *opaque, uint8_t *buf, int buf_size) {
228
MediaEngine *mpeg = (MediaEngine *)opaque;
229
230
int size = buf_size;
231
if (mpeg->m_mpegheaderReadPos < mpeg->m_mpegheaderSize) {
232
size = std::min(buf_size, mpeg->m_mpegheaderSize - mpeg->m_mpegheaderReadPos);
233
memcpy(buf, mpeg->m_mpegheader + mpeg->m_mpegheaderReadPos, size);
234
mpeg->m_mpegheaderReadPos += size;
235
} else {
236
size = mpeg->m_pdata->pop_front(buf, buf_size);
237
if (size > 0)
238
mpeg->m_decodingsize = size;
239
}
240
return size;
241
}
242
243
// Registers the PSMF's video streams with FFmpeg straight from the header's
// stream table, avoiding a costly avformat_find_stream_info() probe.
// Returns false when the header looks invalid so the caller can fall back.
bool MediaEngine::SetupStreams() {
#ifdef USE_FFMPEG
	const u32 magic = *(u32_le *)&m_mpegheader[0];
	if (magic != PSMF_MAGIC) {
		WARN_LOG_REPORT(Log::ME, "Could not setup streams, bad magic: %08x", magic);
		return false;
	}
	// The stream count lives at offset 0x80 of the PSMF header (big-endian).
	int numStreams = *(u16_be *)&m_mpegheader[0x80];
	if (numStreams <= 0 || numStreams > 8) {
		// Looks crazy. Let's bail out and let FFmpeg handle it.
		WARN_LOG_REPORT(Log::ME, "Could not setup streams, unexpected stream count: %d", numStreams);
		return false;
	}

	// Looking good. Let's add those streams.
	// 16-byte stream descriptors start at offset 0x82; byte 0 is the stream id.
	int videoStreamNum = -1;
	for (int i = 0; i < numStreams; i++) {
		const u8 *const currentStreamAddr = m_mpegheader + 0x82 + i * 16;
		int streamId = currentStreamAddr[0];

		// We only set video streams. We demux the audio stream separately.
		if ((streamId & PSMF_VIDEO_STREAM_ID) == PSMF_VIDEO_STREAM_ID) {
			++videoStreamNum;
			addVideoStream(videoStreamNum, streamId);
		}
	}
	// Add the streams to meet the expectation.
	// (m_expectedVideoStreams may come from a savestate - see DoState.)
	for (int i = videoStreamNum + 1; i < m_expectedVideoStreams; i++) {
		addVideoStream(i);
	}
#endif

	return true;
}
277
278
// Creates the FFmpeg demux/decode context over our ring buffer using a
// custom AVIO read callback (MpegReadbuffer). keepReadPos preserves the
// header read cursor (used when restoring a savestate, see DoState).
bool MediaEngine::openContext(bool keepReadPos) {
#ifdef USE_FFMPEG
	InitFFmpeg();

	// Already open, or nothing loaded yet.
	if (m_pFormatCtx || !m_pdata)
		return false;
	if (!keepReadPos) {
		m_mpegheaderReadPos = 0;
	}
	m_decodingsize = 0;

	m_bufSize = std::max(m_bufSize, m_mpegheaderSize);
	u8 *tempbuf = (u8*)av_malloc(m_bufSize);

	m_pFormatCtx = avformat_alloc_context();
	m_pIOContext = avio_alloc_context(tempbuf, m_bufSize, 0, (void*)this, &MpegReadbuffer, nullptr, nullptr);
	m_pFormatCtx->pb = m_pIOContext;

	// Open video file
	// Limit probing to the header so FFmpeg doesn't eat into stream data.
	AVDictionary *open_opt = nullptr;
	av_dict_set_int(&open_opt, "probesize", m_mpegheaderSize, 0);
	if (avformat_open_input((AVFormatContext**)&m_pFormatCtx, nullptr, nullptr, &open_opt) != 0) {
		av_dict_free(&open_opt);
		return false;
	}
	av_dict_free(&open_opt);

	bool usedFFMPEGFindStreamInfo = false;
	if (!SetupStreams() || PSP_CoreParameter().compat.flags().UseFFMPEGFindStreamInfo) {
		// Fallback to old behavior. Reads too much and corrupts when game doesn't read fast enough.
		// SetupStreams sometimes work for newer FFmpeg 3.1+ now, but sometimes framerate is missing.
		WARN_LOG_REPORT_ONCE(setupStreams, Log::ME, "Failed to read valid video stream data from header");
		if (avformat_find_stream_info(m_pFormatCtx, nullptr) < 0) {
			closeContext();
			return false;
		}
		usedFFMPEGFindStreamInfo = true;
	}

	if (m_videoStream >= (int)m_pFormatCtx->nb_streams) {
		WARN_LOG_REPORT(Log::ME, "Bad video stream %d", m_videoStream);
		m_videoStream = -1;
	}

	if (m_videoStream == -1) {
		// Find the first video stream
		for (int i = 0; i < (int)m_pFormatCtx->nb_streams; i++) {
			const AVStream *s = m_pFormatCtx->streams[i];
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
			AVMediaType type = s->codecpar->codec_type;
#else
			AVMediaType type = s->codec->codec_type;
#endif
			if (type == AVMEDIA_TYPE_VIDEO) {
				m_videoStream = i;
				break;
			}
		}
		if (m_videoStream == -1)
			return false;
	}

	if (!setVideoStream(m_videoStream, true))
		return false;

	setVideoDim();
	m_audioContext = CreateAudioDecoder((PSPAudioType)m_audioType);
	m_isVideoEnd = false;

	// NOTE(review): presumably compensates for data find_stream_info consumed
	// before rewinding - confirm against the compat flag's intent.
	if (PSP_CoreParameter().compat.flags().UseFFMPEGFindStreamInfo && usedFFMPEGFindStreamInfo) {
		m_mpegheaderReadPos++;
		av_seek_frame(m_pFormatCtx, m_videoStream, 0, 0);
	}
#endif // USE_FFMPEG
	return true;
}
354
355
// Frees every FFmpeg object the engine owns: the RGB buffer and frames, the
// custom AVIO buffer/context, per-stream codec contexts, codec contexts
// created for avformat_new_stream streams, the format context, and swscale.
void MediaEngine::closeContext()
{
#ifdef USE_FFMPEG
	if (m_buffer)
		av_free(m_buffer);
	if (m_pFrameRGB)
		av_frame_free(&m_pFrameRGB);
	if (m_pFrame)
		av_frame_free(&m_pFrame);
	// Free whatever buffer the AVIO context currently points at (FFmpeg may
	// have replaced the one we allocated), then the context itself.
	if (m_pIOContext && m_pIOContext->buffer)
		av_free(m_pIOContext->buffer);
	if (m_pIOContext)
		av_free(m_pIOContext);
	for (auto it : m_pCodecCtxs) {
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
		avcodec_free_context(&it.second);
#else
		avcodec_close(it.second);
#endif
	}
	m_pCodecCtxs.clear();
	// These are streams allocated from avformat_new_stream.
	for (auto it : m_codecsToClose) {
		avcodec_close(it);
	}
	m_codecsToClose.clear();
	if (m_pFormatCtx)
		avformat_close_input(&m_pFormatCtx);
	sws_freeContext(m_sws_ctx);
	m_sws_ctx = nullptr;
	m_pIOContext = nullptr;
#endif
	m_buffer = nullptr;
}
389
390
bool MediaEngine::loadStream(const u8 *buffer, int readSize, int RingbufferSize)
391
{
392
closeMedia();
393
394
m_videopts = 0;
395
m_lastPts = -1;
396
m_audiopts = 0;
397
m_ringbuffersize = RingbufferSize;
398
m_pdata = new BufferQueue(RingbufferSize + 2048);
399
m_pdata->push(buffer, readSize);
400
m_firstTimeStamp = getMpegTimeStamp(buffer + PSMF_FIRST_TIMESTAMP_OFFSET);
401
m_lastTimeStamp = getMpegTimeStamp(buffer + PSMF_LAST_TIMESTAMP_OFFSET);
402
int mpegoffset = (int)(*(s32_be*)(buffer + 8));
403
m_demux = new MpegDemux(RingbufferSize + 2048, mpegoffset);
404
m_demux->addStreamData(buffer, readSize);
405
return true;
406
}
407
408
bool MediaEngine::reloadStream()
409
{
410
return loadStream(m_mpegheader, 2048, m_ringbuffersize);
411
}
412
413
// Registers video stream `streamNum` (PSMF id `streamId`, or derived from
// the stream number when -1) with the FFmpeg format context as H.264.
// Also bumps m_expectedVideoStreams so savestates can re-create the count.
bool MediaEngine::addVideoStream(int streamNum, int streamId) {
#ifdef USE_FFMPEG
	if (m_pFormatCtx) {
		// no need to add an existing stream.
		if ((u32)streamNum < m_pFormatCtx->nb_streams)
			return true;
		AVCodec *h264_codec = avcodec_find_decoder(AV_CODEC_ID_H264);
		if (!h264_codec)
			return false;
		AVStream *stream = avformat_new_stream(m_pFormatCtx, h264_codec);
		if (stream) {
			// Reference ISO/IEC 13818-1.
			if (streamId == -1)
				streamId = PSMF_VIDEO_STREAM_ID | streamNum;

			// MPEG program-stream ids are 0x1xx.
			stream->id = 0x00000100 | streamId;
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
			stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
			stream->codecpar->codec_id = AV_CODEC_ID_H264;
#else
			stream->request_probe = 0;
			stream->need_parsing = AVSTREAM_PARSE_FULL;
#endif
			// We could set the width here, but we don't need to.
			if (streamNum >= m_expectedVideoStreams) {
				++m_expectedVideoStreams;
			}

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(59, 16, 100)
			// Newer FFmpeg no longer exposes stream->codec; allocate our own
			// context so closeContext() has something to release.
			AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
			AVCodecContext *codecCtx = avcodec_alloc_context3(codec);
#else
			AVCodecContext *codecCtx = stream->codec;
#endif
			m_codecsToClose.push_back(codecCtx);
			return true;
		}
	}
#endif
	// Track the expectation even when the stream couldn't be added yet.
	if (streamNum >= m_expectedVideoStreams) {
		++m_expectedVideoStreams;
	}
	return false;
}
457
458
// Feeds more MPEG data from the game into the ring buffer and demuxer.
// Returns the number of bytes accepted (0 when the ring buffer was full).
// Opens the FFmpeg context once the full 2 KB PSMF header has arrived.
int MediaEngine::addStreamData(const u8 *buffer, int addSize) {
	int size = addSize;
	if (size > 0 && m_pdata) {
		if (!m_pdata->push(buffer, size))
			size = 0;
		if (m_demux) {
			m_demux->addStreamData(buffer, addSize);
		}
#ifdef USE_FFMPEG
		if (!m_pFormatCtx && m_pdata->getQueueSize() >= 2048) {
			// Capture the header bytes, then trim the queue so FFmpeg reads
			// the stream proper from the ring buffer.
			m_mpegheaderSize = m_pdata->get_front(m_mpegheader, sizeof(m_mpegheader));
			// Offset 8 holds the stream-data offset; everything before it is header.
			int streamOffset = (int)(*(s32_be *)(m_mpegheader + 8));
			if (streamOffset <= m_mpegheaderSize) {
				m_mpegheaderSize = streamOffset;
				m_pdata->pop_front(0, m_mpegheaderSize);
				openContext();
			}
		}
#endif // USE_FFMPEG

		// We added data, so... not the end anymore?
		m_isVideoEnd = false;
	}
	return size;
}
483
484
// Steps video forward (skipping frame conversion) until the video clock
// reaches `timestamp`, draining audio frames to keep the clocks in sync.
// Deliberately brute-force; bounded by a shared iteration budget.
bool MediaEngine::seekTo(s64 timestamp, int videoPixelMode) {
	if (timestamp <= 0)
		return true;

	// Just doing it the not so great way to be sure audio is in sync.
	int timeout = 1000;

	// Phase 1: skip-decode video to within one NTSC frame of the target,
	// consuming audio whenever it falls more than two frames behind.
	while (getVideoTimeStamp() < timestamp - 3003) {
		if (getAudioTimeStamp() < getVideoTimeStamp() - 4180 * 2)
			getNextAudioFrame(NULL, NULL, NULL);
		if (!stepVideo(videoPixelMode, true))
			return false;
		if (--timeout <= 0)
			return true;
	}

	// Phase 2: finish catching the audio clock up to the video clock.
	while (getAudioTimeStamp() < getVideoTimeStamp() - 4180 * 2) {
		if (getNextAudioFrame(NULL, NULL, NULL) == 0)
			return false;
		if (--timeout <= 0)
			return true;
	}

	return true;
}
514
515
// Selects (and lazily opens) the decoder for video stream `streamNum`.
// With `force`, re-opens even when it is already the current stream.
// Opened contexts are cached in m_pCodecCtxs keyed by stream number.
bool MediaEngine::setVideoStream(int streamNum, bool force) {
	if (m_videoStream == streamNum && !force) {
		// Yay, nothing to do.
		return true;
	}

#ifdef USE_FFMPEG
	if (m_pFormatCtx && m_pCodecCtxs.find(streamNum) == m_pCodecCtxs.end()) {
		// Get a pointer to the codec context for the video stream
		if ((u32)streamNum >= m_pFormatCtx->nb_streams) {
			return false;
		}

		AVStream *stream = m_pFormatCtx->streams[streamNum];
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 33, 100)
		// Modern API: allocate a context and copy the stream parameters in.
		AVCodec *pCodec = avcodec_find_decoder(stream->codecpar->codec_id);
		if (!pCodec) {
			WARN_LOG_REPORT(Log::ME, "Could not find decoder for %d", (int)stream->codecpar->codec_id);
			return false;
		}
		AVCodecContext *m_pCodecCtx = avcodec_alloc_context3(pCodec);
		int paramResult = avcodec_parameters_to_context(m_pCodecCtx, stream->codecpar);
		if (paramResult < 0) {
			WARN_LOG_REPORT(Log::ME, "Failed to prepare context parameters: %08x", paramResult);
			return false;
		}
#else
		AVCodecContext *m_pCodecCtx = stream->codec;
		// Find the decoder for the video stream
		AVCodec *pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id);
		if (pCodec == nullptr) {
			return false;
		}
#endif

		// Tolerate corrupt output and decode with minimal latency.
		m_pCodecCtx->flags |= AV_CODEC_FLAG_OUTPUT_CORRUPT | AV_CODEC_FLAG_LOW_DELAY;

		AVDictionary *opt = nullptr;
		// Allow ffmpeg to use any number of threads it wants. Without this, it doesn't use threads.
		av_dict_set(&opt, "threads", "0", 0);
		int openResult = avcodec_open2(m_pCodecCtx, pCodec, &opt);
		av_dict_free(&opt);
		if (openResult < 0) {
			return false;
		}

		m_pCodecCtxs[streamNum] = m_pCodecCtx;
	}
#endif
	m_videoStream = streamNum;

	return true;
}
568
569
// Sets the output dimensions (0x0 means "use the codec's native size"),
// allocates the decode and RGB frames, and (re)creates the swscale context.
// Returns false when the codec context or dimensions aren't available yet.
bool MediaEngine::setVideoDim(int width, int height)
{
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	if (codecIter == m_pCodecCtxs.end())
		return false;
	AVCodecContext *m_pCodecCtx = codecIter->second;

	if (width == 0 && height == 0)
	{
		// use the orignal video size
		m_desWidth = m_pCodecCtx->width;
		m_desHeight = m_pCodecCtx->height;
	}
	else
	{
		m_desWidth = width;
		m_desHeight = height;
	}

	// Allocate video frame
	if (!m_pFrame) {
		m_pFrame = av_frame_alloc();
	}

	// Reset so updateSwsFormat() below rebuilds for the new dimensions.
	sws_freeContext(m_sws_ctx);
	m_sws_ctx = nullptr;
	m_sws_fmt = -1;

	if (m_desWidth == 0 || m_desHeight == 0) {
		// Can't setup SWS yet, so stop for now.
		return false;
	}

	// Start with the largest pixel format so the buffer fits any later mode.
	updateSwsFormat(GE_CMODE_32BIT_ABGR8888);

	// Allocate video frame for RGB24
	m_pFrameRGB = av_frame_alloc();
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
	int numBytes = av_image_get_buffer_size((AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight, 1);
#else
	int numBytes = avpicture_get_size((AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
#endif
	m_buffer = (u8*)av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in m_pFrameRGB
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
	av_image_fill_arrays(m_pFrameRGB->data, m_pFrameRGB->linesize, m_buffer, (AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight, 1);
#else
	avpicture_fill((AVPicture *)m_pFrameRGB, m_buffer, (AVPixelFormat)m_sws_fmt, m_desWidth, m_desHeight);
#endif
#endif // USE_FFMPEG
	return true;
}
623
624
// Rebuilds the cached swscale context when the requested output pixel mode
// changes, and forces limited-range colorspace conversion on both ends.
void MediaEngine::updateSwsFormat(int videoPixelMode) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	AVPixelFormat swsDesired = getSwsFormat(videoPixelMode);
	// Only rebuild when the format actually changed and a codec is open.
	if (swsDesired != m_sws_fmt && m_pCodecCtx != 0) {
		m_sws_fmt = swsDesired;
		m_sws_ctx = sws_getCachedContext
			(
				m_sws_ctx,
				m_pCodecCtx->width,
				m_pCodecCtx->height,
				m_pCodecCtx->pix_fmt,
				m_desWidth,
				m_desHeight,
				(AVPixelFormat)m_sws_fmt,
				SWS_BILINEAR,
				NULL,
				NULL,
				NULL
			);

		int *inv_coefficients;
		int *coefficients;
		int srcRange, dstRange;
		int brightness, contrast, saturation;

		// Set both ranges to 0 (limited/MPEG range) when supported.
		if (sws_getColorspaceDetails(m_sws_ctx, &inv_coefficients, &srcRange, &coefficients, &dstRange, &brightness, &contrast, &saturation) != -1) {
			srcRange = 0;
			dstRange = 0;
			sws_setColorspaceDetails(m_sws_ctx, inv_coefficients, srcRange, coefficients, dstRange, brightness, contrast, saturation);
		}
	}
#endif
}
660
661
// Decodes the next frame of the current video stream into m_pFrame and,
// unless skipFrame, converts it into m_pFrameRGB via swscale. Advances the
// video clock from the frame's pts, assuming 29.97 fps (3003 ticks) when
// timing is missing. Returns true when a frame was produced; sets
// m_isVideoEnd when the stream and ring buffer are both exhausted.
bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if (!m_pFrame)
		return false;

	AVPacket packet;
	av_init_packet(&packet);
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2() / avcodec_receive_frame().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() / avcodec_send_packet() gives us the re-ordered frames with a NULL packet.
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
			if (dataEnd)
				av_packet_unref(&packet);
#else
			if (dataEnd)
				av_free_packet(&packet);
#endif

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 48, 101)
			// Modern send/receive decode API.
			if (packet.size != 0)
				avcodec_send_packet(m_pCodecCtx, &packet);
			int result = avcodec_receive_frame(m_pCodecCtx, m_pFrame);
			if (result == 0) {
				result = m_pFrame->pkt_size;
				frameFinished = 1;
			} else if (result == AVERROR(EAGAIN)) {
				// Decoder wants more input - not an error.
				result = 0;
				frameFinished = 0;
			} else {
				frameFinished = 0;
			}
#else
			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
#endif
			if (frameFinished) {
				// First frame: dimensions are now known, allocate the RGB frame.
				if (!m_pFrameRGB) {
					setVideoDim();
				}
				if (m_pFrameRGB && !skipFrame) {
					updateSwsFormat(videoPixelMode);
					// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
					// Update the linesize for the new format too. We started with the largest size, so it should fit.
					m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

					sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
						m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
				}

#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(55, 58, 100)
				int64_t bestPts = m_pFrame->best_effort_timestamp;
				int64_t ptsDuration = m_pFrame->pkt_duration;
#else
				int64_t bestPts = av_frame_get_best_effort_timestamp(m_pFrame);
				int64_t ptsDuration = av_frame_get_pkt_duration(m_pFrame);
#endif
				// Advance the video clock. m_lastPts remembers the last pts we
				// trusted, so a repeated pts falls back to the fixed step.
				if (ptsDuration == 0) {
					if (m_lastPts == bestPts - m_firstTimeStamp || bestPts == AV_NOPTS_VALUE) {
						// TODO: Assuming 29.97 if missing.
						m_videopts += 3003;
					} else {
						m_videopts = bestPts - m_firstTimeStamp;
						m_lastPts = m_videopts;
					}
				} else if (bestPts != AV_NOPTS_VALUE) {
					m_videopts = bestPts + ptsDuration - m_firstTimeStamp;
					m_lastPts = m_videopts;
				} else {
					m_videopts += ptsDuration;
					m_lastPts = m_videopts;
				}
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 12, 100)
		av_packet_unref(&packet);
#else
		av_free_packet(&packet);
#endif
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
767
768
// Helpers that null out alpha (which seems to be the case on the PSP.)
769
// Some games depend on this, for example Sword Art Online (doesn't clear A's from buffer.)
770
// Copies one line of 32-bit pixels while zeroing the alpha byte, since the
// PSP leaves A at 0 in video output (see the comment above; some games rely
// on it). SIMD paths handle 8 pixels at a time; the scalar loop finishes the
// remainder (and is the whole path on other architectures).
inline void writeVideoLineRGBA(void *destp, const void *srcp, int width) {
	// TODO: Use SSE/NEON, investigate why AV_PIX_FMT_RGB0 does not work.
	u32_le *dest = (u32_le *)destp;
	const u32_le *src = (u32_le *)srcp;

	int count = width;

#if PPSSPP_ARCH(SSE2)
	// 0x00FFFFFF keeps RGB and clears the top (alpha) byte of each pixel.
	__m128i mask = _mm_set1_epi32(0x00FFFFFF);
	while (count >= 8) {
		__m128i pixels1 = _mm_and_si128(_mm_loadu_si128((const __m128i *)src), mask);
		__m128i pixels2 = _mm_and_si128(_mm_loadu_si128((const __m128i *)src + 1), mask);
		_mm_storeu_si128((__m128i *)dest, pixels1);
		_mm_storeu_si128((__m128i *)dest + 1, pixels2);
		src += 8;
		dest += 8;
		count -= 8;
	}
#elif PPSSPP_ARCH(ARM_NEON)
	uint32x4_t mask = vdupq_n_u32(0x00FFFFFF);
	while (count >= 8) {
		uint32x4_t pixels1 = vandq_u32(vld1q_u32(src), mask);
		uint32x4_t pixels2 = vandq_u32(vld1q_u32(src + 4), mask);
		vst1q_u32(dest, pixels1);
		vst1q_u32(dest + 4, pixels2);
		src += 8;
		dest += 8;
		count -= 8;
	}
#endif
	// Scalar tail; kept unvectorized so the SIMD paths above stay canonical.
	const u32 mask32 = 0x00FFFFFF;
	DO_NOT_VECTORIZE_LOOP
	while (count--) {
		*dest++ = *src++ & mask32;
	}
}
806
807
// 5650 has no alpha bit, so the line can be copied through unchanged.
inline void writeVideoLineABGR5650(void *destp, const void *srcp, int width) {
	memcpy(destp, srcp, static_cast<size_t>(width) * sizeof(uint16_t));
}
810
811
inline void writeVideoLineABGR5551(void *destp, const void *srcp, int width) {
812
// TODO: Use SSE/NEON.
813
u16_le *dest = (u16_le *)destp;
814
const u16_le *src = (u16_le *)srcp;
815
816
const u16 mask = 0x7FFF;
817
for (int i = 0; i < width; ++i) {
818
dest[i] = src[i] & mask;
819
}
820
}
821
822
inline void writeVideoLineABGR4444(void *destp, const void *srcp, int width) {
823
// TODO: Use SSE/NEON.
824
u16_le *dest = (u16_le *)destp;
825
const u16_le *src = (u16_le *)srcp;
826
827
const u16 mask = 0x0FFF;
828
for (int i = 0; i < width; ++i) {
829
dest[i] = src[i] & mask;
830
}
831
}
832
833
// Writes the whole decoded frame to PSP memory at bufferPtr, converting to
// videoPixelMode. frameWidth is the destination stride in pixels. Returns
// the number of bytes written, or 0 on bad input / no frame ready.
int MediaEngine::writeVideoImage(u32 bufferPtr, int frameWidth, int videoPixelMode) {
	int videoLineSize = 0;
	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		videoLineSize = frameWidth * sizeof(u32);
		break;
	case GE_CMODE_16BIT_BGR5650:
	case GE_CMODE_16BIT_ABGR5551:
	case GE_CMODE_16BIT_ABGR4444:
		videoLineSize = frameWidth * sizeof(u16);
		break;
	}

	int videoImageSize = videoLineSize * m_desHeight;

	if (!Memory::IsValidRange(bufferPtr, videoImageSize) || frameWidth > 2048) {
		// Clearly invalid values. Let's just not.
		ERROR_LOG_REPORT(Log::ME, "Ignoring invalid video decode address %08x/%x", bufferPtr, frameWidth);
		return 0;
	}

	u8 *buffer = Memory::GetPointerWriteUnchecked(bufferPtr);

#ifdef USE_FFMPEG
	if (!m_pFrame || !m_pFrameRGB)
		return 0;

	// lock the image size
	int height = m_desHeight;
	int width = m_desWidth;
	u8 *imgbuf = buffer;
	const u8 *data = m_pFrameRGB->data[0];

	// Writes to the upper VRAM mirror must be swizzled; stage through a
	// temporary linear buffer in that case.
	bool swizzle = Memory::IsVRAMAddress(bufferPtr) && (bufferPtr & 0x00200000) == 0x00200000;
	if (swizzle) {
		imgbuf = new u8[videoImageSize];
	}

	// Convert line by line; each helper also clears alpha as appropriate.
	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		for (int y = 0; y < height; y++) {
			writeVideoLineRGBA(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u32);
		}
		break;

	case GE_CMODE_16BIT_BGR5650:
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5650(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u16);
		}
		break;

	case GE_CMODE_16BIT_ABGR5551:
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5551(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u16);
		}
		break;

	case GE_CMODE_16BIT_ABGR4444:
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR4444(imgbuf + videoLineSize * y, data, width);
			data += width * sizeof(u16);
		}
		break;

	default:
		ERROR_LOG_REPORT(Log::ME, "Unsupported video pixel format %d", videoPixelMode);
		break;
	}

	if (swizzle) {
		// 16-byte blocks per row, 8 rows per swizzle tile.
		const int bxc = videoLineSize / 16;
		int byc = (height + 7) / 8;
		if (byc == 0)
			byc = 1;

		DoSwizzleTex16((const u32 *)imgbuf, buffer, bxc, byc, videoLineSize);
		delete [] imgbuf;
	}

	NotifyMemInfo(MemBlockFlags::WRITE, bufferPtr, videoImageSize, "VideoDecode");

	return videoImageSize;
#endif // USE_FFMPEG
	return 0;
}
921
922
// Writes the decoded frame's sub-rectangle (xpos, ypos, width, height) to
// PSP memory at bufferPtr, converting to videoPixelMode. frameWidth is the
// destination stride in pixels. Returns the bytes written, or 0 on error.
int MediaEngine::writeVideoImageWithRange(u32 bufferPtr, int frameWidth, int videoPixelMode,
	int xpos, int ypos, int width, int height) {
	int videoLineSize = 0;
	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		videoLineSize = frameWidth * sizeof(u32);
		break;
	case GE_CMODE_16BIT_BGR5650:
	case GE_CMODE_16BIT_ABGR5551:
	case GE_CMODE_16BIT_ABGR4444:
		videoLineSize = frameWidth * sizeof(u16);
		break;
	}
	int videoImageSize = videoLineSize * height;

	if (!Memory::IsValidRange(bufferPtr, videoImageSize) || frameWidth > 2048) {
		// Clearly invalid values. Let's just not.
		ERROR_LOG_REPORT(Log::ME, "Ignoring invalid video decode address %08x/%x", bufferPtr, frameWidth);
		return 0;
	}

	u8 *buffer = Memory::GetPointerWriteUnchecked(bufferPtr);

#ifdef USE_FFMPEG
	if (!m_pFrame || !m_pFrameRGB)
		return 0;

	// lock the image size
	u8 *imgbuf = buffer;
	const u8 *data = m_pFrameRGB->data[0];

	// Writes to the upper VRAM mirror must be swizzled; stage through a
	// temporary linear buffer in that case. Keep the allocation base
	// separately: imgbuf is advanced row by row below, and the old code
	// swizzled from - and delete[]d - the advanced pointer (undefined
	// behavior, and it read past the end of the buffer).
	bool swizzle = Memory::IsVRAMAddress(bufferPtr) && (bufferPtr & 0x00200000) == 0x00200000;
	u8 *swizzlebuf = nullptr;
	if (swizzle) {
		swizzlebuf = new u8[videoImageSize];
		imgbuf = swizzlebuf;
	}

	// Clamp the rectangle to the decoded frame.
	if (width > m_desWidth - xpos)
		width = m_desWidth - xpos;
	if (height > m_desHeight - ypos)
		height = m_desHeight - ypos;

	// Convert line by line from the rectangle's origin inside the frame.
	switch (videoPixelMode) {
	case GE_CMODE_32BIT_ABGR8888:
		data += (ypos * m_desWidth + xpos) * sizeof(u32);
		for (int y = 0; y < height; y++) {
			writeVideoLineRGBA(imgbuf, data, width);
			data += m_desWidth * sizeof(u32);
			imgbuf += videoLineSize;
		}
		break;

	case GE_CMODE_16BIT_BGR5650:
		data += (ypos * m_desWidth + xpos) * sizeof(u16);
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5650(imgbuf, data, width);
			data += m_desWidth * sizeof(u16);
			imgbuf += videoLineSize;
		}
		break;

	case GE_CMODE_16BIT_ABGR5551:
		data += (ypos * m_desWidth + xpos) * sizeof(u16);
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR5551(imgbuf, data, width);
			data += m_desWidth * sizeof(u16);
			imgbuf += videoLineSize;
		}
		break;

	case GE_CMODE_16BIT_ABGR4444:
		data += (ypos * m_desWidth + xpos) * sizeof(u16);
		for (int y = 0; y < height; y++) {
			writeVideoLineABGR4444(imgbuf, data, width);
			data += m_desWidth * sizeof(u16);
			imgbuf += videoLineSize;
		}
		break;

	default:
		ERROR_LOG_REPORT(Log::ME, "Unsupported video pixel format %d", videoPixelMode);
		break;
	}

	if (swizzle) {
		WARN_LOG_REPORT_ONCE(vidswizzle, Log::ME, "Swizzling Video with range");

		// 16-byte blocks per row, 8 rows per swizzle tile.
		const int bxc = videoLineSize / 16;
		int byc = (height + 7) / 8;
		if (byc == 0)
			byc = 1;

		// Swizzle from (and free) the buffer base, not the advanced cursor.
		DoSwizzleTex16((const u32 *)swizzlebuf, buffer, bxc, byc, videoLineSize);
		delete [] swizzlebuf;
	}
	NotifyMemInfo(MemBlockFlags::WRITE, bufferPtr, videoImageSize, "VideoDecodeRange");

	return videoImageSize;
#endif // USE_FFMPEG
	return 0;
}
1022
1023
// Returns a pointer to the converted RGB frame data, or nullptr when no
// frame buffer exists yet (or FFmpeg is compiled out).
u8 *MediaEngine::getFrameImage() {
#ifdef USE_FFMPEG
	// m_pFrameRGB is only allocated once dimensions are known (setVideoDim);
	// every other user null-checks it, so guard here too instead of crashing.
	return m_pFrameRGB ? m_pFrameRGB->data[0] : nullptr;
#else
	return nullptr;
#endif
}
1030
1031
int MediaEngine::getRemainSize() {
1032
if (!m_pdata)
1033
return 0;
1034
return std::max(m_pdata->getRemainSize() - m_decodingsize - 2048, 0);
1035
}
1036
1037
int MediaEngine::getAudioRemainSize() {
1038
if (!m_demux) {
1039
// No audio, so it can't be full, return video instead.
1040
return getRemainSize();
1041
}
1042
1043
return m_demux->getRemainSize();
1044
}
1045
1046
// Demuxes and returns the next audio frame (size in bytes, 0 when none),
// advancing the audio clock by one frame (4180 ticks) or to the frame's pts.
int MediaEngine::getNextAudioFrame(u8 **buf, int *headerCode1, int *headerCode2) {
	// Guard: seekTo() can reach here before any stream (and demuxer) exists;
	// every sibling accessor already checks m_demux.
	if (!m_demux)
		return 0;

	// When getting a frame, increment pts
	m_audiopts += 4180;

	// Demux now (rather than on add data) so that we select the right stream.
	m_demux->demux(m_audioStream);

	s64 pts = 0;
	int result = m_demux->getNextAudioFrame(buf, headerCode1, headerCode2, &pts);
	if (pts != 0) {
		// m_audiopts is supposed to be after the returned frame.
		m_audiopts = pts - m_firstTimeStamp + 4180;
	}
	return result;
}
1061
1062
// Decodes the next demuxed audio frame into guest memory at bufferPtr
// (always stereo s16 output). Returns the fixed 0x2000 bytes games expect,
// or 0 when there is no frame, no demuxer, or the address is invalid.
int MediaEngine::getAudioSamples(u32 bufferPtr) {
	int16_t *buffer = (int16_t *)Memory::GetPointerWriteRange(bufferPtr, 8192);
	if (buffer == nullptr) {
		ERROR_LOG_REPORT(Log::ME, "Ignoring bad audio decode address %08x during video playback", bufferPtr);
		// Actually ignore it: the old code fell through and handed the null
		// buffer to Decode() below.
		return 0;
	}
	if (!m_demux) {
		return 0;
	}

	u8 *audioFrame = nullptr;
	int headerCode1, headerCode2;
	int frameSize = getNextAudioFrame(&audioFrame, &headerCode1, &headerCode2);
	if (frameSize == 0) {
		return 0;
	}
	int outSamples = 0;

	if (m_audioContext != nullptr) {
		if (headerCode1 == 0x24) {
			// This means mono audio - tell the decoder to expect it before the first frame.
			// Note that it will always send us back stereo audio.
			m_audioContext->SetChannels(1);
		}

		int inbytesConsumed = 0;
		if (!m_audioContext->Decode(audioFrame, frameSize, &inbytesConsumed, 2, buffer, &outSamples)) {
			ERROR_LOG(Log::ME, "Audio (%s) decode failed during video playback", GetCodecName(m_audioType));
		}
		int outBytes = outSamples * sizeof(int16_t) * 2;

		NotifyMemInfo(MemBlockFlags::WRITE, bufferPtr, outBytes, "VideoDecodeAudio");
	}

	return 0x2000;
}
1097
1098
bool MediaEngine::IsNoAudioData() {
1099
if (!m_demux) {
1100
return true;
1101
}
1102
1103
// Let's double check. Here should be a safe enough place to demux.
1104
m_demux->demux(m_audioStream);
1105
return !m_demux->hasNextAudioFrame(NULL, NULL, NULL, NULL);
1106
}
1107
1108
// True once at least one audio frame has been consumed (getAudioTimeStamp
// returns -1 before that, or when no demuxer exists).
bool MediaEngine::IsActuallyPlayingAudio() {
	return getAudioTimeStamp() >= 0;
}
1111
1112
// Current video clock, relative to the stream's first timestamp.
s64 MediaEngine::getVideoTimeStamp() {
	return m_videopts;
}
1115
1116
// Current audio clock. m_audiopts points just past the last returned frame
// (see getNextAudioFrame), so report one frame earlier; -1 without a demuxer.
s64 MediaEngine::getAudioTimeStamp() {
	if (!m_demux)
		return -1;
	return m_audiopts - 4180;
}
1119
1120
// Stream duration as (last - first) timestamp; 0 when nothing is loaded.
s64 MediaEngine::getLastTimeStamp() {
	return m_pdata ? m_lastTimeStamp - m_firstTimeStamp : 0;
}
1125
1126