CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!
Path: blob/master/ext/at3_standalone/atrac3plusdec.cpp
Views: 1401
/*1* ATRAC3+ compatible decoder2*3* Copyright (c) 2010-2013 Maxim Poliakovski4*5* This file is part of FFmpeg.6*7* FFmpeg is free software; you can redistribute it and/or8* modify it under the terms of the GNU Lesser General Public9* License as published by the Free Software Foundation; either10* version 2.1 of the License, or (at your option) any later version.11*12* FFmpeg is distributed in the hope that it will be useful,13* but WITHOUT ANY WARRANTY; without even the implied warranty of14* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU15* Lesser General Public License for more details.16*17* You should have received a copy of the GNU Lesser General Public18* License along with FFmpeg; if not, write to the Free Software19* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA20*/2122/**23* @file24* Sony ATRAC3+ compatible decoder.25*26* Container formats used to store its data:27* RIFF WAV (.at3) and Sony OpenMG (.oma, .aa3).28*29* Technical description of this codec can be found here:30* http://wiki.multimedia.cx/index.php?title=ATRAC3plus31*32* Kudos to Benjamin Larsson and Michael Karcher33* for their precious technical help!34*/3536#include <stdint.h>37#include <string.h>3839#include "float_dsp.h"40#include "get_bits.h"41#include "compat.h"42#include "atrac.h"43#include "mem.h"44#include "atrac3plus.h"4546struct ATRAC3PContext {47GetBitContext gb;4849DECLARE_ALIGNED(32, float, samples)[2][ATRAC3P_FRAME_SAMPLES]; ///< quantized MDCT spectrum50DECLARE_ALIGNED(32, float, mdct_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the IMDCT51DECLARE_ALIGNED(32, float, time_buf)[2][ATRAC3P_FRAME_SAMPLES]; ///< output of the gain compensation52DECLARE_ALIGNED(32, float, outp_buf)[2][ATRAC3P_FRAME_SAMPLES];5354AtracGCContext gainc_ctx; ///< gain compensation context55FFTContext mdct_ctx;56FFTContext ipqf_dct_ctx; ///< IDCT context used by IPQF5758Atrac3pChanUnitCtx *ch_units; ///< global channel units5960int num_channel_blocks; ///< 
number of channel blocks61uint8_t channel_blocks[5]; ///< channel configuration descriptor6263int block_align;64};6566void atrac3p_free(ATRAC3PContext *ctx)67{68av_freep(&ctx->ch_units);69ff_mdct_end(&ctx->mdct_ctx);70ff_mdct_end(&ctx->ipqf_dct_ctx);71av_freep(&ctx);72}7374static int set_channel_params(ATRAC3PContext *ctx, int channels) {75memset(ctx->channel_blocks, 0, sizeof(ctx->channel_blocks));76switch (channels) {77case 1:78ctx->num_channel_blocks = 1;79ctx->channel_blocks[0] = CH_UNIT_MONO;80break;81case 2:82ctx->num_channel_blocks = 1;83ctx->channel_blocks[0] = CH_UNIT_STEREO;84break;85case 3:86ctx->num_channel_blocks = 2;87ctx->channel_blocks[0] = CH_UNIT_STEREO;88ctx->channel_blocks[1] = CH_UNIT_MONO;89break;90case 4:91ctx->num_channel_blocks = 3;92ctx->channel_blocks[0] = CH_UNIT_STEREO;93ctx->channel_blocks[1] = CH_UNIT_MONO;94ctx->channel_blocks[2] = CH_UNIT_MONO;95break;96case 6:97ctx->num_channel_blocks = 4;98ctx->channel_blocks[0] = CH_UNIT_STEREO;99ctx->channel_blocks[1] = CH_UNIT_MONO;100ctx->channel_blocks[2] = CH_UNIT_STEREO;101ctx->channel_blocks[3] = CH_UNIT_MONO;102break;103case 7:104ctx->num_channel_blocks = 5;105ctx->channel_blocks[0] = CH_UNIT_STEREO;106ctx->channel_blocks[1] = CH_UNIT_MONO;107ctx->channel_blocks[2] = CH_UNIT_STEREO;108ctx->channel_blocks[3] = CH_UNIT_MONO;109ctx->channel_blocks[4] = CH_UNIT_MONO;110break;111case 8:112ctx->num_channel_blocks = 5;113ctx->channel_blocks[0] = CH_UNIT_STEREO;114ctx->channel_blocks[1] = CH_UNIT_MONO;115ctx->channel_blocks[2] = CH_UNIT_STEREO;116ctx->channel_blocks[3] = CH_UNIT_STEREO;117ctx->channel_blocks[4] = CH_UNIT_MONO;118break;119default:120av_log(AV_LOG_ERROR,121"Unsupported channel count: %d!\n", channels);122return AVERROR_INVALIDDATA;123}124return 0;125}126127ATRAC3PContext *atrac3p_alloc(int channels, int *block_align) {128int i, ch, ret;129130ATRAC3PContext *ctx = (ATRAC3PContext *)av_mallocz(sizeof(ATRAC3PContext));131ctx->block_align = *block_align;132133if (!*block_align) {134// 
No block align was passed in, using the default.135*block_align = 0x000002e8;136}137138ff_atrac3p_init_vlcs();139140/* initialize IPQF */141ff_mdct_init(&ctx->ipqf_dct_ctx, 5, 1, 32.0 / 32768.0);142143ff_atrac3p_init_imdct(&ctx->mdct_ctx);144145ff_atrac_init_gain_compensation(&ctx->gainc_ctx, 6, 2);146147ff_atrac3p_init_wave_synth();148149if ((ret = set_channel_params(ctx, channels)) < 0) {150atrac3p_free(ctx);151return nullptr;152}153154ctx->ch_units = (Atrac3pChanUnitCtx *)av_mallocz_array(ctx->num_channel_blocks, sizeof(*ctx->ch_units));155156if (!ctx->ch_units) {157atrac3p_free(ctx);158return nullptr;159}160161for (i = 0; i < ctx->num_channel_blocks; i++) {162for (ch = 0; ch < 2; ch++) {163ctx->ch_units[i].channels[ch].ch_num = ch;164ctx->ch_units[i].channels[ch].wnd_shape = &ctx->ch_units[i].channels[ch].wnd_shape_hist[0][0];165ctx->ch_units[i].channels[ch].wnd_shape_prev = &ctx->ch_units[i].channels[ch].wnd_shape_hist[1][0];166ctx->ch_units[i].channels[ch].gain_data = &ctx->ch_units[i].channels[ch].gain_data_hist[0][0];167ctx->ch_units[i].channels[ch].gain_data_prev = &ctx->ch_units[i].channels[ch].gain_data_hist[1][0];168ctx->ch_units[i].channels[ch].tones_info = &ctx->ch_units[i].channels[ch].tones_info_hist[0][0];169ctx->ch_units[i].channels[ch].tones_info_prev = &ctx->ch_units[i].channels[ch].tones_info_hist[1][0];170}171172ctx->ch_units[i].waves_info = &ctx->ch_units[i].wave_synth_hist[0];173ctx->ch_units[i].waves_info_prev = &ctx->ch_units[i].wave_synth_hist[1];174}175176return ctx;177}178179static void decode_residual_spectrum(Atrac3pChanUnitCtx *ctx,180float out[2][ATRAC3P_FRAME_SAMPLES],181int num_channels)182{183int i, sb, ch, qu, nspeclines, RNG_index;184float *dst, q;185int16_t *src;186/* calculate RNG table index for each subband */187int sb_RNG_index[ATRAC3P_SUBBANDS] = { 0 };188189if (ctx->mute_flag) {190for (ch = 0; ch < num_channels; ch++)191memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));192return;193}194195for (qu = 0, 
RNG_index = 0; qu < ctx->used_quant_units; qu++)196RNG_index += ctx->channels[0].qu_sf_idx[qu] +197ctx->channels[1].qu_sf_idx[qu];198199for (sb = 0; sb < ctx->num_coded_subbands; sb++, RNG_index += 128)200sb_RNG_index[sb] = RNG_index & 0x3FC;201202/* inverse quant and power compensation */203for (ch = 0; ch < num_channels; ch++) {204/* clear channel's residual spectrum */205memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));206207for (qu = 0; qu < ctx->used_quant_units; qu++) {208src = &ctx->channels[ch].spectrum[av_atrac3p_qu_to_spec_pos[qu]];209dst = &out[ch][av_atrac3p_qu_to_spec_pos[qu]];210nspeclines = av_atrac3p_qu_to_spec_pos[qu + 1] -211av_atrac3p_qu_to_spec_pos[qu];212213if (ctx->channels[ch].qu_wordlen[qu] > 0) {214q = av_atrac3p_sf_tab[ctx->channels[ch].qu_sf_idx[qu]] *215av_atrac3p_mant_tab[ctx->channels[ch].qu_wordlen[qu]];216for (i = 0; i < nspeclines; i++)217dst[i] = src[i] * q;218}219}220221for (sb = 0; sb < ctx->num_coded_subbands; sb++)222ff_atrac3p_power_compensation(ctx, ch, &out[ch][0],223sb_RNG_index[sb], sb);224}225226if (ctx->unit_type == CH_UNIT_STEREO) {227for (sb = 0; sb < ctx->num_coded_subbands; sb++) {228if (ctx->swap_channels[sb]) {229for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)230FFSWAP(float, out[0][sb * ATRAC3P_SUBBAND_SAMPLES + i],231out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);232}233234/* flip coefficients' sign if requested */235if (ctx->negate_coeffs[sb])236for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)237out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i] = -(out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);238}239}240}241242static void reconstruct_frame(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit,243int num_channels)244{245int ch, sb;246247for (ch = 0; ch < num_channels; ch++) {248for (sb = 0; sb < ch_unit->num_subbands; sb++) {249/* inverse transform and windowing */250ff_atrac3p_imdct(&ctx->mdct_ctx,251&ctx->samples[ch][sb * ATRAC3P_SUBBAND_SAMPLES],252&ctx->mdct_buf[ch][sb * 
ATRAC3P_SUBBAND_SAMPLES],253(ch_unit->channels[ch].wnd_shape_prev[sb] << 1) +254ch_unit->channels[ch].wnd_shape[sb], sb);255256/* gain compensation and overlapping */257ff_atrac_gain_compensation(&ctx->gainc_ctx,258&ctx->mdct_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],259&ch_unit->prev_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES],260&ch_unit->channels[ch].gain_data_prev[sb],261&ch_unit->channels[ch].gain_data[sb],262ATRAC3P_SUBBAND_SAMPLES,263&ctx->time_buf[ch][sb * ATRAC3P_SUBBAND_SAMPLES]);264}265266/* zero unused subbands in both output and overlapping buffers */267memset(&ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],2680,269(ATRAC3P_SUBBANDS - ch_unit->num_subbands) *270ATRAC3P_SUBBAND_SAMPLES *271sizeof(ch_unit->prev_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));272memset(&ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES],2730,274(ATRAC3P_SUBBANDS - ch_unit->num_subbands) *275ATRAC3P_SUBBAND_SAMPLES *276sizeof(ctx->time_buf[ch][ch_unit->num_subbands * ATRAC3P_SUBBAND_SAMPLES]));277278/* resynthesize and add tonal signal */279if (ch_unit->waves_info->tones_present ||280ch_unit->waves_info_prev->tones_present) {281for (sb = 0; sb < ch_unit->num_subbands; sb++)282if (ch_unit->channels[ch].tones_info[sb].num_wavs ||283ch_unit->channels[ch].tones_info_prev[sb].num_wavs) {284ff_atrac3p_generate_tones(ch_unit, ch, sb,285&ctx->time_buf[ch][sb * 128]);286}287}288289/* subband synthesis and acoustic signal output */290ff_atrac3p_ipqf(&ctx->ipqf_dct_ctx, &ch_unit->ipqf_ctx[ch],291&ctx->time_buf[ch][0], &ctx->outp_buf[ch][0]);292}293294/* swap window shape and gain control buffers. 
*/295for (ch = 0; ch < num_channels; ch++) {296FFSWAP(uint8_t *, ch_unit->channels[ch].wnd_shape,297ch_unit->channels[ch].wnd_shape_prev);298FFSWAP(AtracGainInfo *, ch_unit->channels[ch].gain_data,299ch_unit->channels[ch].gain_data_prev);300FFSWAP(Atrac3pWavesData *, ch_unit->channels[ch].tones_info,301ch_unit->channels[ch].tones_info_prev);302}303304FFSWAP(Atrac3pWaveSynthParams *, ch_unit->waves_info, ch_unit->waves_info_prev);305}306307int atrac3p_decode_frame(ATRAC3PContext *ctx, float *out_data[2], int *nb_samples, const uint8_t *indata, int indata_size)308{309int i, ret, ch_unit_id, ch_block = 0, out_ch_index = 0, channels_to_process;310float **samples_p = out_data;311312*nb_samples = 0;313314if ((ret = init_get_bits8(&ctx->gb, indata, indata_size)) < 0)315return ret;316317if (get_bits1(&ctx->gb)) {318av_log(AV_LOG_ERROR, "Invalid start bit!");319return AVERROR_INVALIDDATA;320}321322while (get_bits_left(&ctx->gb) >= 2 &&323(ch_unit_id = get_bits(&ctx->gb, 2)) != CH_UNIT_TERMINATOR) {324if (ch_unit_id == CH_UNIT_EXTENSION) {325avpriv_report_missing_feature("Channel unit extension");326return AVERROR_PATCHWELCOME;327}328if (ch_block >= ctx->num_channel_blocks ||329ctx->channel_blocks[ch_block] != ch_unit_id) {330av_log(AV_LOG_ERROR,331"Frame data doesn't match channel configuration!");332return AVERROR_INVALIDDATA;333}334335ctx->ch_units[ch_block].unit_type = ch_unit_id;336channels_to_process = ch_unit_id + 1;337338if ((ret = ff_atrac3p_decode_channel_unit(&ctx->gb,339&ctx->ch_units[ch_block],340channels_to_process)) < 0)341return ret;342343decode_residual_spectrum(&ctx->ch_units[ch_block], ctx->samples,344channels_to_process);345reconstruct_frame(ctx, &ctx->ch_units[ch_block],346channels_to_process);347348for (i = 0; i < channels_to_process; i++)349memcpy(samples_p[out_ch_index + i], ctx->outp_buf[i],350ATRAC3P_FRAME_SAMPLES * sizeof(**samples_p));351352ch_block++;353out_ch_index += channels_to_process;354}355356*nb_samples = ATRAC3P_FRAME_SAMPLES;357return 
FFMIN(ctx->block_align, indata_size);358}359360void atrac3p_flush_buffers(ATRAC3PContext *ctx) {361// TODO: Not sure what should be zeroed here.362}363364365