CoCalc provides the best real-time collaborative environment for Jupyter Notebooks, LaTeX documents, and SageMath, scalable from individual users to large groups and classes!
Path: blob/master/ext/jpge/jpgd_idct.h
Views: 1401
// Copyright 2009 Intel Corporation
// All Rights Reserved
//
// Permission is granted to use, copy, distribute and prepare derivative works of this
// software for any purpose and without fee, provided, that the above copyright notice
// and this statement appear in all copies. Intel makes no representations about the
// suitability of this software for any purpose. THIS SOFTWARE IS PROVIDED "AS IS."
// INTEL SPECIFICALLY DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, AND ALL LIABILITY,
// INCLUDING CONSEQUENTIAL AND OTHER INDIRECT DAMAGES, FOR THE USE OF THIS SOFTWARE,
// INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PROPRIETARY RIGHTS, AND INCLUDING THE
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. Intel does not
// assume any responsibility for any errors which may appear in this software nor any
// responsibility to update it.
//
// From:
// https://software.intel.com/sites/default/files/m/d/4/1/d/8/UsingIntelAVXToImplementIDCT-r1_5.pdf
// https://software.intel.com/file/29048
//
// Requires SSE
//
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <immintrin.h>
#include <stdint.h>

#ifdef _MSC_VER
#define JPGD_SIMD_ALIGN(type, name) __declspec(align(16)) type name
#else
#define JPGD_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
#endif

// Fixed-point accuracy of the row-pass accumulator.
#define BITS_INV_ACC 4
// Down-shifts applied after the row pass and the column pass. Parenthesized so
// the macros expand safely inside larger expressions.
#define SHIFT_INV_ROW (16 - BITS_INV_ACC)
#define SHIFT_INV_COL (1 + BITS_INV_ACC)
// Rounding constants for the two passes. Declared as enum constants (rather
// than `const short` objects) so they are genuine constant expressions and can
// legally appear in the static initializers below in plain C as well as C++.
enum
{
    IRND_INV_ROW = 1024 * (6 - BITS_INV_ACC), // 1 << (SHIFT_INV_ROW-1)
    IRND_INV_COL = 16 * (BITS_INV_ACC - 3),   // 1 << (SHIFT_INV_COL-1)
    IRND_INV_CORR = IRND_INV_COL - 1          // correction -1.0 and round
};

JPGD_SIMD_ALIGN(short, shortM128_one_corr[8]) = {1, 1, 1, 1, 1, 1, 1, 1};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_row[8]) = {IRND_INV_ROW, 0, IRND_INV_ROW, 0, IRND_INV_ROW, 0, IRND_INV_ROW, 0};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_col[8]) = {IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_corr[8]) = {IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR};
JPGD_SIMD_ALIGN(short, shortM128_tg_1_16[8]) = {13036, 13036, 13036, 13036, 13036, 13036, 13036, 13036};         // tg * (2<<16) + 0.5
JPGD_SIMD_ALIGN(short, shortM128_tg_2_16[8]) = {27146, 27146, 27146, 27146, 27146, 27146, 27146, 27146};         // tg * (2<<16) + 0.5
JPGD_SIMD_ALIGN(short, shortM128_tg_3_16[8]) = {-21746, -21746, -21746, -21746, -21746, -21746, -21746, -21746}; // tg * (2<<16) + 0.5
JPGD_SIMD_ALIGN(short, shortM128_cos_4_16[8]) = {-19195, -19195, -19195, -19195, -19195, -19195, -19195, -19195}; // cos * (2<<16) + 0.5

//-----------------------------------------------------------------------------
// Row-pass coefficient tables (from the Intel IDCT application note). Each
// table is four 8-short groups: weights for the even input columns (first two
// groups) and the odd input columns (last two groups); the w-index comments
// are carried over from the original paper.

// Table for rows 0,4 - constants are multiplied on cos_4_16
JPGD_SIMD_ALIGN(short, shortM128_tab_i_04[]) = {
    16384, 21407, 16384, 8867,   // w05 w04 w01 w00
    16384, -8867, 16384, -21407, // w13 w12 w09 w08
    16384, 8867, -16384, -21407, // w07 w06 w03 w02
    -16384, 21407, 16384, -8867, // w15 w14 w11 w10
    22725, 19266, 19266, -4520,  // w21 w20 w17 w16
    12873, -22725, 4520, -12873, // w29 w28 w25 w24
    12873, 4520, -22725, -12873, // w23 w22 w19 w18
    4520, 19266, 19266, -22725}; // w31 w30 w27 w26

// Table for rows 1,7 - constants are multiplied on cos_1_16
JPGD_SIMD_ALIGN(short, shortM128_tab_i_17[]) = {
    22725, 29692, 22725, 12299,   // w05 w04 w01 w00
    22725, -12299, 22725, -29692, // w13 w12 w09 w08
    22725, 12299, -22725, -29692, // w07 w06 w03 w02
    -22725, 29692, 22725, -12299, // w15 w14 w11 w10
    31521, 26722, 26722, -6270,   // w21 w20 w17 w16
    17855, -31521, 6270, -17855,  // w29 w28 w25 w24
    17855, 6270, -31521, -17855,  // w23 w22 w19 w18
    6270, 26722, 26722, -31521};  // w31 w30 w27 w26

// Table for rows 2,6 - constants are multiplied on cos_2_16
JPGD_SIMD_ALIGN(short, shortM128_tab_i_26[]) = {
    21407, 27969, 21407, 11585,   // w05 w04 w01 w00
    21407, -11585, 21407, -27969, // w13 w12 w09 w08
    21407, 11585, -21407, -27969, // w07 w06 w03 w02
    -21407, 27969, 21407, -11585, // w15 w14 w11 w10
    29692, 25172, 25172, -5906,   // w21 w20 w17 w16
    16819, -29692, 5906, -16819,  // w29 w28 w25 w24
    16819, 5906, -29692, -16819,  // w23 w22 w19 w18
    5906, 25172, 25172, -29692};  // w31 w30 w27 w26

// Table for rows 3,5 - constants are multiplied on cos_3_16
JPGD_SIMD_ALIGN(short, shortM128_tab_i_35[]) = {
    19266, 25172, 19266, 10426,   // w05 w04 w01 w00
    19266, -10426, 19266, -25172, // w13 w12 w09 w08
    19266, 10426, -19266, -25172, // w07 w06 w03 w02
    -19266, 25172, 19266, -10426, // w15 w14 w11 w10
    26722, 22654, 22654, -5315,   // w21 w20 w17 w16
    15137, -26722, 5315, -15137,  // w29 w28 w25 w24
    15137, 5315, -26722, -15137,  // w23 w22 w19 w18
    5315, 22654, 22654, -26722};  // w31 w30 w27 w26

// Level shift added at the very end to map the signed IDCT output to 0..255.
JPGD_SIMD_ALIGN(short, shortM128_128[8]) = {128, 128, 128, 128, 128, 128, 128, 128};

// One pass of the row transform: 1-D 8-point fixed-point IDCT on the two
// 8-short rows at pIn0/pIn1 (both 16-byte aligned), using the coefficient
// tables pTab0/pTab1 respectively, writing the down-shifted results to
// *pOut0/*pOut1. Shared by all four row-pair passes of idctSSEShortU8.
static void idctRowPairSSE(const short *pIn0, const short *pIn1,
                           const short *pTab0, const short *pTab1,
                           __m128i *pOut0, __m128i *pOut1)
{
    const __m128i rnd = _mm_load_si128((const __m128i *)shortM128_round_inv_row);
    const short *ins[2] = {pIn0, pIn1};
    const short *tabs[2] = {pTab0, pTab1};
    __m128i *outs[2] = {pOut0, pOut1};

    for (int i = 0; i < 2; i++)
    {
        const __m128i *tab = (const __m128i *)tabs[i];
        __m128i x = _mm_load_si128((const __m128i *)ins[i]);

        // Interleave so each 32-bit lane holds a (short even, short odd)
        // column pair: low half 0xd8 -> shorts [0,2,1,3], high half likewise.
        x = _mm_shufflelo_epi16(x, 0xd8);
        // Broadcast each pair to all lanes and multiply-accumulate against
        // the matching 4x(short,short) weight group of the table.
        __m128i even_lo = _mm_madd_epi16(_mm_shuffle_epi32(x, 0x00), tab[0]);
        __m128i odd_lo = _mm_madd_epi16(_mm_shuffle_epi32(x, 0x55), tab[2]);
        x = _mm_shufflehi_epi16(x, 0xd8);
        __m128i even_hi = _mm_madd_epi16(_mm_shuffle_epi32(x, 0xaa), tab[1]);
        __m128i odd_hi = _mm_madd_epi16(_mm_shuffle_epi32(x, 0xff), tab[3]);

        // a = even part (+ rounding), b = odd part. Butterfly outputs:
        // y0..y3 = a+b, y7..y4 = a-b (reversed with the 0x1b shuffle).
        __m128i a = _mm_add_epi32(_mm_add_epi32(even_lo, rnd), even_hi);
        __m128i b = _mm_add_epi32(odd_hi, odd_lo);
        __m128i sum = _mm_srai_epi32(_mm_add_epi32(a, b), SHIFT_INV_ROW);
        __m128i dif = _mm_srai_epi32(_mm_sub_epi32(a, b), SHIFT_INV_ROW);
        *outs[i] = _mm_packs_epi32(sum, _mm_shuffle_epi32(dif, 0x1b));
    }
}

// Computes the 2-D 8x8 inverse DCT of the 64 dequantized 16-bit coefficients
// at pInput (16-byte aligned, row-major), applies the JPEG level shift of
// +128, and stores the result as 64 bytes at pOutputUB (16-byte aligned),
// saturated to 0..255.
void idctSSEShortU8(const short *pInput, uint8_t * pOutputUB)
{
    __m128i row0, row1, row2, row3, row4, row5, row6, row7;

    // Row pass: one helper call per pair of input rows. Rows 0/4 share a
    // coefficient table, as do 2/6, 1/7 and 3/5.
    idctRowPairSSE(&pInput[0 * 8], &pInput[2 * 8], shortM128_tab_i_04, shortM128_tab_i_26, &row0, &row2);
    idctRowPairSSE(&pInput[4 * 8], &pInput[6 * 8], shortM128_tab_i_04, shortM128_tab_i_26, &row4, &row6);
    idctRowPairSSE(&pInput[3 * 8], &pInput[1 * 8], shortM128_tab_i_35, shortM128_tab_i_17, &row3, &row1);
    idctRowPairSSE(&pInput[5 * 8], &pInput[7 * 8], shortM128_tab_i_35, shortM128_tab_i_17, &row5, &row7);

    // Column pass: 8 columns at once in 16-bit saturating arithmetic. The
    // exact statement order is significant (saturation and the one_corr /
    // rounding terms are not associative) and is kept as in the original.
    __m128i r_xmm0, r_xmm1, r_xmm2, r_xmm3, r_xmm4, r_xmm5, r_xmm6, r_xmm7;

    // Odd part: rows 1,3,5,7 scaled by tan(1,2,3 * pi/16).
    r_xmm1 = _mm_load_si128((const __m128i *)shortM128_tg_3_16);
    r_xmm2 = row5;
    r_xmm3 = row3;
    r_xmm0 = _mm_mulhi_epi16(row5, r_xmm1);
    r_xmm1 = _mm_mulhi_epi16(r_xmm1, r_xmm3);
    r_xmm5 = _mm_load_si128((const __m128i *)shortM128_tg_1_16);
    r_xmm6 = row7;
    r_xmm4 = _mm_mulhi_epi16(row7, r_xmm5);
    r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm2);
    r_xmm5 = _mm_mulhi_epi16(r_xmm5, row1);
    r_xmm1 = _mm_adds_epi16(r_xmm1, r_xmm3);
    r_xmm7 = row6;
    r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm3);
    r_xmm3 = _mm_load_si128((const __m128i *)shortM128_tg_2_16);
    r_xmm2 = _mm_subs_epi16(r_xmm2, r_xmm1);
    r_xmm7 = _mm_mulhi_epi16(r_xmm7, r_xmm3);
    r_xmm1 = r_xmm0;
    r_xmm3 = _mm_mulhi_epi16(r_xmm3, row2);
    r_xmm5 = _mm_subs_epi16(r_xmm5, r_xmm6);
    r_xmm4 = _mm_adds_epi16(r_xmm4, row1);
    r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm4);
    r_xmm0 = _mm_adds_epi16(r_xmm0, *((const __m128i *)shortM128_one_corr));
    r_xmm4 = _mm_subs_epi16(r_xmm4, r_xmm1);
    r_xmm6 = r_xmm5;
    r_xmm5 = _mm_subs_epi16(r_xmm5, r_xmm2);
    r_xmm5 = _mm_adds_epi16(r_xmm5, *((const __m128i *)shortM128_one_corr));
    r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm2);

    // Intermediate results, needed later.
    __m128i temp3, temp7;
    temp7 = r_xmm0;

    r_xmm1 = r_xmm4;
    r_xmm0 = _mm_load_si128((const __m128i *)shortM128_cos_4_16);
    r_xmm4 = _mm_adds_epi16(r_xmm4, r_xmm5);
    r_xmm2 = _mm_load_si128((const __m128i *)shortM128_cos_4_16);
    r_xmm2 = _mm_mulhi_epi16(r_xmm2, r_xmm4);

    // Intermediate results, needed later.
    temp3 = r_xmm6;

    r_xmm1 = _mm_subs_epi16(r_xmm1, r_xmm5);
    r_xmm7 = _mm_adds_epi16(r_xmm7, row2);
    r_xmm3 = _mm_subs_epi16(r_xmm3, row6);
    r_xmm6 = row0;
    r_xmm0 = _mm_mulhi_epi16(r_xmm0, r_xmm1);
    r_xmm5 = row4;
    r_xmm5 = _mm_adds_epi16(r_xmm5, r_xmm6);
    r_xmm6 = _mm_subs_epi16(r_xmm6, row4);
    r_xmm4 = _mm_adds_epi16(r_xmm4, r_xmm2);

    // OR with one_corr forces the LSB on, compensating the truncation of the
    // cos_4_16 fixed-point multiply (as in the Intel reference code).
    r_xmm4 = _mm_or_si128(r_xmm4, *((const __m128i *)shortM128_one_corr));
    r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm1);
    r_xmm0 = _mm_or_si128(r_xmm0, *((const __m128i *)shortM128_one_corr));

    r_xmm2 = r_xmm5;
    r_xmm5 = _mm_adds_epi16(r_xmm5, r_xmm7);
    r_xmm1 = r_xmm6;
    r_xmm5 = _mm_adds_epi16(r_xmm5, *((const __m128i *)shortM128_round_inv_col));
    r_xmm2 = _mm_subs_epi16(r_xmm2, r_xmm7);
    r_xmm7 = temp7;
    r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm3);
    r_xmm6 = _mm_adds_epi16(r_xmm6, *((const __m128i *)shortM128_round_inv_col));
    r_xmm7 = _mm_adds_epi16(r_xmm7, r_xmm5);
    r_xmm7 = _mm_srai_epi16(r_xmm7, SHIFT_INV_COL);
    r_xmm1 = _mm_subs_epi16(r_xmm1, r_xmm3);
    r_xmm1 = _mm_adds_epi16(r_xmm1, *((const __m128i *)shortM128_round_inv_corr));
    r_xmm3 = r_xmm6;
    r_xmm2 = _mm_adds_epi16(r_xmm2, *((const __m128i *)shortM128_round_inv_corr));
    r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm4);

    // Results for row 0.
    __m128i r0 = r_xmm7;

    r_xmm6 = _mm_srai_epi16(r_xmm6, SHIFT_INV_COL);
    r_xmm7 = r_xmm1;
    r_xmm1 = _mm_adds_epi16(r_xmm1, r_xmm0);

    // Results for row 1.
    __m128i r1 = r_xmm6;

    r_xmm1 = _mm_srai_epi16(r_xmm1, SHIFT_INV_COL);
    r_xmm6 = temp3;
    r_xmm7 = _mm_subs_epi16(r_xmm7, r_xmm0);
    r_xmm7 = _mm_srai_epi16(r_xmm7, SHIFT_INV_COL);

    // Results for row 2.
    __m128i r2 = r_xmm1;

    r_xmm5 = _mm_subs_epi16(r_xmm5, temp7);
    r_xmm5 = _mm_srai_epi16(r_xmm5, SHIFT_INV_COL);

    // Results for row 7.
    __m128i r7 = r_xmm5;

    r_xmm3 = _mm_subs_epi16(r_xmm3, r_xmm4);
    r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm2);
    r_xmm2 = _mm_subs_epi16(r_xmm2, temp3);
    r_xmm6 = _mm_srai_epi16(r_xmm6, SHIFT_INV_COL);
    r_xmm2 = _mm_srai_epi16(r_xmm2, SHIFT_INV_COL);

    // Results for row 3.
    __m128i r3 = r_xmm6;

    r_xmm3 = _mm_srai_epi16(r_xmm3, SHIFT_INV_COL);

    // Results for rows 4, 5 and 6.
    __m128i r4 = r_xmm2;
    __m128i r5 = r_xmm7;
    __m128i r6 = r_xmm3;

    // Level shift by +128 and pack with unsigned saturation to bytes.
    const __m128i bias = *(const __m128i *)shortM128_128;
    r0 = _mm_add_epi16(bias, r0);
    r1 = _mm_add_epi16(bias, r1);
    r2 = _mm_add_epi16(bias, r2);
    r3 = _mm_add_epi16(bias, r3);
    r4 = _mm_add_epi16(bias, r4);
    r5 = _mm_add_epi16(bias, r5);
    r6 = _mm_add_epi16(bias, r6);
    r7 = _mm_add_epi16(bias, r7);

    ((__m128i *)pOutputUB)[0] = _mm_packus_epi16(r0, r1);
    ((__m128i *)pOutputUB)[1] = _mm_packus_epi16(r2, r3);
    ((__m128i *)pOutputUB)[2] = _mm_packus_epi16(r4, r5);
    ((__m128i *)pOutputUB)[3] = _mm_packus_epi16(r6, r7);
}