// Path: blob/master/thirdparty/libwebp/src/dsp/enc_sse41.c
// Copyright 2015 Google Inc. All Rights Reserved.1//2// Use of this source code is governed by a BSD-style license3// that can be found in the COPYING file in the root of the source4// tree. An additional intellectual property rights grant can be found5// in the file PATENTS. All contributing project authors may6// be found in the AUTHORS file in the root of the source tree.7// -----------------------------------------------------------------------------8//9// SSE4 version of some encoding functions.10//11// Author: Skal ([email protected])1213#include "src/dsp/dsp.h"1415#if defined(WEBP_USE_SSE41)16#include <emmintrin.h>17#include <smmintrin.h>1819#include <stdlib.h> // for abs()2021#include "src/dsp/common_sse2.h"22#include "src/dsp/cpu.h"23#include "src/enc/vp8i_enc.h"24#include "src/webp/types.h"2526//------------------------------------------------------------------------------27// Compute susceptibility based on DCT-coeff histograms.2829static void CollectHistogram_SSE41(const uint8_t* WEBP_RESTRICT ref,30const uint8_t* WEBP_RESTRICT pred,31int start_block, int end_block,32VP8Histogram* WEBP_RESTRICT const histo) {33const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);34int j;35int distribution[MAX_COEFF_THRESH + 1] = { 0 };36for (j = start_block; j < end_block; ++j) {37int16_t out[16];38int k;3940VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);4142// Convert coefficients to bin (within out[]).43{44// Load.45const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);46const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);47// v = abs(out) >> 348const __m128i abs0 = _mm_abs_epi16(out0);49const __m128i abs1 = _mm_abs_epi16(out1);50const __m128i v0 = _mm_srai_epi16(abs0, 3);51const __m128i v1 = _mm_srai_epi16(abs1, 3);52// bin = min(v, MAX_COEFF_THRESH)53const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);54const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);55// Store.56_mm_storeu_si128((__m128i*)&out[0], 
bin0);57_mm_storeu_si128((__m128i*)&out[8], bin1);58}5960// Convert coefficients to bin.61for (k = 0; k < 16; ++k) {62++distribution[out[k]];63}64}65VP8SetHistogramData(distribution, histo);66}6768//------------------------------------------------------------------------------69// Texture distortion70//71// We try to match the spectral content (weighted) between source and72// reconstructed samples.7374// Hadamard transform75// Returns the weighted sum of the absolute value of transformed coefficients.76// w[] contains a row-major 4 by 4 symmetric matrix.77static int TTransform_SSE41(const uint8_t* inA, const uint8_t* inB,78const uint16_t* const w) {79int32_t sum[4];80__m128i tmp_0, tmp_1, tmp_2, tmp_3;8182// Load and combine inputs.83{84const __m128i inA_0 = _mm_loadu_si128((const __m128i*)&inA[BPS * 0]);85const __m128i inA_1 = _mm_loadu_si128((const __m128i*)&inA[BPS * 1]);86const __m128i inA_2 = _mm_loadu_si128((const __m128i*)&inA[BPS * 2]);87// In SSE4.1, with gcc 4.8 at least (maybe other versions),88// _mm_loadu_si128 is faster than _mm_loadl_epi64. 
But for the last lump89// of inA and inB, _mm_loadl_epi64 is still used not to have an out of90// bound read.91const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);92const __m128i inB_0 = _mm_loadu_si128((const __m128i*)&inB[BPS * 0]);93const __m128i inB_1 = _mm_loadu_si128((const __m128i*)&inB[BPS * 1]);94const __m128i inB_2 = _mm_loadu_si128((const __m128i*)&inB[BPS * 2]);95const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);9697// Combine inA and inB (we'll do two transforms in parallel).98const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0);99const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1);100const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2);101const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3);102tmp_0 = _mm_cvtepu8_epi16(inAB_0);103tmp_1 = _mm_cvtepu8_epi16(inAB_1);104tmp_2 = _mm_cvtepu8_epi16(inAB_2);105tmp_3 = _mm_cvtepu8_epi16(inAB_3);106// a00 a01 a02 a03 b00 b01 b02 b03107// a10 a11 a12 a13 b10 b11 b12 b13108// a20 a21 a22 a23 b20 b21 b22 b23109// a30 a31 a32 a33 b30 b31 b32 b33110}111112// Vertical pass first to avoid a transpose (vertical and horizontal passes113// are commutative because w/kWeightY is symmetric) and subsequent transpose.114{115// Calculate a and b (two 4x4 at once).116const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);117const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);118const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);119const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);120const __m128i b0 = _mm_add_epi16(a0, a1);121const __m128i b1 = _mm_add_epi16(a3, a2);122const __m128i b2 = _mm_sub_epi16(a3, a2);123const __m128i b3 = _mm_sub_epi16(a0, a1);124// a00 a01 a02 a03 b00 b01 b02 b03125// a10 a11 a12 a13 b10 b11 b12 b13126// a20 a21 a22 a23 b20 b21 b22 b23127// a30 a31 a32 a33 b30 b31 b32 b33128129// Transpose the two 4x4.130VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3);131}132133// Horizontal pass and difference of weighted sums.134{135// Load all inputs.136const 
__m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);137const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);138139// Calculate a and b (two 4x4 at once).140const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);141const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);142const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);143const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);144const __m128i b0 = _mm_add_epi16(a0, a1);145const __m128i b1 = _mm_add_epi16(a3, a2);146const __m128i b2 = _mm_sub_epi16(a3, a2);147const __m128i b3 = _mm_sub_epi16(a0, a1);148149// Separate the transforms of inA and inB.150__m128i A_b0 = _mm_unpacklo_epi64(b0, b1);151__m128i A_b2 = _mm_unpacklo_epi64(b2, b3);152__m128i B_b0 = _mm_unpackhi_epi64(b0, b1);153__m128i B_b2 = _mm_unpackhi_epi64(b2, b3);154155A_b0 = _mm_abs_epi16(A_b0);156A_b2 = _mm_abs_epi16(A_b2);157B_b0 = _mm_abs_epi16(B_b0);158B_b2 = _mm_abs_epi16(B_b2);159160// weighted sums161A_b0 = _mm_madd_epi16(A_b0, w_0);162A_b2 = _mm_madd_epi16(A_b2, w_8);163B_b0 = _mm_madd_epi16(B_b0, w_0);164B_b2 = _mm_madd_epi16(B_b2, w_8);165A_b0 = _mm_add_epi32(A_b0, A_b2);166B_b0 = _mm_add_epi32(B_b0, B_b2);167168// difference of weighted sums169A_b2 = _mm_sub_epi32(A_b0, B_b0);170_mm_storeu_si128((__m128i*)&sum[0], A_b2);171}172return sum[0] + sum[1] + sum[2] + sum[3];173}174175static int Disto4x4_SSE41(const uint8_t* WEBP_RESTRICT const a,176const uint8_t* WEBP_RESTRICT const b,177const uint16_t* WEBP_RESTRICT const w) {178const int diff_sum = TTransform_SSE41(a, b, w);179return abs(diff_sum) >> 5;180}181182static int Disto16x16_SSE41(const uint8_t* WEBP_RESTRICT const a,183const uint8_t* WEBP_RESTRICT const b,184const uint16_t* WEBP_RESTRICT const w) {185int D = 0;186int x, y;187for (y = 0; y < 16 * BPS; y += 4 * BPS) {188for (x = 0; x < 16; x += 4) {189D += Disto4x4_SSE41(a + x + y, b + x + y, w);190}191}192return D;193}194195//------------------------------------------------------------------------------196// Quantization197//198199// Generates a pshufb 
constant for shuffling 16b words.200#define PSHUFB_CST(A,B,C,D,E,F,G,H) \201_mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \2022 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \2032 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \2042 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0)205206static WEBP_INLINE int DoQuantizeBlock_SSE41(int16_t in[16], int16_t out[16],207const uint16_t* const sharpen,208const VP8Matrix* const mtx) {209const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);210const __m128i zero = _mm_setzero_si128();211__m128i out0, out8;212__m128i packed_out;213214// Load all inputs.215__m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);216__m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);217const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq[0]);218const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq[8]);219const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q[0]);220const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q[8]);221222// coeff = abs(in)223__m128i coeff0 = _mm_abs_epi16(in0);224__m128i coeff8 = _mm_abs_epi16(in8);225226// coeff = abs(in) + sharpen227if (sharpen != NULL) {228const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);229const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);230coeff0 = _mm_add_epi16(coeff0, sharpen0);231coeff8 = _mm_add_epi16(coeff8, sharpen8);232}233234// out = (coeff * iQ + B) >> QFIX235{236// doing calculations with 32b precision (QFIX=17)237// out = (coeff * iQ)238const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);239const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);240const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);241const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);242__m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);243__m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);244__m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);245__m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, 
coeff_iQ8H);246// out = (coeff * iQ + B)247const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias[0]);248const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias[4]);249const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias[8]);250const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias[12]);251out_00 = _mm_add_epi32(out_00, bias_00);252out_04 = _mm_add_epi32(out_04, bias_04);253out_08 = _mm_add_epi32(out_08, bias_08);254out_12 = _mm_add_epi32(out_12, bias_12);255// out = QUANTDIV(coeff, iQ, B, QFIX)256out_00 = _mm_srai_epi32(out_00, QFIX);257out_04 = _mm_srai_epi32(out_04, QFIX);258out_08 = _mm_srai_epi32(out_08, QFIX);259out_12 = _mm_srai_epi32(out_12, QFIX);260261// pack result as 16b262out0 = _mm_packs_epi32(out_00, out_04);263out8 = _mm_packs_epi32(out_08, out_12);264265// if (coeff > 2047) coeff = 2047266out0 = _mm_min_epi16(out0, max_coeff_2047);267out8 = _mm_min_epi16(out8, max_coeff_2047);268}269270// put sign back271out0 = _mm_sign_epi16(out0, in0);272out8 = _mm_sign_epi16(out8, in8);273274// in = out * Q275in0 = _mm_mullo_epi16(out0, q0);276in8 = _mm_mullo_epi16(out8, q8);277278_mm_storeu_si128((__m128i*)&in[0], in0);279_mm_storeu_si128((__m128i*)&in[8], in8);280281// zigzag the output before storing it. 
The re-ordering is:282// 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15283// -> 0 1 4[8]5 2 3 6 | 9 12 13 10 [7]11 14 15284// There's only two misplaced entries ([8] and [7]) that are crossing the285// reg's boundaries.286// We use pshufb instead of pshuflo/pshufhi.287{288const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);289const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);290const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);291const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7); // extract #7292const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);293const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);294const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);295const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8); // extract #8296const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);297const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);298_mm_storeu_si128((__m128i*)&out[0], out_z0);299_mm_storeu_si128((__m128i*)&out[8], out_z8);300packed_out = _mm_packs_epi16(out_z0, out_z8);301}302303// detect if all 'out' values are zeroes or not304return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);305}306307#undef PSHUFB_CST308309static int QuantizeBlock_SSE41(int16_t in[16], int16_t out[16],310const VP8Matrix* WEBP_RESTRICT const mtx) {311return DoQuantizeBlock_SSE41(in, out, &mtx->sharpen[0], mtx);312}313314static int QuantizeBlockWHT_SSE41(int16_t in[16], int16_t out[16],315const VP8Matrix* WEBP_RESTRICT const mtx) {316return DoQuantizeBlock_SSE41(in, out, NULL, mtx);317}318319static int Quantize2Blocks_SSE41(int16_t in[32], int16_t out[32],320const VP8Matrix* WEBP_RESTRICT const mtx) {321int nz;322const uint16_t* const sharpen = &mtx->sharpen[0];323nz = DoQuantizeBlock_SSE41(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;324nz |= DoQuantizeBlock_SSE41(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;325return nz;326}327328//------------------------------------------------------------------------------329// Entry 
point330331extern void VP8EncDspInitSSE41(void);332WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {333VP8CollectHistogram = CollectHistogram_SSE41;334VP8EncQuantizeBlock = QuantizeBlock_SSE41;335VP8EncQuantize2Blocks = Quantize2Blocks_SSE41;336VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE41;337VP8TDisto4x4 = Disto4x4_SSE41;338VP8TDisto16x16 = Disto16x16_SSE41;339}340341#else // !WEBP_USE_SSE41342343WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41)344345#endif // WEBP_USE_SSE41346347348