Path: blob/master/thirdparty/libwebp/src/dsp/enc_sse2.c
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of speed-critical encoding functions.
//
// Author: Christian Duvivier ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE2)
#include <assert.h>
#include <stdlib.h>  // for abs()
#include <emmintrin.h>

#include "src/dsp/common_sse2.h"
#include "src/enc/cost_enc.h"
#include "src/enc/vp8i_enc.h"

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

// Does one inverse transform.
static void ITransform_One_SSE2(const uint8_t* WEBP_RESTRICT ref,
                                const int16_t* WEBP_RESTRICT in,
                                uint8_t* WEBP_RESTRICT dst) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 = 20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
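  //
  // Worked example (illustrative): with x = 100 and K1 = 85627, the exact
  // product is (100 * 85627) >> 16 = 130. With the trick and k1 = 20091,
  // ((100 * 20091) >> 16) + 100 = 30 + 100 = 130, and the ((x * k) >> 16)
  // part is exactly what _mm_mulhi_epi16 computes on signed 16-bit lanes.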
  const __m128i k1k2 = _mm_set_epi16(-30068, -30068, -30068, -30068,
                                     20091, 20091, 20091, 20091);
  const __m128i k2k1 = _mm_set_epi16(20091, 20091, 20091, 20091,
                                     -30068, -30068, -30068, -30068);
  const __m128i zero = _mm_setzero_si128();
  const __m128i zero_four = _mm_set_epi16(0, 0, 0, 0, 4, 4, 4, 4);
  __m128i T01, T23;

  // Load and concatenate the transform coefficients.
  const __m128i in01 = _mm_loadu_si128((const __m128i*)&in[0]);
  const __m128i in23 = _mm_loadu_si128((const __m128i*)&in[8]);
  // a00 a10 a20 a30   a01 a11 a21 a31
  // a02 a12 a22 a32   a03 a13 a23 a33

  // Vertical pass and subsequent transpose.
  {
    const __m128i in1 = _mm_unpackhi_epi64(in01, in01);
    const __m128i in3 = _mm_unpackhi_epi64(in23, in23);

    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i a_d3 = _mm_add_epi16(in01, in23);
    const __m128i b_c3 = _mm_sub_epi16(in01, in23);
    const __m128i c1d1 = _mm_mulhi_epi16(in1, k2k1);
    const __m128i c2d2 = _mm_mulhi_epi16(in3, k1k2);
    const __m128i c3 = _mm_unpackhi_epi64(b_c3, b_c3);
    const __m128i c4 = _mm_sub_epi16(c1d1, c2d2);
    const __m128i c = _mm_add_epi16(c3, c4);
    const __m128i d4u = _mm_add_epi16(c1d1, c2d2);
    const __m128i du = _mm_add_epi16(a_d3, d4u);
    const __m128i d = _mm_unpackhi_epi64(du, du);

    // Second pass.
    const __m128i comb_ab = _mm_unpacklo_epi64(a_d3, b_c3);
    const __m128i comb_dc = _mm_unpacklo_epi64(d, c);

    const __m128i tmp01 = _mm_add_epi16(comb_ab, comb_dc);
    const __m128i tmp32 = _mm_sub_epi16(comb_ab, comb_dc);
    const __m128i tmp23 = _mm_shuffle_epi32(tmp32, _MM_SHUFFLE(1, 0, 3, 2));

    const __m128i transpose_0 = _mm_unpacklo_epi16(tmp01, tmp23);
    const __m128i transpose_1 = _mm_unpackhi_epi16(tmp01, tmp23);
    // a00 a20 a01 a21   a02 a22 a03 a23
    // a10 a30 a11 a31   a12 a32 a13 a33

    T01 = _mm_unpacklo_epi16(transpose_0, transpose_1);
    T23 = _mm_unpackhi_epi16(transpose_0, transpose_1);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // a02 a12 a22 a32   a03 a13 a23 a33
  }

  // Horizontal pass and subsequent transpose.
  {
    const __m128i T1 = _mm_unpackhi_epi64(T01, T01);
    const __m128i T3 = _mm_unpackhi_epi64(T23, T23);

    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i dc = _mm_add_epi16(T01, zero_four);

    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i a_d3 = _mm_add_epi16(dc, T23);
    const __m128i b_c3 = _mm_sub_epi16(dc, T23);
    const __m128i c1d1 = _mm_mulhi_epi16(T1, k2k1);
    const __m128i c2d2 = _mm_mulhi_epi16(T3, k1k2);
    const __m128i c3 = _mm_unpackhi_epi64(b_c3, b_c3);
    const __m128i c4 = _mm_sub_epi16(c1d1, c2d2);
    const __m128i c = _mm_add_epi16(c3, c4);
    const __m128i d4u = _mm_add_epi16(c1d1, c2d2);
    const __m128i du = _mm_add_epi16(a_d3, d4u);
    const __m128i d = _mm_unpackhi_epi64(du, du);

    // Second pass.
    const __m128i comb_ab = _mm_unpacklo_epi64(a_d3, b_c3);
    const __m128i comb_dc = _mm_unpacklo_epi64(d, c);

    const __m128i tmp01 = _mm_add_epi16(comb_ab, comb_dc);
    const __m128i tmp32 = _mm_sub_epi16(comb_ab, comb_dc);
    const __m128i tmp23 = _mm_shuffle_epi32(tmp32, _MM_SHUFFLE(1, 0, 3, 2));

    const __m128i shifted01 = _mm_srai_epi16(tmp01, 3);
    const __m128i shifted23 = _mm_srai_epi16(tmp23, 3);
    // a00 a01 a02 a03   a10 a11 a12 a13
    // a20 a21 a22 a23   a30 a31 a32 a33

    const __m128i transpose_0 = _mm_unpacklo_epi16(shifted01, shifted23);
    const __m128i transpose_1 = _mm_unpackhi_epi16(shifted01, shifted23);
    // a00 a20 a01 a21   a02 a22 a03 a23
    // a10 a30 a11 a31   a12 a32 a13 a33

    T01 = _mm_unpacklo_epi16(transpose_0, transpose_1);
    T23 = _mm_unpackhi_epi16(transpose_0, transpose_1);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // a02 a12 a22 a32   a03 a13 a23 a33
  }

  // Add inverse transform to 'ref' and store.
  {
    // Load the reference(s).
    __m128i ref01, ref23, ref0123;
    int32_t buf[4];

    // Load four bytes/pixels per line.
    const __m128i ref0 = _mm_cvtsi32_si128(WebPMemToInt32(&ref[0 * BPS]));
    const __m128i ref1 = _mm_cvtsi32_si128(WebPMemToInt32(&ref[1 * BPS]));
    const __m128i ref2 = _mm_cvtsi32_si128(WebPMemToInt32(&ref[2 * BPS]));
    const __m128i ref3 = _mm_cvtsi32_si128(WebPMemToInt32(&ref[3 * BPS]));
    ref01 = _mm_unpacklo_epi32(ref0, ref1);
    ref23 = _mm_unpacklo_epi32(ref2, ref3);

    // Convert to 16b.
    ref01 = _mm_unpacklo_epi8(ref01, zero);
    ref23 = _mm_unpacklo_epi8(ref23, zero);
    // Add the inverse transform(s).
    ref01 = _mm_add_epi16(ref01, T01);
    ref23 = _mm_add_epi16(ref23, T23);
    // Unsigned saturate to 8b.
    ref0123 = _mm_packus_epi16(ref01, ref23);

    _mm_storeu_si128((__m128i*)buf, ref0123);

    // Store four bytes/pixels per line.
    WebPInt32ToMem(&dst[0 * BPS], buf[0]);
    WebPInt32ToMem(&dst[1 * BPS], buf[1]);
    WebPInt32ToMem(&dst[2 * BPS], buf[2]);
    WebPInt32ToMem(&dst[3 * BPS], buf[3]);
  }
}

// Does two inverse transforms.
static void ITransform_Two_SSE2(const uint8_t* WEBP_RESTRICT ref,
                                const int16_t* WEBP_RESTRICT in,
                                uint8_t* WEBP_RESTRICT dst) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 = 20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two inverse
  // transforms in parallel).
  __m128i in0, in1, in2, in3;
  {
    const __m128i tmp0 = _mm_loadu_si128((const __m128i*)&in[0]);
    const __m128i tmp1 = _mm_loadu_si128((const __m128i*)&in[8]);
    const __m128i tmp2 = _mm_loadu_si128((const __m128i*)&in[16]);
    const __m128i tmp3 = _mm_loadu_si128((const __m128i*)&in[24]);
    in0 = _mm_unpacklo_epi64(tmp0, tmp2);
    in1 = _mm_unpackhi_epi64(tmp0, tmp2);
    in2 = _mm_unpacklo_epi64(tmp1, tmp3);
    in3 = _mm_unpackhi_epi64(tmp1, tmp3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&tmp0, &tmp1, &tmp2, &tmp3, &T0, &T1, &T2, &T3);
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&shifted0, &shifted1, &shifted2, &shifted3, &T0, &T1,
                           &T2, &T3);
  }

  // Add inverse transform to 'ref' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i ref0, ref1, ref2, ref3;
    // Load eight bytes/pixels per line.
    ref0 = _mm_loadl_epi64((const __m128i*)&ref[0 * BPS]);
    ref1 = _mm_loadl_epi64((const __m128i*)&ref[1 * BPS]);
    ref2 = _mm_loadl_epi64((const __m128i*)&ref[2 * BPS]);
    ref3 = _mm_loadl_epi64((const __m128i*)&ref[3 * BPS]);
    // Convert to 16b.
    ref0 = _mm_unpacklo_epi8(ref0, zero);
    ref1 = _mm_unpacklo_epi8(ref1, zero);
    ref2 = _mm_unpacklo_epi8(ref2, zero);
    ref3 = _mm_unpacklo_epi8(ref3, zero);
    // Add the inverse transform(s).
    ref0 = _mm_add_epi16(ref0, T0);
    ref1 = _mm_add_epi16(ref1, T1);
    ref2 = _mm_add_epi16(ref2, T2);
    ref3 = _mm_add_epi16(ref3, T3);
    // Unsigned saturate to 8b.
    ref0 = _mm_packus_epi16(ref0, ref0);
    ref1 = _mm_packus_epi16(ref1, ref1);
    ref2 = _mm_packus_epi16(ref2, ref2);
    ref3 = _mm_packus_epi16(ref3, ref3);
    // Store eight bytes/pixels per line.
    _mm_storel_epi64((__m128i*)&dst[0 * BPS], ref0);
    _mm_storel_epi64((__m128i*)&dst[1 * BPS], ref1);
    _mm_storel_epi64((__m128i*)&dst[2 * BPS], ref2);
    _mm_storel_epi64((__m128i*)&dst[3 * BPS], ref3);
  }
}

// Does one or two inverse transforms.
static void ITransform_SSE2(const uint8_t* WEBP_RESTRICT ref,
                            const int16_t* WEBP_RESTRICT in,
                            uint8_t* WEBP_RESTRICT dst,
                            int do_two) {
  if (do_two) {
    ITransform_Two_SSE2(ref, in, dst);
  } else {
    ITransform_One_SSE2(ref, in, dst);
  }
}

static void FTransformPass1_SSE2(const __m128i* const in01,
                                 const __m128i* const in23,
                                 __m128i* const out01,
                                 __m128i* const out32) {
  const __m128i k937 = _mm_set1_epi32(937);
  const __m128i k1812 = _mm_set1_epi32(1812);

  const __m128i k88p = _mm_set_epi16(8, 8, 8, 8, 8, 8, 8, 8);
  const __m128i k88m = _mm_set_epi16(-8, 8, -8, 8, -8, 8, -8, 8);
  const __m128i k5352_2217p = _mm_set_epi16(2217, 5352, 2217, 5352,
                                            2217, 5352, 2217, 5352);
  const __m128i k5352_2217m = _mm_set_epi16(-5352, 2217, -5352, 2217,
                                            -5352, 2217, -5352, 2217);

  // *in01 = 00 01 10 11 02 03 12 13
  // *in23 = 20 21 30 31 22 23 32 33
  const __m128i shuf01_p = _mm_shufflehi_epi16(*in01, _MM_SHUFFLE(2, 3, 0, 1));
  const __m128i shuf23_p = _mm_shufflehi_epi16(*in23, _MM_SHUFFLE(2, 3, 0, 1));
  // 00 01 10 11 03 02 13 12
  // 20 21 30 31 23 22 33 32
  const __m128i s01 = _mm_unpacklo_epi64(shuf01_p, shuf23_p);
  const __m128i s32 = _mm_unpackhi_epi64(shuf01_p, shuf23_p);
  // 00 01 10 11 20 21 30 31
  // 03 02 13 12 23 22 33 32
  const __m128i a01 = _mm_add_epi16(s01, s32);
  const __m128i a32 = _mm_sub_epi16(s01, s32);
  // [d0 + d3 | d1 + d2 | ...] = [a0 a1 | a0' a1' | ... ]
  // [d0 - d3 | d1 - d2 | ...] = [a3 a2 | a3' a2' | ... ]

  const __m128i tmp0 = _mm_madd_epi16(a01, k88p);  // [ (a0 + a1) << 3, ... ]
  const __m128i tmp2 = _mm_madd_epi16(a01, k88m);  // [ (a0 - a1) << 3, ... ]
  const __m128i tmp1_1 = _mm_madd_epi16(a32, k5352_2217p);
  const __m128i tmp3_1 = _mm_madd_epi16(a32, k5352_2217m);
  const __m128i tmp1_2 = _mm_add_epi32(tmp1_1, k1812);
  const __m128i tmp3_2 = _mm_add_epi32(tmp3_1, k937);
  const __m128i tmp1 = _mm_srai_epi32(tmp1_2, 9);
  const __m128i tmp3 = _mm_srai_epi32(tmp3_2, 9);
  const __m128i s03 = _mm_packs_epi32(tmp0, tmp2);
  const __m128i s12 = _mm_packs_epi32(tmp1, tmp3);
  const __m128i s_lo = _mm_unpacklo_epi16(s03, s12);  // 0 1 0 1 0 1...
  const __m128i s_hi = _mm_unpackhi_epi16(s03, s12);  // 2 3 2 3 2 3
  const __m128i v23 = _mm_unpackhi_epi32(s_lo, s_hi);
  *out01 = _mm_unpacklo_epi32(s_lo, s_hi);
  *out32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));  // 3 2 3 2 3 2..
}

static void FTransformPass2_SSE2(const __m128i* const v01,
                                 const __m128i* const v32,
                                 int16_t* WEBP_RESTRICT out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i seven = _mm_set1_epi16(7);
  const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217,
                                           5352, 2217, 5352, 2217);
  const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
                                           2217, -5352, 2217, -5352);
  const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
  const __m128i k51000 = _mm_set1_epi32(51000);

  // Same operations are done on the (0,3) and (1,2) pairs.
  // a3 = v0 - v3
  // a2 = v1 - v2
  const __m128i a32 = _mm_sub_epi16(*v01, *v32);
  const __m128i a22 = _mm_unpackhi_epi64(a32, a32);

  const __m128i b23 = _mm_unpacklo_epi16(a22, a32);
  const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
  const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
  const __m128i d1 = _mm_add_epi32(c1, k12000_plus_one);
  const __m128i d3 = _mm_add_epi32(c3, k51000);
  const __m128i e1 = _mm_srai_epi32(d1, 16);
  const __m128i e3 = _mm_srai_epi32(d3, 16);
  // f1 = ((b3 * 5352 + b2 * 2217 + 12000) >> 16)
  // f3 = ((b3 * 2217 - b2 * 5352 + 51000) >> 16)
  const __m128i f1 = _mm_packs_epi32(e1, e1);
  const __m128i f3 = _mm_packs_epi32(e3, e3);
  // g1 = f1 + (a3 != 0);
  // The compare will return (0xffff, 0) for (==0, !=0). To turn that into the
  // desired (0, 1), we add one earlier through k12000_plus_one.
  // -> g1 = f1 + 1 - (a3 == 0)
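  //
  // In other words (illustrative): _mm_cmpeq_epi16 yields -1 in a lane where
  // a3 == 0 and 0 elsewhere, and the "+ 1" is already baked into f1 via the
  // (1 << 16) in k12000_plus_one, so adding the compare mask either cancels
  // that extra one (a3 == 0) or keeps it (a3 != 0).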
  const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));

  // a0 = v0 + v3
  // a1 = v1 + v2
  const __m128i a01 = _mm_add_epi16(*v01, *v32);
  const __m128i a01_plus_7 = _mm_add_epi16(a01, seven);
  const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
  const __m128i c0 = _mm_add_epi16(a01_plus_7, a11);
  const __m128i c2 = _mm_sub_epi16(a01_plus_7, a11);
  // d0 = (a0 + a1 + 7) >> 4;
  // d2 = (a0 - a1 + 7) >> 4;
  const __m128i d0 = _mm_srai_epi16(c0, 4);
  const __m128i d2 = _mm_srai_epi16(c2, 4);

  const __m128i d0_g1 = _mm_unpacklo_epi64(d0, g1);
  const __m128i d2_f3 = _mm_unpacklo_epi64(d2, f3);
  _mm_storeu_si128((__m128i*)&out[0], d0_g1);
  _mm_storeu_si128((__m128i*)&out[8], d2_f3);
}

static void FTransform_SSE2(const uint8_t* WEBP_RESTRICT src,
                            const uint8_t* WEBP_RESTRICT ref,
                            int16_t* WEBP_RESTRICT out) {
  const __m128i zero = _mm_setzero_si128();
  // Load src.
  const __m128i src0 = _mm_loadl_epi64((const __m128i*)&src[0 * BPS]);
  const __m128i src1 = _mm_loadl_epi64((const __m128i*)&src[1 * BPS]);
  const __m128i src2 = _mm_loadl_epi64((const __m128i*)&src[2 * BPS]);
  const __m128i src3 = _mm_loadl_epi64((const __m128i*)&src[3 * BPS]);
  // 00 01 02 03 *
  // 10 11 12 13 *
  // 20 21 22 23 *
  // 30 31 32 33 *
  // Shuffle.
  const __m128i src_0 = _mm_unpacklo_epi16(src0, src1);
  const __m128i src_1 = _mm_unpacklo_epi16(src2, src3);
  // 00 01 10 11 02 03 12 13 * * ...
  // 20 21 30 31 22 23 32 33 * * ...

  // Load ref.
  const __m128i ref0 = _mm_loadl_epi64((const __m128i*)&ref[0 * BPS]);
  const __m128i ref1 = _mm_loadl_epi64((const __m128i*)&ref[1 * BPS]);
  const __m128i ref2 = _mm_loadl_epi64((const __m128i*)&ref[2 * BPS]);
  const __m128i ref3 = _mm_loadl_epi64((const __m128i*)&ref[3 * BPS]);
  const __m128i ref_0 = _mm_unpacklo_epi16(ref0, ref1);
  const __m128i ref_1 = _mm_unpacklo_epi16(ref2, ref3);

  // Convert both to 16 bit.
  const __m128i src_0_16b = _mm_unpacklo_epi8(src_0, zero);
  const __m128i src_1_16b = _mm_unpacklo_epi8(src_1, zero);
  const __m128i ref_0_16b = _mm_unpacklo_epi8(ref_0, zero);
  const __m128i ref_1_16b = _mm_unpacklo_epi8(ref_1, zero);

  // Compute the difference.
  const __m128i row01 = _mm_sub_epi16(src_0_16b, ref_0_16b);
  const __m128i row23 = _mm_sub_epi16(src_1_16b, ref_1_16b);
  __m128i v01, v32;

  // First pass
  FTransformPass1_SSE2(&row01, &row23, &v01, &v32);

  // Second pass
  FTransformPass2_SSE2(&v01, &v32, out);
}

static void FTransform2_SSE2(const uint8_t* WEBP_RESTRICT src,
                             const uint8_t* WEBP_RESTRICT ref,
                             int16_t* WEBP_RESTRICT out) {
  const __m128i zero = _mm_setzero_si128();

  // Load src and convert to 16b.
  const __m128i src0 = _mm_loadl_epi64((const __m128i*)&src[0 * BPS]);
  const __m128i src1 = _mm_loadl_epi64((const __m128i*)&src[1 * BPS]);
  const __m128i src2 = _mm_loadl_epi64((const __m128i*)&src[2 * BPS]);
  const __m128i src3 = _mm_loadl_epi64((const __m128i*)&src[3 * BPS]);
  const __m128i src_0 = _mm_unpacklo_epi8(src0, zero);
  const __m128i src_1 = _mm_unpacklo_epi8(src1, zero);
  const __m128i src_2 = _mm_unpacklo_epi8(src2, zero);
  const __m128i src_3 = _mm_unpacklo_epi8(src3, zero);
  // Load ref and convert to 16b.
  const __m128i ref0 = _mm_loadl_epi64((const __m128i*)&ref[0 * BPS]);
  const __m128i ref1 = _mm_loadl_epi64((const __m128i*)&ref[1 * BPS]);
  const __m128i ref2 = _mm_loadl_epi64((const __m128i*)&ref[2 * BPS]);
  const __m128i ref3 = _mm_loadl_epi64((const __m128i*)&ref[3 * BPS]);
  const __m128i ref_0 = _mm_unpacklo_epi8(ref0, zero);
  const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
  const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
  const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
  // Compute difference. -> 00 01 02 03  00' 01' 02' 03'
  const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
  const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
  const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
  const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);

  // Unpack and shuffle
  // 00 01 02 03   0 0 0 0
  // 10 11 12 13   0 0 0 0
  // 20 21 22 23   0 0 0 0
  // 30 31 32 33   0 0 0 0
  const __m128i shuf01l = _mm_unpacklo_epi32(diff0, diff1);
  const __m128i shuf23l = _mm_unpacklo_epi32(diff2, diff3);
  const __m128i shuf01h = _mm_unpackhi_epi32(diff0, diff1);
  const __m128i shuf23h = _mm_unpackhi_epi32(diff2, diff3);
  __m128i v01l, v32l;
  __m128i v01h, v32h;

  // First pass
  FTransformPass1_SSE2(&shuf01l, &shuf23l, &v01l, &v32l);
  FTransformPass1_SSE2(&shuf01h, &shuf23h, &v01h, &v32h);

  // Second pass
  FTransformPass2_SSE2(&v01l, &v32l, out + 0);
  FTransformPass2_SSE2(&v01h, &v32h, out + 16);
}

static void FTransformWHTRow_SSE2(const int16_t* WEBP_RESTRICT const in,
                                  __m128i* const out) {
  const __m128i kMult = _mm_set_epi16(-1, 1, -1, 1, 1, 1, 1, 1);
  const __m128i src0 = _mm_loadl_epi64((__m128i*)&in[0 * 16]);
  const __m128i src1 = _mm_loadl_epi64((__m128i*)&in[1 * 16]);
  const __m128i src2 = _mm_loadl_epi64((__m128i*)&in[2 * 16]);
  const __m128i src3 = _mm_loadl_epi64((__m128i*)&in[3 * 16]);
  const __m128i A01 = _mm_unpacklo_epi16(src0, src1);  // A0 A1 | ...
  const __m128i A23 = _mm_unpacklo_epi16(src2, src3);  // A2 A3 | ...
  const __m128i B0 = _mm_adds_epi16(A01, A23);  // a0 | a1 | ...
  const __m128i B1 = _mm_subs_epi16(A01, A23);  // a3 | a2 | ...
  const __m128i C0 = _mm_unpacklo_epi32(B0, B1);  // a0 | a1 | a3 | a2 | ...
  const __m128i C1 = _mm_unpacklo_epi32(B1, B0);  // a3 | a2 | a0 | a1 | ...
  const __m128i D = _mm_unpacklo_epi64(C0, C1);   // a0 a1 a3 a2 a3 a2 a0 a1
  *out = _mm_madd_epi16(D, kMult);
}

static void FTransformWHT_SSE2(const int16_t* WEBP_RESTRICT in,
                               int16_t* WEBP_RESTRICT out) {
  // Input is 12b signed.
  __m128i row0, row1, row2, row3;
  // Rows are 14b signed.
  FTransformWHTRow_SSE2(in + 0 * 64, &row0);
  FTransformWHTRow_SSE2(in + 1 * 64, &row1);
  FTransformWHTRow_SSE2(in + 2 * 64, &row2);
  FTransformWHTRow_SSE2(in + 3 * 64, &row3);

  {
    // The a* are 15b signed.
    const __m128i a0 = _mm_add_epi32(row0, row2);
    const __m128i a1 = _mm_add_epi32(row1, row3);
    const __m128i a2 = _mm_sub_epi32(row1, row3);
    const __m128i a3 = _mm_sub_epi32(row0, row2);
    const __m128i a0a3 = _mm_packs_epi32(a0, a3);
    const __m128i a1a2 = _mm_packs_epi32(a1, a2);

    // The b* are 16b signed.
    const __m128i b0b1 = _mm_add_epi16(a0a3, a1a2);
    const __m128i b3b2 = _mm_sub_epi16(a0a3, a1a2);
    const __m128i tmp_b2b3 = _mm_unpackhi_epi64(b3b2, b3b2);
    const __m128i b2b3 = _mm_unpacklo_epi64(tmp_b2b3, b3b2);

    _mm_storeu_si128((__m128i*)&out[0], _mm_srai_epi16(b0b1, 1));
    _mm_storeu_si128((__m128i*)&out[8], _mm_srai_epi16(b2b3, 1));
  }
}

//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
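// Coefficients are binned as min(abs(coeff) >> 3, MAX_COEFF_THRESH); e.g.
// (illustrative) a coefficient of 100 gives abs(100) >> 3 = 12, so it lands
// in bin min(12, MAX_COEFF_THRESH).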

static void CollectHistogram_SSE2(const uint8_t* WEBP_RESTRICT ref,
                                  const uint8_t* WEBP_RESTRICT pred,
                                  int start_block, int end_block,
                                  VP8Histogram* WEBP_RESTRICT const histo) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  int j;
  int distribution[MAX_COEFF_THRESH + 1] = { 0 };
  for (j = start_block; j < end_block; ++j) {
    int16_t out[16];
    int k;

    FTransform_SSE2(ref + VP8DspScan[j], pred + VP8DspScan[j], out);

    // Convert coefficients to bin (within out[]).
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      const __m128i d0 = _mm_sub_epi16(zero, out0);
      const __m128i d1 = _mm_sub_epi16(zero, out1);
      const __m128i abs0 = _mm_max_epi16(out0, d0);  // abs(v), 16b
      const __m128i abs1 = _mm_max_epi16(out1, d1);
      // v = abs(out) >> 3
      const __m128i v0 = _mm_srai_epi16(abs0, 3);
      const __m128i v1 = _mm_srai_epi16(abs1, 3);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }

    // Convert coefficients to bin.
    for (k = 0; k < 16; ++k) {
      ++distribution[out[k]];
    }
  }
  VP8SetHistogramData(distribution, histo);
}

//------------------------------------------------------------------------------
// Intra predictions

// helper for chroma-DC predictions
static WEBP_INLINE void Put8x8uv_SSE2(uint8_t v, uint8_t* dst) {
  int j;
  const __m128i values = _mm_set1_epi8((char)v);
  for (j = 0; j < 8; ++j) {
    _mm_storel_epi64((__m128i*)(dst + j * BPS), values);
  }
}

static WEBP_INLINE void Put16_SSE2(uint8_t v, uint8_t* dst) {
  int j;
  const __m128i values = _mm_set1_epi8((char)v);
  for (j = 0; j < 16; ++j) {
    _mm_store_si128((__m128i*)(dst + j * BPS), values);
  }
}

static WEBP_INLINE void Fill_SSE2(uint8_t* dst, int value, int size) {
  if (size == 4) {
    int j;
    for (j = 0; j < 4; ++j) {
      memset(dst + j * BPS, value, 4);
    }
  } else if (size == 8) {
    Put8x8uv_SSE2(value, dst);
  } else {
    Put16_SSE2(value, dst);
  }
}

static WEBP_INLINE void VE8uv_SSE2(uint8_t* WEBP_RESTRICT dst,
                                   const uint8_t* WEBP_RESTRICT top) {
  int j;
  const __m128i top_values = _mm_loadl_epi64((const __m128i*)top);
  for (j = 0; j < 8; ++j) {
    _mm_storel_epi64((__m128i*)(dst + j * BPS), top_values);
  }
}

static WEBP_INLINE void VE16_SSE2(uint8_t* WEBP_RESTRICT dst,
                                  const uint8_t* WEBP_RESTRICT top) {
  const __m128i top_values = _mm_load_si128((const __m128i*)top);
  int j;
  for (j = 0; j < 16; ++j) {
    _mm_store_si128((__m128i*)(dst + j * BPS), top_values);
  }
}

static WEBP_INLINE void VerticalPred_SSE2(uint8_t* WEBP_RESTRICT dst,
                                          const uint8_t* WEBP_RESTRICT top,
                                          int size) {
  if (top != NULL) {
    if (size == 8) {
      VE8uv_SSE2(dst, top);
    } else {
      VE16_SSE2(dst, top);
    }
  } else {
    Fill_SSE2(dst, 127, size);
  }
}

static WEBP_INLINE void HE8uv_SSE2(uint8_t* WEBP_RESTRICT dst,
                                   const uint8_t* WEBP_RESTRICT left) {
  int j;
  for (j = 0; j < 8; ++j) {
    const __m128i values = _mm_set1_epi8((char)left[j]);
    _mm_storel_epi64((__m128i*)dst, values);
    dst += BPS;
  }
}

static WEBP_INLINE void HE16_SSE2(uint8_t* WEBP_RESTRICT dst,
                                  const uint8_t* WEBP_RESTRICT left) {
  int j;
  for (j = 0; j < 16; ++j) {
    const __m128i values = _mm_set1_epi8((char)left[j]);
    _mm_store_si128((__m128i*)dst, values);
    dst += BPS;
  }
}

static WEBP_INLINE void HorizontalPred_SSE2(uint8_t* WEBP_RESTRICT dst,
                                            const uint8_t* WEBP_RESTRICT left,
                                            int size) {
  if (left != NULL) {
    if (size == 8) {
      HE8uv_SSE2(dst, left);
    } else {
      HE16_SSE2(dst, left);
    }
  } else {
    Fill_SSE2(dst, 129, size);
  }
}

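// TrueMotion ("TM") prediction computes, per pixel, clip8(left[y] + top[x] -
// top_left); the unsigned saturation of _mm_packus_epi16 below provides the
// clipping to [0, 255] for free.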
static WEBP_INLINE void TM_SSE2(uint8_t* WEBP_RESTRICT dst,
                                const uint8_t* WEBP_RESTRICT left,
                                const uint8_t* WEBP_RESTRICT top, int size) {
  const __m128i zero = _mm_setzero_si128();
  int y;
  if (size == 8) {
    const __m128i top_values = _mm_loadl_epi64((const __m128i*)top);
    const __m128i top_base = _mm_unpacklo_epi8(top_values, zero);
    for (y = 0; y < 8; ++y, dst += BPS) {
      const int val = left[y] - left[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero);
      _mm_storel_epi64((__m128i*)dst, out);
    }
  } else {
    const __m128i top_values = _mm_load_si128((const __m128i*)top);
    const __m128i top_base_0 = _mm_unpacklo_epi8(top_values, zero);
    const __m128i top_base_1 = _mm_unpackhi_epi8(top_values, zero);
    for (y = 0; y < 16; ++y, dst += BPS) {
      const int val = left[y] - left[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out_0 = _mm_add_epi16(base, top_base_0);
      const __m128i out_1 = _mm_add_epi16(base, top_base_1);
      const __m128i out = _mm_packus_epi16(out_0, out_1);
      _mm_store_si128((__m128i*)dst, out);
    }
  }
}

static WEBP_INLINE void TrueMotion_SSE2(uint8_t* WEBP_RESTRICT dst,
                                        const uint8_t* WEBP_RESTRICT left,
                                        const uint8_t* WEBP_RESTRICT top,
                                        int size) {
  if (left != NULL) {
    if (top != NULL) {
      TM_SSE2(dst, left, top, size);
    } else {
      HorizontalPred_SSE2(dst, left, size);
    }
  } else {
    // true motion without left samples (hence: with default 129 value)
    // is equivalent to VE prediction where you just copy the top samples.
    // Note that if top samples are not available, the default value is
    // then 129, and not 127 as in the VerticalPred case.
    if (top != NULL) {
      VerticalPred_SSE2(dst, top, size);
    } else {
      Fill_SSE2(dst, 129, size);
    }
  }
}

static WEBP_INLINE void DC8uv_SSE2(uint8_t* WEBP_RESTRICT dst,
                                   const uint8_t* WEBP_RESTRICT left,
                                   const uint8_t* WEBP_RESTRICT top) {
  const __m128i top_values = _mm_loadl_epi64((const __m128i*)top);
  const __m128i left_values = _mm_loadl_epi64((const __m128i*)left);
  const __m128i combined = _mm_unpacklo_epi64(top_values, left_values);
  const int DC = VP8HorizontalAdd8b(&combined) + 8;
  Put8x8uv_SSE2(DC >> 4, dst);
}

static WEBP_INLINE void DC8uvNoLeft_SSE2(uint8_t* WEBP_RESTRICT dst,
                                         const uint8_t* WEBP_RESTRICT top) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i top_values = _mm_loadl_epi64((const __m128i*)top);
  const __m128i sum = _mm_sad_epu8(top_values, zero);
  const int DC = _mm_cvtsi128_si32(sum) + 4;
  Put8x8uv_SSE2(DC >> 3, dst);
}

static WEBP_INLINE void DC8uvNoTop_SSE2(uint8_t* WEBP_RESTRICT dst,
                                        const uint8_t* WEBP_RESTRICT left) {
  // 'left' is contiguous so we can reuse the top summation.
  DC8uvNoLeft_SSE2(dst, left);
}

static WEBP_INLINE void DC8uvNoTopLeft_SSE2(uint8_t* dst) {
  Put8x8uv_SSE2(0x80, dst);
}

static WEBP_INLINE void DC8uvMode_SSE2(uint8_t* WEBP_RESTRICT dst,
                                       const uint8_t* WEBP_RESTRICT left,
                                       const uint8_t* WEBP_RESTRICT top) {
  if (top != NULL) {
    if (left != NULL) {  // top and left present
      DC8uv_SSE2(dst, left, top);
    } else {  // top, but no left
      DC8uvNoLeft_SSE2(dst, top);
    }
  } else if (left != NULL) {  // left but no top
    DC8uvNoTop_SSE2(dst, left);
  } else {  // no top, no left, nothing.
    DC8uvNoTopLeft_SSE2(dst);
  }
}

static WEBP_INLINE void DC16_SSE2(uint8_t* WEBP_RESTRICT dst,
                                  const uint8_t* WEBP_RESTRICT left,
                                  const uint8_t* WEBP_RESTRICT top) {
  const __m128i top_row = _mm_load_si128((const __m128i*)top);
  const __m128i left_row = _mm_load_si128((const __m128i*)left);
  const int DC =
      VP8HorizontalAdd8b(&top_row) + VP8HorizontalAdd8b(&left_row) + 16;
  Put16_SSE2(DC >> 5, dst);
}

static WEBP_INLINE void DC16NoLeft_SSE2(uint8_t* WEBP_RESTRICT dst,
                                        const uint8_t* WEBP_RESTRICT top) {
  const __m128i top_row = _mm_load_si128((const __m128i*)top);
  const int DC = VP8HorizontalAdd8b(&top_row) + 8;
  Put16_SSE2(DC >> 4, dst);
}

static WEBP_INLINE void DC16NoTop_SSE2(uint8_t* WEBP_RESTRICT dst,
                                       const uint8_t* WEBP_RESTRICT left) {
  // 'left' is contiguous so we can reuse the top summation.
  DC16NoLeft_SSE2(dst, left);
}

static WEBP_INLINE void DC16NoTopLeft_SSE2(uint8_t* dst) {
  Put16_SSE2(0x80, dst);
}

static WEBP_INLINE void DC16Mode_SSE2(uint8_t* WEBP_RESTRICT dst,
                                      const uint8_t* WEBP_RESTRICT left,
                                      const uint8_t* WEBP_RESTRICT top) {
  if (top != NULL) {
    if (left != NULL) {  // top and left present
      DC16_SSE2(dst, left, top);
    } else {  // top, but no left
      DC16NoLeft_SSE2(dst, top);
    }
  } else if (left != NULL) {  // left but no top
    DC16NoTop_SSE2(dst, left);
  } else {  // no top, no left, nothing.
    DC16NoTopLeft_SSE2(dst);
  }
}

//------------------------------------------------------------------------------
// 4x4 predictions

#define DST(x, y) dst[(x) + (y) * BPS]
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
#define AVG2(a, b) (((a) + (b) + 1) >> 1)

// We use the following 8b-arithmetic tricks:
//   (a + 2 * b + c + 2) >> 2 = (AC + b + 1) >> 1
//   where: AC = (a + c) >> 1 = [(a + c + 1) >> 1] - [(a^c) & 1]
// and:
//   (a + 2 * b + c + 2) >> 2 = (AB + BC + 1) >> 1 - (ab|bc)&lsb
//   where: AB = (a + b + 1) >> 1, BC = (b + c + 1) >> 1
//   and ab = a ^ b, bc = b ^ c, lsb = (AB^BC)&1
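//
// Worked example of the first trick (illustrative): for a = 10, b = 20,
// c = 13, the exact value is (10 + 40 + 13 + 2) >> 2 = 16. Via SSE2:
// _mm_avg_epu8(a, c) = 12, minus (a^c) & 1 = 1 gives AC = 11, and
// _mm_avg_epu8(AC, b) = (11 + 20 + 1) >> 1 = 16, as desired.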

// vertical
static WEBP_INLINE void VE4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(top - 1));
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGH00), one);
  const __m128i b = _mm_subs_epu8(a, lsb);
  const __m128i avg = _mm_avg_epu8(b, BCDEFGH0);
  const int vals = _mm_cvtsi128_si32(avg);
  int i;
  for (i = 0; i < 4; ++i) {
    WebPInt32ToMem(dst + i * BPS, vals);
  }
}

// horizontal
static WEBP_INLINE void HE4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const int X = top[-1];
  const int I = top[-2];
  const int J = top[-3];
  const int K = top[-4];
  const int L = top[-5];
  WebPUint32ToMem(dst + 0 * BPS, 0x01010101U * AVG3(X, I, J));
  WebPUint32ToMem(dst + 1 * BPS, 0x01010101U * AVG3(I, J, K));
  WebPUint32ToMem(dst + 2 * BPS, 0x01010101U * AVG3(J, K, L));
  WebPUint32ToMem(dst + 3 * BPS, 0x01010101U * AVG3(K, L, L));
}

static WEBP_INLINE void DC4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  uint32_t dc = 4;
  int i;
  for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i];
  Fill_SSE2(dst, dc >> 3, 4);
}

// Down-Left
static WEBP_INLINE void LD4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((const __m128i*)top);
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i CDEFGHH0 = _mm_insert_epi16(CDEFGH00, top[7], 3);
  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGHH0), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               abcdefg    ));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
}

// Vertical-Right
static WEBP_INLINE void VR4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const __m128i one = _mm_set1_epi8(1);
  const int I = top[-2];
  const int J = top[-3];
  const int K = top[-4];
  const int X = top[-1];
  const __m128i XABCD = _mm_loadl_epi64((const __m128i*)(top - 1));
  const __m128i ABCD0 = _mm_srli_si128(XABCD, 1);
  const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0);
  const __m128i _XABCD = _mm_slli_si128(XABCD, 1);
  const __m128i IXABCD = _mm_insert_epi16(_XABCD, (short)(I | (X << 8)), 0);
  const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(IXABCD, ABCD0), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i efgh = _mm_avg_epu8(avg2, XABCD);
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               abcd    ));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(               efgh    ));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1)));
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1)));

  // these two are hard to implement in SSE2, so we keep the C-version:
  DST(0, 2) = AVG3(J, I, X);
  DST(0, 3) = AVG3(K, J, I);
}

// Vertical-Left
static WEBP_INLINE void VL4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((const __m128i*)top);
  const __m128i BCDEFGH_ = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH__ = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_);
  const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_);
  const __m128i avg3 = _mm_avg_epu8(avg1, avg2);
  const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one);
  const __m128i ab = _mm_xor_si128(ABCDEFGH, BCDEFGH_);
  const __m128i bc = _mm_xor_si128(CDEFGH__, BCDEFGH_);
  const __m128i abbc = _mm_or_si128(ab, bc);
  const __m128i lsb2 = _mm_and_si128(abbc, lsb1);
  const __m128i avg4 = _mm_subs_epu8(avg3, lsb2);
  const uint32_t extra_out =
      (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(avg4, 4));
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               avg1    ));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(               avg4    ));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1)));
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1)));

  // these two are hard to get and irregular
  DST(3, 2) = (extra_out >> 0) & 0xff;
  DST(3, 3) = (extra_out >> 8) & 0xff;
}

// Down-right
static WEBP_INLINE void RD4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const __m128i one = _mm_set1_epi8(1);
  const __m128i LKJIXABC = _mm_loadl_epi64((const __m128i*)(top - 5));
  const __m128i LKJIXABCD = _mm_insert_epi16(LKJIXABC, top[3], 4);
  const __m128i KJIXABCD_ = _mm_srli_si128(LKJIXABCD, 1);
  const __m128i JIXABCD__ = _mm_srli_si128(LKJIXABCD, 2);
  const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(JIXABCD__, LKJIXABCD), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_);
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(               abcdefg    ));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
}

static WEBP_INLINE void HU4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const int I = top[-2];
  const int J = top[-3];
  const int K = top[-4];
  const int L = top[-5];
  DST(0, 0) =             AVG2(I, J);
  DST(2, 0) = DST(0, 1) = AVG2(J, K);
  DST(2, 1) = DST(0, 2) = AVG2(K, L);
  DST(1, 0) =             AVG3(I, J, K);
  DST(3, 0) = DST(1, 1) = AVG3(J, K, L);
  DST(3, 1) = DST(1, 2) = AVG3(K, L, L);
  DST(3, 2) = DST(2, 2) =
      DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L;
}

static WEBP_INLINE void HD4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const int X = top[-1];
  const int I = top[-2];
  const int J = top[-3];
  const int K = top[-4];
  const int L = top[-5];
  const int A = top[0];
  const int B = top[1];
  const int C = top[2];

  DST(0, 0) = DST(2, 1) = AVG2(I, X);
  DST(0, 1) = DST(2, 2) = AVG2(J, I);
  DST(0, 2) = DST(2, 3) = AVG2(K, J);
  DST(0, 3) =             AVG2(L, K);

  DST(3, 0)             = AVG3(A, B, C);
  DST(2, 0)             = AVG3(X, A, B);
  DST(1, 0) = DST(3, 1) = AVG3(I, X, A);
  DST(1, 1) = DST(3, 2) = AVG3(J, I, X);
  DST(1, 2) = DST(3, 3) = AVG3(K, J, I);
  DST(1, 3)             = AVG3(L, K, J);
}

static WEBP_INLINE void TM4_SSE2(uint8_t* WEBP_RESTRICT dst,
                                 const uint8_t* WEBP_RESTRICT top) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i top_values = _mm_cvtsi32_si128(WebPMemToInt32(top));
  const __m128i top_base = _mm_unpacklo_epi8(top_values, zero);
  int y;
  for (y = 0; y < 4; ++y, dst += BPS) {
    const int val = top[-2 - y] - top[-1];
    const __m128i base = _mm_set1_epi16(val);
    const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero);
    WebPInt32ToMem(dst, _mm_cvtsi128_si32(out));
  }
}

#undef DST
#undef AVG3
#undef AVG2

//------------------------------------------------------------------------------
// luma 4x4 prediction

// Left samples are top[-5 .. -2], top_left is top[-1], top are
// located at top[0..3], and top right is top[4..7]
static void Intra4Preds_SSE2(uint8_t* WEBP_RESTRICT dst,
                             const uint8_t* WEBP_RESTRICT top) {
  DC4_SSE2(I4DC4 + dst, top);
  TM4_SSE2(I4TM4 + dst, top);
  VE4_SSE2(I4VE4 + dst, top);
  HE4_SSE2(I4HE4 + dst, top);
  RD4_SSE2(I4RD4 + dst, top);
  VR4_SSE2(I4VR4 + dst, top);
  LD4_SSE2(I4LD4 + dst, top);
  VL4_SSE2(I4VL4 + dst, top);
  HD4_SSE2(I4HD4 + dst, top);
  HU4_SSE2(I4HU4 + dst, top);
}

//------------------------------------------------------------------------------
// Chroma 8x8 prediction (paragraph 12.2)

static void IntraChromaPreds_SSE2(uint8_t* WEBP_RESTRICT dst,
                                  const uint8_t* WEBP_RESTRICT left,
                                  const uint8_t* WEBP_RESTRICT top) {
  // U block
  DC8uvMode_SSE2(C8DC8 + dst, left, top);
  VerticalPred_SSE2(C8VE8 + dst, top, 8);
  HorizontalPred_SSE2(C8HE8 + dst, left, 8);
  TrueMotion_SSE2(C8TM8 + dst, left, top, 8);
  // V block
  dst += 8;
  if (top != NULL) top += 8;
  if (left != NULL) left += 16;
  DC8uvMode_SSE2(C8DC8 + dst, left, top);
  VerticalPred_SSE2(C8VE8 + dst, top, 8);
  HorizontalPred_SSE2(C8HE8 + dst, left, 8);
  TrueMotion_SSE2(C8TM8 + dst, left, top, 8);
}

//------------------------------------------------------------------------------
// luma 16x16 prediction (paragraph 12.3)

static void Intra16Preds_SSE2(uint8_t* WEBP_RESTRICT dst,
                              const uint8_t* WEBP_RESTRICT left,
                              const uint8_t* WEBP_RESTRICT top) {
  DC16Mode_SSE2(I16DC16 + dst, left, top);
  VerticalPred_SSE2(I16VE16 + dst, top, 16);
  HorizontalPred_SSE2(I16HE16 + dst, left, 16);
  TrueMotion_SSE2(I16TM16 + dst, left, top, 16);
}

//------------------------------------------------------------------------------
// Metric

static WEBP_INLINE void SubtractAndAccumulate_SSE2(const __m128i a,
                                                   const __m128i b,
                                                   __m128i* const sum) {
  // take abs(a-b) in 8b
  const __m128i a_b = _mm_subs_epu8(a, b);
  const __m128i b_a = _mm_subs_epu8(b, a);
  const __m128i abs_a_b = _mm_or_si128(a_b, b_a);
  // zero-extend to 16b
  const __m128i zero = _mm_setzero_si128();
  const __m128i C0 = _mm_unpacklo_epi8(abs_a_b, zero);
  const __m128i C1 = _mm_unpackhi_epi8(abs_a_b, zero);
  // multiply with self
  const __m128i sum1 = _mm_madd_epi16(C0, C0);
  const __m128i sum2 = _mm_madd_epi16(C1, C1);
  *sum = _mm_add_epi32(sum1, sum2);
}

static WEBP_INLINE int SSE_16xN_SSE2(const uint8_t* WEBP_RESTRICT a,
                                     const uint8_t* WEBP_RESTRICT b,
                                     int num_pairs) {
  __m128i sum = _mm_setzero_si128();
  int32_t tmp[4];
  int i;

  for (i = 0; i < num_pairs; ++i) {
    const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[BPS * 0]);
    const __m128i b0 = _mm_loadu_si128((const __m128i*)&b[BPS * 0]);
    const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[BPS * 1]);
    const __m128i b1 = _mm_loadu_si128((const __m128i*)&b[BPS * 1]);
    __m128i sum1, sum2;
    SubtractAndAccumulate_SSE2(a0, b0, &sum1);
    SubtractAndAccumulate_SSE2(a1, b1, &sum2);
    sum = _mm_add_epi32(sum, _mm_add_epi32(sum1, sum2));
    a += 2 * BPS;
    b += 2 * BPS;
  }
  _mm_storeu_si128((__m128i*)tmp, sum);
  return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
}

static int SSE16x16_SSE2(const uint8_t* WEBP_RESTRICT a,
                         const uint8_t* WEBP_RESTRICT b) {
  return SSE_16xN_SSE2(a, b, 8);
}

static int SSE16x8_SSE2(const uint8_t* WEBP_RESTRICT a,
                        const uint8_t* WEBP_RESTRICT b) {
  return SSE_16xN_SSE2(a, b, 4);
}

#define LOAD_8x16b(ptr) \
    _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr)), zero)

static int SSE8x8_SSE2(const uint8_t* WEBP_RESTRICT a,
                       const uint8_t* WEBP_RESTRICT b) {
  const __m128i zero = _mm_setzero_si128();
  int num_pairs = 4;
  __m128i sum = zero;
  int32_t tmp[4];
  while (num_pairs-- > 0) {
    const __m128i a0 = LOAD_8x16b(&a[BPS * 0]);
    const __m128i a1 = LOAD_8x16b(&a[BPS * 1]);
    const __m128i b0 = LOAD_8x16b(&b[BPS * 0]);
    const __m128i b1 = LOAD_8x16b(&b[BPS * 1]);
    // subtract
    const __m128i c0 = _mm_subs_epi16(a0, b0);
    const __m128i c1 = _mm_subs_epi16(a1, b1);
    // multiply/accumulate with self
    const __m128i d0 = _mm_madd_epi16(c0, c0);
    const __m128i d1 = _mm_madd_epi16(c1, c1);
    // collect
    const __m128i sum01 = _mm_add_epi32(d0, d1);
    sum = _mm_add_epi32(sum, sum01);
    a += 2 * BPS;
    b += 2 * BPS;
  }
  _mm_storeu_si128((__m128i*)tmp, sum);
  return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
}
#undef LOAD_8x16b

static int SSE4x4_SSE2(const uint8_t* WEBP_RESTRICT a,
                       const uint8_t* WEBP_RESTRICT b) {
  const __m128i zero = _mm_setzero_si128();

  // Load values. Note that we read 8 pixels instead of 4,
  // but the a/b buffers are over-allocated to that effect.
  const __m128i a0 = _mm_loadl_epi64((const __m128i*)&a[BPS * 0]);
  const __m128i a1 = _mm_loadl_epi64((const __m128i*)&a[BPS * 1]);
  const __m128i a2 = _mm_loadl_epi64((const __m128i*)&a[BPS * 2]);
  const __m128i a3 = _mm_loadl_epi64((const __m128i*)&a[BPS * 3]);
  const __m128i b0 = _mm_loadl_epi64((const __m128i*)&b[BPS * 0]);
  const __m128i b1 = _mm_loadl_epi64((const __m128i*)&b[BPS * 1]);
  const __m128i b2 = _mm_loadl_epi64((const __m128i*)&b[BPS * 2]);
  const __m128i b3 = _mm_loadl_epi64((const __m128i*)&b[BPS * 3]);
  // Combine pair of lines.
  const __m128i a01 = _mm_unpacklo_epi32(a0, a1);
  const __m128i a23 = _mm_unpacklo_epi32(a2, a3);
  const __m128i b01 = _mm_unpacklo_epi32(b0, b1);
  const __m128i b23 = _mm_unpacklo_epi32(b2, b3);
  // Convert to 16b.
  const __m128i a01s = _mm_unpacklo_epi8(a01, zero);
  const __m128i a23s = _mm_unpacklo_epi8(a23, zero);
  const __m128i b01s = _mm_unpacklo_epi8(b01, zero);
  const __m128i b23s = _mm_unpacklo_epi8(b23, zero);
  // subtract, square and accumulate
  const __m128i d0 = _mm_subs_epi16(a01s, b01s);
  const __m128i d1 = _mm_subs_epi16(a23s, b23s);
  const __m128i e0 = _mm_madd_epi16(d0, d0);
  const __m128i e1 = _mm_madd_epi16(d1, d1);
  const __m128i sum = _mm_add_epi32(e0, e1);

  int32_t tmp[4];
  _mm_storeu_si128((__m128i*)tmp, sum);
  return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
}

//------------------------------------------------------------------------------

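// Note (from the lane arithmetic below): each dc[k] receives the plain sum of
// the k-th 4x4 sub-block of the 16x4 strip; no division into a mean is
// performed here, that is left to the caller.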
static void Mean16x4_SSE2(const uint8_t* WEBP_RESTRICT ref, uint32_t dc[4]) {
  const __m128i mask = _mm_set1_epi16(0x00ff);
  const __m128i a0 = _mm_loadu_si128((const __m128i*)&ref[BPS * 0]);
  const __m128i a1 = _mm_loadu_si128((const __m128i*)&ref[BPS * 1]);
  const __m128i a2 = _mm_loadu_si128((const __m128i*)&ref[BPS * 2]);
  const __m128i a3 = _mm_loadu_si128((const __m128i*)&ref[BPS * 3]);
  const __m128i b0 = _mm_srli_epi16(a0, 8);    // hi byte
  const __m128i b1 = _mm_srli_epi16(a1, 8);
  const __m128i b2 = _mm_srli_epi16(a2, 8);
  const __m128i b3 = _mm_srli_epi16(a3, 8);
  const __m128i c0 = _mm_and_si128(a0, mask);  // lo byte
  const __m128i c1 = _mm_and_si128(a1, mask);
  const __m128i c2 = _mm_and_si128(a2, mask);
  const __m128i c3 = _mm_and_si128(a3, mask);
  const __m128i d0 = _mm_add_epi32(b0, c0);
  const __m128i d1 = _mm_add_epi32(b1, c1);
  const __m128i d2 = _mm_add_epi32(b2, c2);
  const __m128i d3 = _mm_add_epi32(b3, c3);
  const __m128i e0 = _mm_add_epi32(d0, d1);
  const __m128i e1 = _mm_add_epi32(d2, d3);
  const __m128i f0 = _mm_add_epi32(e0, e1);
  uint16_t tmp[8];
  _mm_storeu_si128((__m128i*)tmp, f0);
  dc[0] = tmp[0] + tmp[1];
  dc[1] = tmp[2] + tmp[3];
  dc[2] = tmp[4] + tmp[5];
  dc[3] = tmp[6] + tmp[7];
}

//------------------------------------------------------------------------------
// Texture distortion
//
// We try to match the spectral content (weighted) between source and
// reconstructed samples.

// Hadamard transform
// Returns the weighted sum of the absolute value of transformed coefficients.
// w[] contains a row-major 4 by 4 symmetric matrix.
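// In formula form (sketch): with H() the 2-D Hadamard-style transform below,
// this returns sum(w[i] * |H(inA)[i]|) - sum(w[i] * |H(inB)[i]|), and
// Disto4x4 then reports abs(of that) >> 5 as the distortion score.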
static int TTransform_SSE2(const uint8_t* WEBP_RESTRICT inA,
                           const uint8_t* WEBP_RESTRICT inB,
                           const uint16_t* WEBP_RESTRICT const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;
  const __m128i zero = _mm_setzero_si128();

  // Load and combine inputs.
  {
    const __m128i inA_0 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 2]);
    const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3);
    tmp_0 = _mm_unpacklo_epi8(inAB_0, zero);
    tmp_1 = _mm_unpacklo_epi8(inAB_1, zero);
    tmp_2 = _mm_unpacklo_epi8(inAB_2, zero);
    tmp_3 = _mm_unpacklo_epi8(inAB_3, zero);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
  }

  // Vertical pass first to avoid a transpose (vertical and horizontal passes
  // are commutative because w/kWeightY is symmetric) and subsequent transpose.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3);
  }

  // Horizontal pass and difference of weighted sums.
  {
    // Load all inputs.
    const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    {
      const __m128i d0 = _mm_sub_epi16(zero, A_b0);
      const __m128i d1 = _mm_sub_epi16(zero, A_b2);
      const __m128i d2 = _mm_sub_epi16(zero, B_b0);
      const __m128i d3 = _mm_sub_epi16(zero, B_b2);
      A_b0 = _mm_max_epi16(A_b0, d0);  // abs(v), 16b
      A_b2 = _mm_max_epi16(A_b2, d1);
      B_b0 = _mm_max_epi16(B_b0, d2);
      B_b2 = _mm_max_epi16(B_b2, d3);
    }

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b0 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b0);
  }
  return sum[0] + sum[1] + sum[2] + sum[3];
}

static int Disto4x4_SSE2(const uint8_t* WEBP_RESTRICT const a,
                         const uint8_t* WEBP_RESTRICT const b,
                         const uint16_t* WEBP_RESTRICT const w) {
  const int diff_sum = TTransform_SSE2(a, b, w);
  return abs(diff_sum) >> 5;
}

static int Disto16x16_SSE2(const uint8_t* WEBP_RESTRICT const a,
                           const uint8_t* WEBP_RESTRICT const b,
                           const uint16_t* WEBP_RESTRICT const w) {
  int D = 0;
  int x, y;
  for (y = 0; y < 16 * BPS; y += 4 * BPS) {
    for (x = 0; x < 16; x += 4) {
      D += Disto4x4_SSE2(a + x + y, b + x + y, w);
    }
  }
  return D;
}

//------------------------------------------------------------------------------
// Quantization
//

static WEBP_INLINE int DoQuantizeBlock_SSE2(
    int16_t in[16], int16_t out[16],
    const uint16_t* WEBP_RESTRICT const sharpen,
    const VP8Matrix* WEBP_RESTRICT const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
  const __m128i zero = _mm_setzero_si128();
  __m128i coeff0, coeff8;
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
  const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);

  // extract sign(in)  (0x0000 if positive, 0xffff if negative)
  const __m128i sign0 = _mm_cmpgt_epi16(zero, in0);
  const __m128i sign8 = _mm_cmpgt_epi16(zero, in8);

  // coeff = abs(in) = (in ^ sign) - sign
  coeff0 = _mm_xor_si128(in0, sign0);
  coeff8 = _mm_xor_si128(in8, sign8);
  coeff0 = _mm_sub_epi16(coeff0, sign0);
  coeff8 = _mm_sub_epi16(coeff8, sign8);
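  // (For example, illustratively: in = -5 gives sign = 0xffff, and
  // (-5 ^ 0xffff) - 0xffff = 4 + 1 = 5; for in >= 0, sign = 0 and the two
  // operations are no-ops.)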

  // coeff = abs(in) + sharpen
  if (sharpen != NULL) {
    const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
    const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
    coeff0 = _mm_add_epi16(coeff0, sharpen0);
    coeff8 = _mm_add_epi16(coeff8, sharpen8);
  }

  // out = (coeff * iQ + B) >> QFIX
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ)
    const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // out = (coeff * iQ + B)
    const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
    const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
    const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
    const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = QUANTDIV(coeff, iQ, B, QFIX)
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);

    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);

    // if (coeff > 2047) coeff = 2047
    out0 = _mm_min_epi16(out0, max_coeff_2047);
    out8 = _mm_min_epi16(out8, max_coeff_2047);
  }

  // get sign back (if (sign[j]) out_n = -out_n)
  out0 = _mm_xor_si128(out0, sign0);
  out8 = _mm_xor_si128(out8, sign8);
  out0 = _mm_sub_epi16(out0, sign0);
  out8 = _mm_sub_epi16(out8, sign8);

  // in = out * Q
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  _mm_storeu_si128((__m128i*)&in[0], in0);
  _mm_storeu_si128((__m128i*)&in[8], in8);

  // zigzag the output before storing it.
  //
  // The zigzag pattern can almost be reproduced with a small sequence of
  // shuffles. After it, we only need to swap the 7th (ending up in third
  // position instead of twelfth) and 8th values.
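  // (For reference: the target scan order is the usual VP8 zigzag
  //  0 1 4 8 5 2 3 6 9 12 13 10 7 11 14 15, cf. the encoder's zigzag table.)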
After it, we only need to swap the 7th (ending up in third1488// position instead of twelfth) and 8th values.1489{1490__m128i outZ0, outZ8;1491outZ0 = _mm_shufflehi_epi16(out0, _MM_SHUFFLE(2, 1, 3, 0));1492outZ0 = _mm_shuffle_epi32 (outZ0, _MM_SHUFFLE(3, 1, 2, 0));1493outZ0 = _mm_shufflehi_epi16(outZ0, _MM_SHUFFLE(3, 1, 0, 2));1494outZ8 = _mm_shufflelo_epi16(out8, _MM_SHUFFLE(3, 0, 2, 1));1495outZ8 = _mm_shuffle_epi32 (outZ8, _MM_SHUFFLE(3, 1, 2, 0));1496outZ8 = _mm_shufflelo_epi16(outZ8, _MM_SHUFFLE(1, 3, 2, 0));1497_mm_storeu_si128((__m128i*)&out[0], outZ0);1498_mm_storeu_si128((__m128i*)&out[8], outZ8);1499packed_out = _mm_packs_epi16(outZ0, outZ8);1500}1501{1502const int16_t outZ_12 = out[12];1503const int16_t outZ_3 = out[3];1504out[3] = outZ_12;1505out[12] = outZ_3;1506}15071508// detect if all 'out' values are zeroes or not1509return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);1510}15111512static int QuantizeBlock_SSE2(int16_t in[16], int16_t out[16],1513const VP8Matrix* WEBP_RESTRICT const mtx) {1514return DoQuantizeBlock_SSE2(in, out, &mtx->sharpen_[0], mtx);1515}15161517static int QuantizeBlockWHT_SSE2(int16_t in[16], int16_t out[16],1518const VP8Matrix* WEBP_RESTRICT const mtx) {1519return DoQuantizeBlock_SSE2(in, out, NULL, mtx);1520}15211522static int Quantize2Blocks_SSE2(int16_t in[32], int16_t out[32],1523const VP8Matrix* WEBP_RESTRICT const mtx) {1524int nz;1525const uint16_t* const sharpen = &mtx->sharpen_[0];1526nz = DoQuantizeBlock_SSE2(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;1527nz |= DoQuantizeBlock_SSE2(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;1528return nz;1529}15301531//------------------------------------------------------------------------------1532// Entry point15331534extern void VP8EncDspInitSSE2(void);15351536WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE2(void) {1537VP8CollectHistogram = CollectHistogram_SSE2;1538VP8EncPredLuma16 = Intra16Preds_SSE2;1539VP8EncPredChroma8 = IntraChromaPreds_SSE2;1540VP8EncPredLuma4 = Intra4Preds_SSE2;1541VP8EncQuantizeBlock = QuantizeBlock_SSE2;1542VP8EncQuantize2Blocks = Quantize2Blocks_SSE2;1543VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE2;1544VP8ITransform = ITransform_SSE2;1545VP8FTransform = FTransform_SSE2;1546VP8FTransform2 = FTransform2_SSE2;1547VP8FTransformWHT = FTransformWHT_SSE2;1548VP8SSE16x16 = SSE16x16_SSE2;1549VP8SSE16x8 = SSE16x8_SSE2;1550VP8SSE8x8 = SSE8x8_SSE2;1551VP8SSE4x4 = SSE4x4_SSE2;1552VP8TDisto4x4 = Disto4x4_SSE2;1553VP8TDisto16x16 = Disto16x16_SSE2;1554VP8Mean16x4 = Mean16x4_SSE2;1555}15561557#else // !WEBP_USE_SSE215581559WEBP_DSP_INIT_STUB(VP8EncDspInitSSE2)15601561#endif // WEBP_USE_SSE2156215631564