// Source: thirdparty/libwebp/src/dsp/alpha_processing_sse2.c
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utilities for processing transparent channel.
//
// Author: Skal ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>

#include "src/webp/types.h"
#include "src/dsp/cpu.h"

//------------------------------------------------------------------------------

// Scatters the 8-bit alpha plane 'alpha' (width x height, row stride
// 'alpha_stride') into the first byte of each 32-bit pixel of 'dst'
// (byte stride 'dst_stride'), leaving the other three bytes untouched
// (done via _mm_maskmoveu_si128 with a per-lane 0x000000ff mask).
// Returns non-zero if at least one alpha value differs from 0xff.
static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
                              int alpha_stride, int width, int height,
                              uint8_t* WEBP_RESTRICT dst, int dst_stride) {
  // alpha_and stores an 'and' operation of all the alpha[] values. The final
  // value is not 0xff if any of the alpha[] is not equal to 0xff.
  uint32_t alpha_and = 0xff;
  int i, j;
  const __m128i zero = _mm_setzero_si128();
  const __m128i alpha_mask = _mm_set1_epi32((int)0xff);  // to preserve A
  const __m128i all_0xff = _mm_set1_epi8((char)0xff);
  // Separate accumulators for the 16-wide and 8-wide loops; both start at
  // all-ones and collect a running AND of the processed alpha bytes.
  __m128i all_alphas16 = all_0xff;
  __m128i all_alphas8 = all_0xff;

  // We must be able to access 3 extra bytes after the last written byte
  // 'dst[4 * width - 4]', because we don't know if alpha is the first or the
  // last byte of the quadruplet.
  // (Hence the '<= width - 1' bounds below: the last pixel of each row is
  // always handled by the scalar loop, which writes a single byte.)
  for (j = 0; j < height; ++j) {
    char* ptr = (char*)dst;
    for (i = 0; i + 16 <= width - 1; i += 16) {
      // load 16 alpha bytes
      const __m128i a0 = _mm_loadu_si128((const __m128i*)&alpha[i]);
      // Widen 8-bit alphas to 32-bit lanes (alpha in the low byte of each).
      const __m128i a1_lo = _mm_unpacklo_epi8(a0, zero);
      const __m128i a1_hi = _mm_unpackhi_epi8(a0, zero);
      const __m128i a2_lo_lo = _mm_unpacklo_epi16(a1_lo, zero);
      const __m128i a2_lo_hi = _mm_unpackhi_epi16(a1_lo, zero);
      const __m128i a2_hi_lo = _mm_unpacklo_epi16(a1_hi, zero);
      const __m128i a2_hi_hi = _mm_unpackhi_epi16(a1_hi, zero);
      // Masked store: only the low byte of each 32-bit pixel is written.
      _mm_maskmoveu_si128(a2_lo_lo, alpha_mask, ptr + 0);
      _mm_maskmoveu_si128(a2_lo_hi, alpha_mask, ptr + 16);
      _mm_maskmoveu_si128(a2_hi_lo, alpha_mask, ptr + 32);
      _mm_maskmoveu_si128(a2_hi_hi, alpha_mask, ptr + 48);
      // accumulate 16 alpha 'and' in parallel
      all_alphas16 = _mm_and_si128(all_alphas16, a0);
      ptr += 64;
    }
    if (i + 8 <= width - 1) {
      // load 8 alpha bytes
      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[i]);
      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
      const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
      const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
      _mm_maskmoveu_si128(a2_lo, alpha_mask, ptr);
      _mm_maskmoveu_si128(a2_hi, alpha_mask, ptr + 16);
      // accumulate 8 alpha 'and' in parallel
      all_alphas8 = _mm_and_si128(all_alphas8, a0);
      i += 8;
    }
    // Scalar tail: covers the remaining pixels (at least the last one).
    for (; i < width; ++i) {
      const uint32_t alpha_value = alpha[i];
      dst[4 * i] = alpha_value;
      alpha_and &= alpha_value;
    }
    alpha += alpha_stride;
    dst += dst_stride;
  }
  // Combine the eight alpha 'and' into a 8-bit mask.
  alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas8, all_0xff)) & 0xff;
  // Non-zero iff any accumulated alpha byte is not 0xff.
  return (alpha_and != 0xff ||
          _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas16, all_0xff)) != 0xffff);
}

// Expands the 8-bit alpha plane into the second byte (bits 8..15, i.e. the
// 'green' position) of each 32-bit 'dst' pixel; the other bytes are zeroed
// (scalar tail: dst[i] = alpha[i] << 8).
static void DispatchAlphaToGreen_SSE2(const uint8_t* WEBP_RESTRICT alpha,
                                      int alpha_stride, int width, int height,
                                      uint32_t* WEBP_RESTRICT dst,
                                      int dst_stride) {
  int i, j;
  const __m128i zero = _mm_setzero_si128();
  const int limit = width & ~15;  // largest multiple of 16 <= width
  for (j = 0; j < height; ++j) {
    for (i = 0; i < limit; i += 16) {  // process 16 alpha bytes
      const __m128i a0 = _mm_loadu_si128((const __m128i*)&alpha[i]);
      // Unpacking with 'zero' as the FIRST operand places each alpha byte in
      // the high byte of its 16-bit lane, i.e. already shifted left by 8.
      const __m128i a1 = _mm_unpacklo_epi8(zero, a0);  // note the 'zero' first!
      const __m128i b1 = _mm_unpackhi_epi8(zero, a0);
      const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
      const __m128i b2_lo = _mm_unpacklo_epi16(b1, zero);
      const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
      const __m128i b2_hi = _mm_unpackhi_epi16(b1, zero);
      _mm_storeu_si128((__m128i*)&dst[i + 0], a2_lo);
      _mm_storeu_si128((__m128i*)&dst[i + 4], a2_hi);
      _mm_storeu_si128((__m128i*)&dst[i + 8], b2_lo);
      _mm_storeu_si128((__m128i*)&dst[i + 12], b2_hi);
    }
    for (; i < width; ++i) dst[i] = alpha[i] << 8;
    alpha += alpha_stride;
    dst += dst_stride;
  }
}

// Gathers the first byte of each 32-bit 'argb' pixel into the 8-bit plane
// 'alpha' (scalar tail: alpha[i] = argb[4 * i]).
// Returns 1 if all extracted alpha values were 0xff, 0 otherwise.
static int ExtractAlpha_SSE2(const uint8_t* WEBP_RESTRICT argb, int argb_stride,
                             int width, int height,
                             uint8_t* WEBP_RESTRICT alpha, int alpha_stride) {
  // alpha_and stores an 'and' operation of all the alpha[] values. The final
  // value is not 0xff if any of the alpha[] is not equal to 0xff.
  uint32_t alpha_and = 0xff;
  int i, j;
  const __m128i a_mask = _mm_set1_epi32(0xff);  // to preserve alpha
  // Only the low 8 bytes of the accumulator are meaningful (the SIMD loop
  // packs 8 alpha values into the low half); the high half stays zero.
  const __m128i all_0xff = _mm_set_epi32(0, 0, ~0, ~0);
  __m128i all_alphas = all_0xff;

  // We must be able to access 3 extra bytes after the last written byte
  // 'src[4 * width - 4]', because we don't know if alpha is the first or the
  // last byte of the quadruplet.
  const int limit = (width - 1) & ~7;

  for (j = 0; j < height; ++j) {
    const __m128i* src = (const __m128i*)argb;
    for (i = 0; i < limit; i += 8) {
      // load 32 argb bytes
      const __m128i a0 = _mm_loadu_si128(src + 0);
      const __m128i a1 = _mm_loadu_si128(src + 1);
      // Keep only the low byte of each pixel, then pack 8 x 32-bit -> 8 x 8-bit.
      const __m128i b0 = _mm_and_si128(a0, a_mask);
      const __m128i b1 = _mm_and_si128(a1, a_mask);
      const __m128i c0 = _mm_packs_epi32(b0, b1);
      const __m128i d0 = _mm_packus_epi16(c0, c0);
      // store
      _mm_storel_epi64((__m128i*)&alpha[i], d0);
      // accumulate eight alpha 'and' in parallel
      all_alphas = _mm_and_si128(all_alphas, d0);
      src += 2;
    }
    // Scalar tail (covers at least the last pixel of the row).
    for (; i < width; ++i) {
      const uint32_t alpha_value = argb[4 * i];
      alpha[i] = alpha_value;
      alpha_and &= alpha_value;
    }
    argb += argb_stride;
    alpha += alpha_stride;
  }
  // Combine the eight alpha 'and' into a 8-bit mask.
  alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
  return (alpha_and == 0xff);
}

// Extracts byte 1 (bits 8..15, the 'green' position) of each 32-bit 'argb'
// pixel into 'alpha' (scalar tail: alpha[i] = argb[i] >> 8, truncated to 8 bits).
static void ExtractGreen_SSE2(const uint32_t* WEBP_RESTRICT argb,
                              uint8_t* WEBP_RESTRICT alpha, int size) {
  int i;
  const __m128i mask = _mm_set1_epi32(0xff);
  const __m128i* src = (const __m128i*)argb;

  for (i = 0; i + 16 <= size; i += 16, src += 4) {
    const __m128i a0 = _mm_loadu_si128(src + 0);
    const __m128i a1 = _mm_loadu_si128(src + 1);
    const __m128i a2 = _mm_loadu_si128(src + 2);
    const __m128i a3 = _mm_loadu_si128(src + 3);
    // Shift green into the low byte, mask it, then pack 16 values into one
    // 16-byte store.
    const __m128i b0 = _mm_srli_epi32(a0, 8);
    const __m128i b1 = _mm_srli_epi32(a1, 8);
    const __m128i b2 = _mm_srli_epi32(a2, 8);
    const __m128i b3 = _mm_srli_epi32(a3, 8);
    const __m128i c0 = _mm_and_si128(b0, mask);
    const __m128i c1 = _mm_and_si128(b1, mask);
    const __m128i c2 = _mm_and_si128(b2, mask);
    const __m128i c3 = _mm_and_si128(b3, mask);
    const __m128i d0 = _mm_packs_epi32(c0, c1);
    const __m128i d1 = _mm_packs_epi32(c2, c3);
    const __m128i e = _mm_packus_epi16(d0, d1);
    // store
    _mm_storeu_si128((__m128i*)&alpha[i], e);
  }
  if (i + 8 <= size) {  // 8-wide step for the remaining half-block
    const __m128i a0 = _mm_loadu_si128(src + 0);
    const __m128i a1 = _mm_loadu_si128(src + 1);
    const __m128i b0 = _mm_srli_epi32(a0, 8);
    const __m128i b1 = _mm_srli_epi32(a1, 8);
    const __m128i c0 = _mm_and_si128(b0, mask);
    const __m128i c1 = _mm_and_si128(b1, mask);
    const __m128i d = _mm_packs_epi32(c0, c1);
    const __m128i e = _mm_packus_epi16(d, d);
    _mm_storel_epi64((__m128i*)&alpha[i], e);
    i += 8;
  }
  for (; i < size; ++i) alpha[i] = argb[i] >> 8;
}

//------------------------------------------------------------------------------
// Non-dither premultiplied modes

#define MULTIPLIER(a) ((a) * 0x8081)
#define PREMULTIPLY(x, m) (((x) * (m)) >> 23)

// We can't use a 'const int' for the SHUFFLE value, because it has to be an
// immediate in the _mm_shufflexx_epi16() instruction. We really need a macro.
// We use: v / 255 = (v * 0x8081) >> 23, where v = alpha * {r,g,b} is a 16bit
// value.
#define APPLY_ALPHA(RGBX, SHUFFLE) do {                              \
  const __m128i argb0 = _mm_loadu_si128((const __m128i*)&(RGBX));    \
  const __m128i argb1_lo = _mm_unpacklo_epi8(argb0, zero);           \
  const __m128i argb1_hi = _mm_unpackhi_epi8(argb0, zero);           \
  const __m128i alpha0_lo = _mm_or_si128(argb1_lo, kMask);           \
  const __m128i alpha0_hi = _mm_or_si128(argb1_hi, kMask);           \
  const __m128i alpha1_lo = _mm_shufflelo_epi16(alpha0_lo, SHUFFLE); \
  const __m128i alpha1_hi = _mm_shufflelo_epi16(alpha0_hi, SHUFFLE); \
  const __m128i alpha2_lo = _mm_shufflehi_epi16(alpha1_lo, SHUFFLE); \
  const __m128i alpha2_hi = _mm_shufflehi_epi16(alpha1_hi, SHUFFLE); \
  /* alpha2 = [ff a0 a0 a0][ff a1 a1 a1] */                          \
  const __m128i A0_lo = _mm_mullo_epi16(alpha2_lo, argb1_lo);        \
  const __m128i A0_hi = _mm_mullo_epi16(alpha2_hi, argb1_hi);        \
  const __m128i A1_lo = _mm_mulhi_epu16(A0_lo, kMult);               \
  const __m128i A1_hi = _mm_mulhi_epu16(A0_hi, kMult);               \
  const __m128i A2_lo = _mm_srli_epi16(A1_lo, 7);                    \
  const __m128i A2_hi = _mm_srli_epi16(A1_hi, 7);                    \
  const __m128i A3 = _mm_packus_epi16(A2_lo, A2_hi);                 \
  _mm_storeu_si128((__m128i*)&(RGBX), A3);                           \
} while (0)

// In-place premultiplication of the three color channels of each pixel by its
// alpha, 'w x h' pixels with row byte-stride 'stride'. 'alpha_first' selects
// whether alpha is byte 0 (ARGB layout) or byte 3 (RGBA layout) of the
// quadruplet; the SHUFFLE immediate broadcasts the matching alpha lane.
static void ApplyAlphaMultiply_SSE2(uint8_t* rgba, int alpha_first,
                                    int w, int h, int stride) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i kMult = _mm_set1_epi16((short)0x8081);
  // 0xff in the alpha lanes so that alpha itself is multiplied by 0xff/255 = 1.
  const __m128i kMask = _mm_set_epi16(0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0);
  const int kSpan = 4;  // pixels per SIMD iteration
  while (h-- > 0) {
    uint32_t* const rgbx = (uint32_t*)rgba;
    int i;
    if (!alpha_first) {
      for (i = 0; i + kSpan <= w; i += kSpan) {
        APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(2, 3, 3, 3));
      }
    } else {
      for (i = 0; i + kSpan <= w; i += kSpan) {
        APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(0, 0, 0, 1));
      }
    }
    // Finish with left-overs.
    for (; i < w; ++i) {
      uint8_t* const rgb = rgba + (alpha_first ? 1 : 0);
      const uint8_t* const alpha = rgba + (alpha_first ? 0 : 3);
      const uint32_t a = alpha[4 * i];
      if (a != 0xff) {
        const uint32_t mult = MULTIPLIER(a);
        rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult);
        rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult);
        rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult);
      }
    }
    rgba += stride;
  }
}
#undef MULTIPLIER
#undef PREMULTIPLY

//------------------------------------------------------------------------------
// Alpha detection

// Returns 1 if any of the 'length' bytes of 'src' differs from 0xff.
static int HasAlpha8b_SSE2(const uint8_t* src, int length) {
  const __m128i all_0xff = _mm_set1_epi8((char)0xff);
  int i = 0;
  for (; i + 16 <= length; i += 16) {
    const __m128i v = _mm_loadu_si128((const __m128i*)(src + i));
    const __m128i bits = _mm_cmpeq_epi8(v, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i < length; ++i) if (src[i] != 0xff) return 1;
  return 0;
}

// Returns 1 if the first byte of any of the 'length' 32-bit pixels starting
// at 'src' differs from 0xff (i.e. the image has real transparency).
static int HasAlpha32b_SSE2(const uint8_t* src, int length) {
  const __m128i alpha_mask = _mm_set1_epi32(0xff);
  const __m128i all_0xff = _mm_set1_epi8((char)0xff);
  int i = 0;
  // We don't know if we can access the last 3 bytes after the last alpha
  // value 'src[4 * length - 4]' (because we don't know if alpha is the first
  // or the last byte of the quadruplet). Hence the '-3' protection below.
  length = length * 4 - 3;  // size in bytes
  for (; i + 64 <= length; i += 64) {
    // Check 16 alpha values (64 bytes) per iteration.
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16));
    const __m128i a2 = _mm_loadu_si128((const __m128i*)(src + i + 32));
    const __m128i a3 = _mm_loadu_si128((const __m128i*)(src + i + 48));
    const __m128i b0 = _mm_and_si128(a0, alpha_mask);
    const __m128i b1 = _mm_and_si128(a1, alpha_mask);
    const __m128i b2 = _mm_and_si128(a2, alpha_mask);
    const __m128i b3 = _mm_and_si128(a3, alpha_mask);
    const __m128i c0 = _mm_packs_epi32(b0, b1);
    const __m128i c1 = _mm_packs_epi32(b2, b3);
    const __m128i d = _mm_packus_epi16(c0, c1);
    const __m128i bits = _mm_cmpeq_epi8(d, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i + 32 <= length; i += 32) {
    // Check 8 alpha values (32 bytes); the pack duplicates the low half,
    // so all 16 compared bytes are meaningful.
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16));
    const __m128i b0 = _mm_and_si128(a0, alpha_mask);
    const __m128i b1 = _mm_and_si128(a1, alpha_mask);
    const __m128i c = _mm_packs_epi32(b0, b1);
    const __m128i d = _mm_packus_epi16(c, c);
    const __m128i bits = _mm_cmpeq_epi8(d, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  // '<=' is correct here: 'length' was reduced by 3 above, so index 'length'
  // is still within the original buffer when it is a multiple-of-4 offset.
  for (; i <= length; i += 4) if (src[i] != 0xff) return 1;
  return 0;
}

// Replaces each pixel whose alpha (top byte) is 0 with 'color', in place.
static void AlphaReplace_SSE2(uint32_t* src, int length, uint32_t color) {
  const __m128i m_color = _mm_set1_epi32((int)color);
  const __m128i zero = _mm_setzero_si128();
  int i = 0;
  for (; i + 8 <= length; i += 8) {
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 4));
    // Arithmetic shift broadcasts the alpha byte over the whole 32-bit lane;
    // comparing against zero yields an all-ones mask for alpha == 0 pixels.
    const __m128i b0 = _mm_srai_epi32(a0, 24);
    const __m128i b1 = _mm_srai_epi32(a1, 24);
    const __m128i c0 = _mm_cmpeq_epi32(b0, zero);
    const __m128i c1 = _mm_cmpeq_epi32(b1, zero);
    // Blend: take 'color' where the mask is set, the original pixel elsewhere.
    const __m128i d0 = _mm_and_si128(c0, m_color);
    const __m128i d1 = _mm_and_si128(c1, m_color);
    const __m128i e0 = _mm_andnot_si128(c0, a0);
    const __m128i e1 = _mm_andnot_si128(c1, a1);
    _mm_storeu_si128((__m128i*)(src + i + 0), _mm_or_si128(d0, e0));
    _mm_storeu_si128((__m128i*)(src + i + 4), _mm_or_si128(d1, e1));
  }
  for (; i < length; ++i) if ((src[i] >> 24) == 0) src[i] = color;
}

// -----------------------------------------------------------------------------
// Apply alpha value to rows

// Multiplies (or un-multiplies, via the C fallback) the color channels of a
// row of 'width' 32-bit pixels by their alpha. Only the !inverse direction is
// vectorized here; the remainder (and the inverse case) is delegated to
// WebPMultARGBRow_C.
static void MultARGBRow_SSE2(uint32_t* const ptr, int width, int inverse) {
  int x = 0;
  if (!inverse) {
    const int kSpan = 2;  // pixels per iteration (8 bytes)
    const __m128i zero = _mm_setzero_si128();
    const __m128i k128 = _mm_set1_epi16(128);
    const __m128i kMult = _mm_set1_epi16(0x0101);
    const __m128i kMask = _mm_set_epi16(0, 0xff, 0, 0, 0, 0xff, 0, 0);
    for (x = 0; x + kSpan <= width; x += kSpan) {
      // To compute 'result = (int)(a * x / 255. + .5)', we use:
      //   tmp = a * v + 128, result = (tmp * 0x0101u) >> 16
      const __m128i A0 = _mm_loadl_epi64((const __m128i*)&ptr[x]);
      const __m128i A1 = _mm_unpacklo_epi8(A0, zero);
      const __m128i A2 = _mm_or_si128(A1, kMask);
      const __m128i A3 = _mm_shufflelo_epi16(A2, _MM_SHUFFLE(2, 3, 3, 3));
      const __m128i A4 = _mm_shufflehi_epi16(A3, _MM_SHUFFLE(2, 3, 3, 3));
      // here, A4 = [ff a0 a0 a0][ff a1 a1 a1]
      const __m128i A5 = _mm_mullo_epi16(A4, A1);
      const __m128i A6 = _mm_add_epi16(A5, k128);
      const __m128i A7 = _mm_mulhi_epu16(A6, kMult);
      const __m128i A10 = _mm_packus_epi16(A7, zero);
      _mm_storel_epi64((__m128i*)&ptr[x], A10);
    }
  }
  width -= x;
  if (width > 0) WebPMultARGBRow_C(ptr + x, width, inverse);
}

// Multiplies each byte of 'ptr' by the corresponding 'alpha' byte, rounding
// with x/255 = (x * 0x0101 + 128*0x0101) >> 16. Only !inverse is vectorized;
// the remainder (and the inverse case) falls through to WebPMultRow_C.
static void MultRow_SSE2(uint8_t* WEBP_RESTRICT const ptr,
                         const uint8_t* WEBP_RESTRICT const alpha,
                         int width, int inverse) {
  int x = 0;
  if (!inverse) {
    const __m128i zero = _mm_setzero_si128();
    const __m128i k128 = _mm_set1_epi16(128);
    const __m128i kMult = _mm_set1_epi16(0x0101);
    for (x = 0; x + 8 <= width; x += 8) {
      const __m128i v0 = _mm_loadl_epi64((__m128i*)&ptr[x]);
      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[x]);
      const __m128i v1 = _mm_unpacklo_epi8(v0, zero);
      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
      const __m128i v2 = _mm_mullo_epi16(v1, a1);
      const __m128i v3 = _mm_add_epi16(v2, k128);
      const __m128i v4 = _mm_mulhi_epu16(v3, kMult);
      const __m128i v5 = _mm_packus_epi16(v4, zero);
      _mm_storel_epi64((__m128i*)&ptr[x], v5);
    }
  }
  width -= x;
  if (width > 0) WebPMultRow_C(ptr + x, alpha + x, width, inverse);
}

//------------------------------------------------------------------------------
// Entry point

extern void WebPInitAlphaProcessingSSE2(void);

// Installs the SSE2 implementations into the dispatch function pointers.
WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingSSE2(void) {
  WebPMultARGBRow = MultARGBRow_SSE2;
  WebPMultRow = MultRow_SSE2;
  WebPApplyAlphaMultiply = ApplyAlphaMultiply_SSE2;
  WebPDispatchAlpha = DispatchAlpha_SSE2;
  WebPDispatchAlphaToGreen = DispatchAlphaToGreen_SSE2;
  WebPExtractAlpha = ExtractAlpha_SSE2;
  WebPExtractGreen = ExtractGreen_SSE2;

  WebPHasAlpha8b = HasAlpha8b_SSE2;
  WebPHasAlpha32b = HasAlpha32b_SSE2;
  WebPAlphaReplace = AlphaReplace_SSE2;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPInitAlphaProcessingSSE2)

#endif  // WEBP_USE_SSE2