Path: blob/master/thirdparty/libtheora/x86_vc/mmxloop.h
#if !defined(_x86_vc_mmxloop_H)
# define _x86_vc_mmxloop_H (1)
# include <stddef.h>
# include "x86int.h"

#if defined(OC_X86_ASM)

/*On entry, mm0={a0,...,a7}, mm1={b0,...,b7}, mm2={c0,...,c7}, mm3={d0,...d7}.
  On exit, mm1={b0+lflim(R_0,L),...,b7+lflim(R_7,L)} and
   mm2={c0-lflim(R_0,L),...,c7-lflim(R_7,L)}; mm0 and mm3 are clobbered.*/
#define OC_LOOP_FILTER8_MMX __asm{ \
  /*mm7=0*/ \
  __asm pxor mm7,mm7 \
  /*mm6:mm0={a0,...,a7}*/ \
  __asm movq mm6,mm0 \
  __asm punpcklbw mm0,mm7 \
  __asm punpckhbw mm6,mm7 \
  /*mm3:mm5={d0,...,d7}*/ \
  __asm movq mm5,mm3 \
  __asm punpcklbw mm3,mm7 \
  __asm punpckhbw mm5,mm7 \
  /*mm6:mm0={a0-d0,...,a7-d7}*/ \
  __asm psubw mm0,mm3 \
  __asm psubw mm6,mm5 \
  /*mm3:mm1={b0,...,b7}*/ \
  __asm movq mm3,mm1 \
  __asm punpcklbw mm1,mm7 \
  __asm movq mm4,mm2 \
  __asm punpckhbw mm3,mm7 \
  /*mm5:mm4={c0,...,c7}*/ \
  __asm movq mm5,mm2 \
  __asm punpcklbw mm4,mm7 \
  __asm punpckhbw mm5,mm7 \
  /*mm7={3}x4 \
    mm5:mm4={c0-b0,...,c7-b7}*/ \
  __asm pcmpeqw mm7,mm7 \
  __asm psubw mm4,mm1 \
  __asm psrlw mm7,14 \
  __asm psubw mm5,mm3 \
  /*Scale by 3.*/ \
  __asm pmullw mm4,mm7 \
  __asm pmullw mm5,mm7 \
  /*mm7={4}x4 \
    mm5:mm4=f={a0-d0+3*(c0-b0),...,a7-d7+3*(c7-b7)}*/ \
  __asm psrlw mm7,1 \
  __asm paddw mm4,mm0 \
  __asm psllw mm7,2 \
  __asm movq mm0,[LL] \
  __asm paddw mm5,mm6 \
  /*R_i has the range [-127,128], so we compute -R_i instead. \
    mm4=-R_i=-(f+4>>3)=0xFF^(f-4>>3)*/ \
  __asm psubw mm4,mm7 \
  __asm psubw mm5,mm7 \
  __asm psraw mm4,3 \
  __asm psraw mm5,3 \
  __asm pcmpeqb mm7,mm7 \
  __asm packsswb mm4,mm5 \
  __asm pxor mm6,mm6 \
  __asm pxor mm4,mm7 \
  __asm packuswb mm1,mm3 \
  /*Now compute lflim of -mm4 cf. Section 7.10 of the spec.*/ \
  /*There's no unsigned byte+signed byte with unsigned saturation op code, so \
     we have to split things by sign (the other option is to work in 16 bits, \
     but working in 8 bits gives much better parallelism). \
    We compute abs(R_i), but save a mask of which terms were negative in mm6. \
    Then we compute mm4=abs(lflim(R_i,L))=min(abs(R_i),max(2*L-abs(R_i),0)). \
    Finally, we split mm4 into positive and negative pieces using the mask in \
     mm6, and add and subtract them as appropriate.*/ \
  /*mm4=abs(-R_i)*/ \
  /*mm7=255-2*L*/ \
  __asm pcmpgtb mm6,mm4 \
  __asm psubb mm7,mm0 \
  __asm pxor mm4,mm6 \
  __asm psubb mm7,mm0 \
  __asm psubb mm4,mm6 \
  /*mm7=255-max(2*L-abs(R_i),0)*/ \
  __asm paddusb mm7,mm4 \
  /*mm4=min(abs(R_i),max(2*L-abs(R_i),0))*/ \
  __asm paddusb mm4,mm7 \
  __asm psubusb mm4,mm7 \
  /*Now split mm4 by the original sign of -R_i.*/ \
  __asm movq mm5,mm4 \
  __asm pand mm4,mm6 \
  __asm pandn mm6,mm5 \
  /*mm1={b0+lflim(R_0,L),...,b7+lflim(R_7,L)}*/ \
  /*mm2={c0-lflim(R_0,L),...,c7-lflim(R_7,L)}*/ \
  __asm paddusb mm1,mm4 \
  __asm psubusb mm2,mm4 \
  __asm psubusb mm1,mm6 \
  __asm paddusb mm2,mm6 \
}
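
/*The following plain-C sketch is not part of the original header; it is an
   illustrative reference for what OC_LOOP_FILTER8_MMX above computes,
   assuming the semantics documented in its comments:
   R_i=(a_i-d_i+3*(c_i-b_i)+4)>>3, limited per Section 7.10 of the spec.
  The names oc_clamp255_c(), oc_lflim_c() and oc_loop_filter8_c() are
   hypothetical and do not exist elsewhere in the library.*/
static int oc_clamp255_c(int _x){
  return _x<0?0:_x>255?255:_x;
}

/*lflim(R,L)=sign(R)*min(abs(R),max(2*L-abs(R),0)): small responses pass
   through, responses approaching 2*L taper off, and larger ones are zeroed.*/
static int oc_lflim_c(int _r,int _l){
  int a;
  int m;
  a=_r<0?-_r:_r;
  m=2*_l-a;
  if(m<0)m=0;
  if(a>m)a=m;
  return _r<0?-a:a;
}

/*Filter one set of 8 pixel quadruples {a_i,b_i,c_i,d_i} in place, adjusting
   only b and c, mirroring the 8 byte lanes of the MMX macro.
  The right shift of a possibly negative sum is intended to be arithmetic,
   matching the psraw used above.*/
static void oc_loop_filter8_c(unsigned char *_a,unsigned char *_b,
 unsigned char *_c,unsigned char *_d,int _l){
  int i;
  for(i=0;i<8;i++){
    int r;
    r=oc_lflim_c(_a[i]-_d[i]+3*(_c[i]-_b[i])+4>>3,_l);
    /*Clamp to [0,255], as the saturating paddusb/psubusb sequence does.*/
    _b[i]=(unsigned char)oc_clamp255_c(_b[i]+r);
    _c[i]=(unsigned char)oc_clamp255_c(_c[i]-r);
  }
}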

#define OC_LOOP_FILTER_V_MMX(_pix,_ystride,_ll) \
  do{ \
    /*Used local variable pix__ in order to fix compilation errors like: \
       "error C2425: 'SHL' : non-constant expression in 'second operand'".*/ \
    unsigned char *pix__; \
    unsigned char *ll__; \
    ll__=(_ll); \
    pix__=(_pix); \
    __asm mov YSTRIDE,_ystride \
    __asm mov LL,ll__ \
    __asm mov PIX,pix__ \
    __asm sub PIX,YSTRIDE \
    __asm sub PIX,YSTRIDE \
    /*mm0={a0,...,a7}*/ \
    __asm movq mm0,[PIX] \
    /*ystride3=_ystride*3*/ \
    __asm lea YSTRIDE3,[YSTRIDE+YSTRIDE*2] \
    /*mm3={d0,...,d7}*/ \
    __asm movq mm3,[PIX+YSTRIDE3] \
    /*mm1={b0,...,b7}*/ \
    __asm movq mm1,[PIX+YSTRIDE] \
    /*mm2={c0,...,c7}*/ \
    __asm movq mm2,[PIX+YSTRIDE*2] \
    OC_LOOP_FILTER8_MMX \
    /*Write it back out.*/ \
    __asm movq [PIX+YSTRIDE],mm1 \
    __asm movq [PIX+YSTRIDE*2],mm2 \
  } \
  while(0)
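
/*Not part of the original header: a plain-C sketch of the filtering that
   OC_LOOP_FILTER_V_MMX above performs across a horizontal block edge.
  It reuses the hypothetical oc_loop_filter8_c() sketch defined earlier, and
   takes the limit L directly as an int; the macro instead receives _ll, a
   pointer to a quadword whose bytes all hold L, as implied by the movq/psubb
   sequence that forms 255-2*L bytewise.*/
static void oc_loop_filter_v_c(unsigned char *_pix,int _ystride,int _l){
  /*Rows a,b,c,d straddle the edge between _pix-_ystride and _pix:
     a=_pix-2*_ystride, b=_pix-_ystride, c=_pix, d=_pix+_ystride.
    Only rows b and c are written back, matching the two movq stores above.*/
  oc_loop_filter8_c(_pix-(_ystride<<1),_pix-_ystride,_pix,_pix+_ystride,_l);
}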

#define OC_LOOP_FILTER_H_MMX(_pix,_ystride,_ll) \
  do{ \
    /*Used local variable ll__ in order to fix compilation errors like: \
       "error C2443: operand size conflict".*/ \
    unsigned char *ll__; \
    unsigned char *pix__; \
    ll__=(_ll); \
    pix__=(_pix)-2; \
    __asm mov PIX,pix__ \
    __asm mov YSTRIDE,_ystride \
    __asm mov LL,ll__ \
    /*x x x x d0 c0 b0 a0*/ \
    __asm movd mm0,[PIX] \
    /*x x x x d1 c1 b1 a1*/ \
    __asm movd mm1,[PIX+YSTRIDE] \
    /*ystride3=_ystride*3*/ \
    __asm lea YSTRIDE3,[YSTRIDE+YSTRIDE*2] \
    /*x x x x d2 c2 b2 a2*/ \
    __asm movd mm2,[PIX+YSTRIDE*2] \
    /*x x x x d3 c3 b3 a3*/ \
    __asm lea D,[PIX+YSTRIDE*4] \
    __asm movd mm3,[PIX+YSTRIDE3] \
    /*x x x x d4 c4 b4 a4*/ \
    __asm movd mm4,[D] \
    /*x x x x d5 c5 b5 a5*/ \
    __asm movd mm5,[D+YSTRIDE] \
    /*x x x x d6 c6 b6 a6*/ \
    __asm movd mm6,[D+YSTRIDE*2] \
    /*x x x x d7 c7 b7 a7*/ \
    __asm movd mm7,[D+YSTRIDE3] \
    /*mm0=d1 d0 c1 c0 b1 b0 a1 a0*/ \
    __asm punpcklbw mm0,mm1 \
    /*mm2=d3 d2 c3 c2 b3 b2 a3 a2*/ \
    __asm punpcklbw mm2,mm3 \
    /*mm3=d1 d0 c1 c0 b1 b0 a1 a0*/ \
    __asm movq mm3,mm0 \
    /*mm0=b3 b2 b1 b0 a3 a2 a1 a0*/ \
    __asm punpcklwd mm0,mm2 \
    /*mm3=d3 d2 d1 d0 c3 c2 c1 c0*/ \
    __asm punpckhwd mm3,mm2 \
    /*mm1=b3 b2 b1 b0 a3 a2 a1 a0*/ \
    __asm movq mm1,mm0 \
    /*mm4=d5 d4 c5 c4 b5 b4 a5 a4*/ \
    __asm punpcklbw mm4,mm5 \
    /*mm6=d7 d6 c7 c6 b7 b6 a7 a6*/ \
    __asm punpcklbw mm6,mm7 \
    /*mm5=d5 d4 c5 c4 b5 b4 a5 a4*/ \
    __asm movq mm5,mm4 \
    /*mm4=b7 b6 b5 b4 a7 a6 a5 a4*/ \
    __asm punpcklwd mm4,mm6 \
    /*mm5=d7 d6 d5 d4 c7 c6 c5 c4*/ \
    __asm punpckhwd mm5,mm6 \
    /*mm2=d3 d2 d1 d0 c3 c2 c1 c0*/ \
    __asm movq mm2,mm3 \
    /*mm0=a7 a6 a5 a4 a3 a2 a1 a0*/ \
    __asm punpckldq mm0,mm4 \
    /*mm1=b7 b6 b5 b4 b3 b2 b1 b0*/ \
    __asm punpckhdq mm1,mm4 \
    /*mm2=c7 c6 c5 c4 c3 c2 c1 c0*/ \
    __asm punpckldq mm2,mm5 \
    /*mm3=d7 d6 d5 d4 d3 d2 d1 d0*/ \
    __asm punpckhdq mm3,mm5 \
    OC_LOOP_FILTER8_MMX \
    /*mm0={b0+R_0'',...,b7+R_7''}*/ \
    __asm movq mm0,mm1 \
    /*mm1={b0+R_0'',c0-R_0'',...,b3+R_3'',c3-R_3''}*/ \
    __asm punpcklbw mm1,mm2 \
    /*mm0={b4+R_4'',c4-R_4'',...,b7+R_7'',c7-R_7''}*/ \
    __asm punpckhbw mm0,mm2 \
    /*[d]=c1 b1 c0 b0*/ \
    __asm movd D,mm1 \
    __asm mov [PIX+1],D_WORD \
    __asm psrlq mm1,32 \
    __asm shr D,16 \
    __asm mov [PIX+YSTRIDE+1],D_WORD \
    /*[d]=c3 b3 c2 b2*/ \
    __asm movd D,mm1 \
    __asm mov [PIX+YSTRIDE*2+1],D_WORD \
    __asm shr D,16 \
    __asm mov [PIX+YSTRIDE3+1],D_WORD \
    __asm lea PIX,[PIX+YSTRIDE*4] \
    /*[d]=c5 b5 c4 b4*/ \
    __asm movd D,mm0 \
    __asm mov [PIX+1],D_WORD \
    __asm psrlq mm0,32 \
    __asm shr D,16 \
    __asm mov [PIX+YSTRIDE+1],D_WORD \
    /*[d]=c7 b7 c6 b6*/ \
    __asm movd D,mm0 \
    __asm mov [PIX+YSTRIDE*2+1],D_WORD \
    __asm shr D,16 \
    __asm mov [PIX+YSTRIDE3+1],D_WORD \
  } \
  while(0)

# endif
#endif
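
/*Not part of the original header: a self-contained plain-C sketch of the
   filtering that OC_LOOP_FILTER_H_MMX implements across a vertical block
   edge, kept after the header body purely as illustration.
  The MMX macro gathers the four pixels straddling the edge in each of 8
   rows, transposes them with the punpck sequence so the filter can run on 8
   lanes at once, and then stores only the two middle bytes of each row back.
  The scalar version below just walks the rows directly; oc_loop_filter_h_c()
   is a hypothetical name, and the int limit parameter stands in for the
   replicated limit the macro loads through _ll.*/
static void oc_loop_filter_h_c(unsigned char *_pix,int _ystride,int _l){
  int y;
  /*Columns a,b,c,d sit at _pix-2.._pix+1; only b and c (at _pix-1 and _pix)
     are modified, matching the 16-bit stores to [PIX+...+1] above.*/
  _pix-=2;
  for(y=0;y<8;y++){
    int r;
    int a;
    int m;
    int b;
    int c;
    /*R=(a-d+3*(c-b)+4)>>3, with an arithmetic shift like psraw.*/
    r=(_pix[0]-_pix[3]+3*(_pix[2]-_pix[1])+4)>>3;
    /*lflim(R,L)=sign(R)*min(abs(R),max(2*L-abs(R),0)).*/
    a=r<0?-r:r;
    m=2*_l-a;
    if(m<0)m=0;
    if(a>m)a=m;
    r=r<0?-a:a;
    /*Adjust b and c, clamping to [0,255] as the saturating byte adds do.*/
    b=_pix[1]+r;
    c=_pix[2]-r;
    _pix[1]=(unsigned char)(b<0?0:b>255?255:b);
    _pix[2]=(unsigned char)(c<0?0:c>255?255:c);
    _pix+=_ystride;
  }
}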