GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/libtheora/x86_vc/x86zigzag.h

/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009                *
 * by the Xiph.Org Foundation and contributors                      *
 * https://www.xiph.org/                                            *
 *                                                                  *
 ********************************************************************

  function:

 ********************************************************************/

#if !defined(_x86_vc_x86zigzag_H)
# define _x86_vc_x86zigzag_H (1)
# include "x86enc.h"

/*Converts DCT coefficients from transposed order into zig-zag scan order and
  stores them in Y.
  This relies on two macros to load the contents of each row:
  OC_ZZ_LOAD_ROW_LO(row,reg) and OC_ZZ_LOAD_ROW_HI(row,reg), which load the
  first four and second four entries of each row into the specified register,
  respectively.
  OC_ZZ_LOAD_ROW_LO must be called before OC_ZZ_LOAD_ROW_HI for the same row
  (because when the rows are already in SSE2 registers, loading the high half
  destructively modifies the register).
  The index of each output element in the original 64-element array should wind
  up in the following 8x8 matrix (the letters indicate the order we compute
  each 4-tuple below):
    A  0  8  1  2  9 16 24 17 B
    C 10  3  4 11 18 25 32 40 E
    F 33 26 19 12  5  6 13 20 D
    G 27 34 41 48 56 49 42 35 I
    L 28 21 14  7 15 22 29 36 M
    H 43 50 57 58 51 44 37 30 O
    N 23 31 38 45 52 59 60 53 J
    P 46 39 47 54 61 62 55 63 K
  The order of the coefficients within each tuple is reversed in the comments
  below to reflect the usual MSB to LSB notation.*/
#define OC_TRANSPOSE_ZIG_ZAG_MMXEXT \
  OC_ZZ_LOAD_ROW_LO(0,mm0) /*mm0=03 02 01 00*/ \
  OC_ZZ_LOAD_ROW_LO(1,mm1) /*mm1=11 10 09 08*/ \
  OC_ZZ_LOAD_ROW_LO(2,mm2) /*mm2=19 18 17 16*/ \
  OC_ZZ_LOAD_ROW_LO(3,mm3) /*mm3=27 26 25 24*/ \
  OC_ZZ_LOAD_ROW_HI(0,mm4) /*mm4=07 06 05 04*/ \
  OC_ZZ_LOAD_ROW_HI(1,mm5) /*mm5=15 14 13 12*/ \
  OC_ZZ_LOAD_ROW_HI(2,mm6) /*mm6=23 22 21 20*/ \
  __asm movq mm7,mm0 /*mm7=03 02 01 00*/ \
  __asm punpckhdq mm0,mm1 /*mm0=11 10 03 02*/ \
  __asm pshufw mm4,mm4,0x39 /*mm4=04 07 06 05*/ \
  __asm punpcklwd mm1,mm0 /*mm1=03 09 02 08*/ \
  __asm pshufw mm5,mm5,0x39 /*mm5=12 15 14 13*/ \
  __asm punpcklwd mm7,mm1 /*mm7=02 01 08 00 *A*/ \
  __asm movq [Y+0x00],mm7 \
  __asm punpckhwd mm1,mm4 /*mm1=04 03 07 09*/ \
  __asm movq mm7,mm2 /*mm7=19 18 17 16*/ \
  __asm punpckhdq mm0,mm1 /*mm0=04 03 11 10*/ \
  __asm punpckhwd mm7,mm5 /*mm7=12 19 15 18*/ \
  __asm punpcklwd mm1,mm3 /*mm1=25 07 24 09*/ \
  __asm punpcklwd mm5,mm6 /*mm5=21 14 20 13*/ \
  __asm punpcklwd mm1,mm2 /*mm1=17 24 16 09 *B*/ \
  OC_ZZ_LOAD_ROW_LO(4,mm2) /*mm2=35 34 33 32*/ \
  __asm movq [Y+0x08],mm1 \
  OC_ZZ_LOAD_ROW_LO(5,mm1) /*mm1=43 42 41 40*/ \
  __asm pshufw mm0,mm0,0x78 /*mm0=11 04 03 10 *C*/ \
  __asm movq [Y+0x10],mm0 \
  __asm punpckhdq mm6,mm4 /*mm6=?? 07 23 22*/ \
  __asm punpckldq mm4,mm5 /*mm4=20 13 06 05 *D*/ \
  __asm movq [Y+0x28],mm4 \
  __asm psrlq mm3,16 /*mm3=.. 27 26 25*/ \
  __asm pshufw mm0,mm2,0x0E /*mm0=?? ?? 35 34*/ \
  __asm movq mm4,mm7 /*mm4=12 19 15 18*/ \
  __asm punpcklwd mm2,mm3 /*mm2=26 33 25 32*/ \
  __asm punpcklwd mm4,mm1 /*mm4=41 15 40 18*/ \
  __asm punpckhwd mm3,mm1 /*mm3=43 .. 42 27*/ \
  __asm punpckldq mm4,mm2 /*mm4=25 32 40 18*/ \
  __asm punpcklwd mm3,mm0 /*mm3=35 42 34 27*/ \
  OC_ZZ_LOAD_ROW_LO(6,mm0) /*mm0=51 50 49 48*/ \
  __asm pshufw mm4,mm4,0x6C /*mm4=40 32 25 18 *E*/ \
  __asm movq [Y+0x18],mm4 \
  OC_ZZ_LOAD_ROW_LO(7,mm4) /*mm4=59 58 57 56*/ \
  __asm punpckhdq mm2,mm7 /*mm2=12 19 26 33 *F*/ \
  __asm movq [Y+0x20],mm2 \
  __asm pshufw mm1,mm1,0xD0 /*mm1=43 41 ?? ??*/ \
  __asm pshufw mm0,mm0,0x87 /*mm0=50 48 49 51*/ \
  __asm movq mm2,mm3 /*mm2=35 42 34 27*/ \
  __asm punpckhwd mm1,mm0 /*mm1=50 43 48 41*/ \
  __asm pshufw mm4,mm4,0x93 /*mm4=58 57 56 59*/ \
  __asm punpckldq mm3,mm1 /*mm3=48 41 34 27 *G*/ \
  __asm movq [Y+0x30],mm3 \
  __asm punpckhdq mm1,mm4 /*mm1=58 57 50 43 *H*/ \
  __asm movq [Y+0x50],mm1 \
  OC_ZZ_LOAD_ROW_HI(7,mm1) /*mm1=63 62 61 60*/ \
  __asm punpcklwd mm4,mm0 /*mm4=49 56 51 59*/ \
  OC_ZZ_LOAD_ROW_HI(6,mm0) /*mm0=55 54 53 52*/ \
  __asm psllq mm6,16 /*mm6=07 23 22 ..*/ \
  __asm movq mm3,mm4 /*mm3=49 56 51 59*/ \
  __asm punpckhdq mm4,mm2 /*mm4=35 42 49 56 *I*/ \
  OC_ZZ_LOAD_ROW_HI(3,mm2) /*mm2=31 30 29 28*/ \
  __asm movq [Y+0x38],mm4 \
  __asm punpcklwd mm3,mm1 /*mm3=61 51 60 59*/ \
  __asm punpcklwd mm7,mm6 /*mm7=22 15 .. ??*/ \
  __asm movq mm4,mm3 /*mm4=61 51 60 59*/ \
  __asm punpcklwd mm3,mm0 /*mm3=53 60 52 59*/ \
  __asm punpckhwd mm4,mm0 /*mm4=55 61 54 51*/ \
  OC_ZZ_LOAD_ROW_HI(4,mm0) /*mm0=39 38 37 36*/ \
  __asm pshufw mm3,mm3,0xE1 /*mm3=53 60 59 52 *J*/ \
  __asm movq [Y+0x68],mm3 \
  __asm movq mm3,mm4 /*mm3=?? ?? 54 51*/ \
  __asm pshufw mm2,mm2,0x39 /*mm2=28 31 30 29*/ \
  __asm punpckhwd mm4,mm1 /*mm4=63 55 62 61 *K*/ \
  OC_ZZ_LOAD_ROW_HI(5,mm1) /*mm1=47 46 45 44*/ \
  __asm movq [Y+0x78],mm4 \
  __asm punpckhwd mm6,mm2 /*mm6=28 07 31 23*/ \
  __asm punpcklwd mm2,mm0 /*mm2=37 30 36 29*/ \
  __asm punpckhdq mm5,mm6 /*mm5=28 07 21 14*/ \
  __asm pshufw mm2,mm2,0x4B /*mm2=36 29 30 37*/ \
  __asm pshufw mm5,mm5,0x87 /*mm5=07 14 21 28 *L*/ \
  __asm movq [Y+0x40],mm5 \
  __asm punpckhdq mm7,mm2 /*mm7=36 29 22 15 *M*/ \
  __asm movq [Y+0x48],mm7 \
  __asm pshufw mm1,mm1,0x9C /*mm1=46 45 47 44*/ \
  __asm punpckhwd mm0,mm1 /*mm0=46 39 45 38*/ \
  __asm punpcklwd mm3,mm1 /*mm3=47 54 44 51*/ \
  __asm punpckldq mm6,mm0 /*mm6=45 38 31 23 *N*/ \
  __asm movq [Y+0x60],mm6 \
  __asm punpckhdq mm0,mm3 /*mm0=47 54 46 39*/ \
  __asm punpckldq mm3,mm2 /*mm3=30 37 44 51 *O*/ \
  __asm movq [Y+0x58],mm3 \
  __asm pshufw mm0,mm0,0xB1 /*mm0=54 47 39 46 *P*/ \
  __asm movq [Y+0x70],mm0 \

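
/*For reference only, and not part of libtheora: a plain C sketch of what the
  MMXEXT macro above computes, with no MMX at all.  _y[i] receives the
  transposed-order input coefficient whose index is given by the 8x8 matrix in
  the comment above, read in row-major order.  The table and function names
  here are illustrative assumptions, and the coefficient type is assumed to be
  16-bit (ogg_int16_t); the block is guarded out so it is never compiled.*/
#if 0
static const unsigned char OC_ZZ_FROM_TRANSPOSE[64]={
   0, 8, 1, 2, 9,16,24,17,
  10, 3, 4,11,18,25,32,40,
  33,26,19,12, 5, 6,13,20,
  27,34,41,48,56,49,42,35,
  28,21,14, 7,15,22,29,36,
  43,50,57,58,51,44,37,30,
  23,31,38,45,52,59,60,53,
  46,39,47,54,61,62,55,63
};

/*Scalar equivalent: each output slot pulls the transposed-order coefficient
  named by the table, exactly as the macro stores its 4-tuples A..P.*/
static void oc_transpose_zig_zag_c(ogg_int16_t _y[64],const ogg_int16_t _x[64]){
  int i;
  for(i=0;i<64;i++)_y[i]=_x[OC_ZZ_FROM_TRANSPOSE[i]];
}
#endif
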
/*Converts DCT coefficients in %[dct] from natural order into zig-zag scan
  order and stores them in %[qdct].
  The index of each output element in the original 64-element array should wind
  up in the following 8x8 matrix (the letters indicate the order we compute
  each 4-tuple below):
    A  0  1  8 16  9  2  3 10 B
    C 17 24 32 25 18 11  4  5 D
    E 12 19 26 33 40 48 41 34 I
    H 27 20 13  6  7 14 21 28 G
    K 35 42 49 56 57 50 43 36 J
    F 29 22 15 23 30 37 44 51 M
    P 58 59 52 45 38 31 39 46 L
    N 53 60 61 54 47 55 62 63 O
  The order of the coefficients within each tuple is reversed in the comments
  below to reflect the usual MSB to LSB notation.*/
#define OC_ZIG_ZAG_MMXEXT \
  "movq 0x00(%[dct]),%%mm0\n\t" /*mm0=03 02 01 00*/ \
  "movq 0x08(%[dct]),%%mm1\n\t" /*mm1=07 06 05 04*/ \
  "movq 0x10(%[dct]),%%mm2\n\t" /*mm2=11 10 09 08*/ \
  "movq 0x20(%[dct]),%%mm3\n\t" /*mm3=19 18 17 16*/ \
  "movq 0x30(%[dct]),%%mm4\n\t" /*mm4=27 26 25 24*/ \
  "movq 0x40(%[dct]),%%mm5\n\t" /*mm5=35 34 33 32*/ \
  "movq %%mm2,%%mm7\n\t" /*mm7=11 10 09 08*/ \
  "punpcklwd %%mm3,%%mm2\n\t" /*mm2=17 09 16 08*/ \
  "movq %%mm0,%%mm6\n\t" /*mm6=03 02 01 00*/ \
  "punpckldq %%mm2,%%mm0\n\t" /*mm0=16 08 01 00 *A*/ \
  "movq %%mm0,0x00(%[qdct])\n\t" \
  "movq 0x18(%[dct]),%%mm0\n\t" /*mm0=15 14 13 12*/ \
  "punpckhdq %%mm6,%%mm6\n\t" /*mm6=03 02 03 02*/ \
  "psrlq $16,%%mm7\n\t" /*mm7=.. 11 10 09*/ \
  "punpckldq %%mm7,%%mm6\n\t" /*mm6=10 09 03 02*/ \
  "punpckhwd %%mm7,%%mm3\n\t" /*mm3=.. 19 11 18*/ \
  "pshufw $0xD2,%%mm6,%%mm6\n\t" /*mm6=10 03 02 09 *B*/ \
  "movq %%mm6,0x08(%[qdct])\n\t" \
  "psrlq $48,%%mm2\n\t" /*mm2=.. .. .. 17*/ \
  "movq %%mm1,%%mm6\n\t" /*mm6=07 06 05 04*/ \
  "punpcklwd %%mm5,%%mm2\n\t" /*mm2=33 .. 32 17*/ \
  "movq %%mm3,%%mm7\n\t" /*mm7=.. 19 11 18*/ \
  "punpckldq %%mm1,%%mm3\n\t" /*mm3=05 04 11 18 *C*/ \
  "por %%mm2,%%mm7\n\t" /*mm7=33 19 ?? ??*/ \
  "punpcklwd %%mm4,%%mm2\n\t" /*mm2=25 32 24 17 *D**/ \
  "movq %%mm2,0x10(%[qdct])\n\t" \
  "movq %%mm3,0x18(%[qdct])\n\t" \
  "movq 0x28(%[dct]),%%mm2\n\t" /*mm2=23 22 21 20*/ \
  "movq 0x38(%[dct]),%%mm1\n\t" /*mm1=31 30 29 28*/ \
  "pshufw $0x9C,%%mm0,%%mm3\n\t" /*mm3=14 13 15 12*/ \
  "punpckhdq %%mm7,%%mm7\n\t" /*mm7=33 19 33 19*/ \
  "punpckhwd %%mm3,%%mm6\n\t" /*mm6=14 07 13 06*/ \
  "punpckldq %%mm0,%%mm0\n\t" /*mm0=13 12 13 12*/ \
  "punpcklwd %%mm1,%%mm3\n\t" /*mm3=29 15 28 12*/ \
  "punpckhwd %%mm4,%%mm0\n\t" /*mm0=27 13 26 12*/ \
  "pshufw $0xB4,%%mm3,%%mm3\n\t" /*mm3=15 29 28 12*/ \
  "psrlq $48,%%mm4\n\t" /*mm4=.. .. .. 27*/ \
  "punpcklwd %%mm7,%%mm0\n\t" /*mm0=33 26 19 12 *E*/ \
  "punpcklwd %%mm1,%%mm4\n\t" /*mm4=29 .. 28 27*/ \
  "punpckhwd %%mm2,%%mm3\n\t" /*mm3=23 15 22 29 *F*/ \
  "movq %%mm0,0x20(%[qdct])\n\t" \
  "movq %%mm3,0x50(%[qdct])\n\t" \
  "movq 0x60(%[dct]),%%mm3\n\t" /*mm3=51 50 49 48*/ \
  "movq 0x70(%[dct]),%%mm7\n\t" /*mm7=59 58 57 56*/ \
  "movq 0x50(%[dct]),%%mm0\n\t" /*mm0=43 42 41 40*/ \
  "punpcklwd %%mm4,%%mm2\n\t" /*mm2=28 21 27 20*/ \
  "psrlq $32,%%mm5\n\t" /*mm5=.. .. 35 34*/ \
  "movq %%mm2,%%mm4\n\t" /*mm4=28 21 27 20*/ \
  "punpckldq %%mm6,%%mm2\n\t" /*mm2=13 06 27 20*/ \
  "punpckhdq %%mm4,%%mm6\n\t" /*mm6=28 21 14 07 *G*/ \
  "movq %%mm3,%%mm4\n\t" /*mm4=51 50 49 48*/ \
  "pshufw $0xB1,%%mm2,%%mm2\n\t" /*mm2=06 13 20 27 *H*/ \
  "movq %%mm2,0x30(%[qdct])\n\t" \
  "movq %%mm6,0x38(%[qdct])\n\t" \
  "movq 0x48(%[dct]),%%mm2\n\t" /*mm2=39 38 37 36*/ \
  "punpcklwd %%mm5,%%mm4\n\t" /*mm4=35 49 34 48*/ \
  "movq 0x58(%[dct]),%%mm5\n\t" /*mm5=47 46 45 44*/ \
  "punpckldq %%mm7,%%mm6\n\t" /*mm6=57 56 14 07*/ \
  "psrlq $32,%%mm3\n\t" /*mm3=.. .. 51 50*/ \
  "punpckhwd %%mm0,%%mm6\n\t" /*mm6=43 57 42 56*/ \
  "punpcklwd %%mm4,%%mm0\n\t" /*mm0=34 41 48 40 *I*/ \
  "pshufw $0x4E,%%mm6,%%mm6\n\t" /*mm6=42 56 43 57*/ \
  "movq %%mm0,0x28(%[qdct])\n\t" \
  "punpcklwd %%mm2,%%mm3\n\t" /*mm3=37 51 36 50*/ \
  "punpckhwd %%mm6,%%mm4\n\t" /*mm4=42 35 56 49*/ \
  "punpcklwd %%mm3,%%mm6\n\t" /*mm6=36 43 50 57 *J*/ \
  "pshufw $0x4E,%%mm4,%%mm4\n\t" /*mm4=56 49 42 35 *K*/ \
  "movq %%mm4,0x40(%[qdct])\n\t" \
  "movq %%mm6,0x48(%[qdct])\n\t" \
  "movq 0x68(%[dct]),%%mm6\n\t" /*mm6=55 54 53 52*/ \
  "movq 0x78(%[dct]),%%mm0\n\t" /*mm0=63 62 61 60*/ \
  "psrlq $32,%%mm1\n\t" /*mm1=.. .. 31 30*/ \
  "pshufw $0xD8,%%mm5,%%mm5\n\t" /*mm5=47 45 46 44*/ \
  "pshufw $0x0B,%%mm3,%%mm3\n\t" /*mm3=50 50 51 37*/ \
  "punpcklwd %%mm5,%%mm1\n\t" /*mm1=46 31 44 30*/ \
  "pshufw $0xC9,%%mm6,%%mm6\n\t" /*mm6=55 52 54 53*/ \
  "punpckhwd %%mm1,%%mm2\n\t" /*mm2=46 39 31 38 *L*/ \
  "punpcklwd %%mm3,%%mm1\n\t" /*mm1=51 44 37 30 *M*/ \
  "movq %%mm2,0x68(%[qdct])\n\t" \
  "movq %%mm1,0x58(%[qdct])\n\t" \
  "punpckhwd %%mm6,%%mm5\n\t" /*mm5=55 47 52 45*/ \
  "punpckldq %%mm0,%%mm6\n\t" /*mm6=61 60 54 53*/ \
  "pshufw $0x10,%%mm5,%%mm4\n\t" /*mm4=45 52 45 45*/ \
  "pshufw $0x78,%%mm6,%%mm6\n\t" /*mm6=53 60 61 54 *N*/ \
  "punpckhdq %%mm0,%%mm5\n\t" /*mm5=63 62 55 47 *O*/ \
  "punpckhdq %%mm4,%%mm7\n\t" /*mm7=45 52 59 58 *P*/ \
  "movq %%mm6,0x70(%[qdct])\n\t" \
  "movq %%mm5,0x78(%[qdct])\n\t" \
  "movq %%mm7,0x60(%[qdct])\n\t" \

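
/*The macro above expands to a sequence of instruction strings with %[dct] and
  %[qdct] operands, meant to be pasted into a single GCC-style extended-asm
  statement.  As a reference only, and not part of libtheora, the guarded-out
  sketch below shows where the index matrix in the comment comes from: it
  generates the standard 8x8 zig-zag scan by walking anti-diagonals and then
  applies it, which is the scalar equivalent of OC_ZIG_ZAG_MMXEXT.  Function
  names are illustrative assumptions, and the coefficient type is assumed to
  be 16-bit (ogg_int16_t).*/
#if 0
static void oc_make_zig_zag(unsigned char _zz[64]){
  int i;
  int row;
  int col;
  row=col=0;
  for(i=0;i<64;i++){
    _zz[i]=(unsigned char)(row*8+col);
    if(((row+col)&1)==0){
      /*Even anti-diagonals move up and to the right.*/
      if(col<7)col++;
      else row+=2;
      if(row>0)row--;
    }
    else{
      /*Odd anti-diagonals move down and to the left.*/
      if(row<7)row++;
      else col+=2;
      if(col>0)col--;
    }
  }
}

/*Scalar equivalent of the macro: _qdct[i]=_dct[zig_zag[i]].*/
static void oc_zig_zag_c(ogg_int16_t _qdct[64],const ogg_int16_t _dct[64]){
  unsigned char zz[64];
  int           i;
  oc_make_zig_zag(zz);
  for(i=0;i<64;i++)_qdct[i]=_dct[zz[i]];
}
#endif
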
#endif