// Copyright (c) 2015- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "ppsspp_config.h"
#include "Common/Data/Convert/ColorConv.h"
#include "Common/Data/Convert/SmallDataConvert.h"
#include "Common/Common.h"
#include "Common/CPUDetect.h"

#ifdef _M_SSE
#include <emmintrin.h>
#include <smmintrin.h>
#endif

#if PPSSPP_ARCH(ARM_NEON)
#if defined(_MSC_VER) && PPSSPP_ARCH(ARM64)
#include <arm64_neon.h>
#else
#include <arm_neon.h>
#endif
#endif

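// Each converter below follows the same shape: when SSE or NEON is available
// and both pointers are 16-byte aligned, pixels are converted in 128-bit
// chunks, and a plain scalar loop handles the unaligned case plus any
// leftover pixels at the end.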
void ConvertBGRA8888ToRGBA8888(u32 *dst, const u32 *src, u32 numPixels) {
#ifdef _M_SSE
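	// G and A already sit in the right byte lanes; mask them off, then swap
	// R and B by shifting the remaining two bytes 16 bits in each direction.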
	const __m128i maskGA = _mm_set1_epi32(0xFF00FF00);

	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst;
	u32 sseChunks = numPixels / 4;
	if (((intptr_t)src & 0xF) || ((intptr_t)dst & 0xF)) {
		sseChunks = 0;
	}
	for (u32 i = 0; i < sseChunks; ++i) {
		__m128i c = _mm_load_si128(&srcp[i]);
		__m128i rb = _mm_andnot_si128(maskGA, c);
		c = _mm_and_si128(c, maskGA);

		__m128i b = _mm_srli_epi32(rb, 16);
		__m128i r = _mm_slli_epi32(rb, 16);
		c = _mm_or_si128(_mm_or_si128(c, r), b);
		_mm_store_si128(&dstp[i], c);
	}
	// The remainder starts right after those done via SSE.
	u32 i = sseChunks * 4;
#else
	u32 i = 0;
#endif
	for (; i < numPixels; i++) {
		const u32 c = src[i];
		dst[i] = ((c >> 16) & 0x000000FF) |
			(c & 0xFF00FF00) |
			((c << 16) & 0x00FF0000);
	}
}

void ConvertBGRA8888ToRGB888(u8 *dst, const u32 *src, u32 numPixels) {
	for (uint32_t x = 0; x < numPixels; ++x) {
		uint32_t c = src[x];
		dst[x * 3 + 0] = (c >> 16) & 0xFF;
		dst[x * 3 + 1] = (c >> 8) & 0xFF;
		dst[x * 3 + 2] = (c >> 0) & 0xFF;
	}
}

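// RGBA8888 -> RGBA5551: A ends up in bit 15, B in bits 10-14, G in 5-9,
// R in 0-4. The helper eats two 128-bit loads per iteration so that
// _mm_packus_epi32 can squeeze all eight 16-bit results into a single store.
// The [[gnu::target("sse4.1")]] attribute lets GCC/Clang compile just this
// function with SSE4.1 without raising the baseline for the whole file.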
#if defined(_M_SSE)
#if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
[[gnu::target("sse4.1")]]
#endif
static inline void ConvertRGBA8888ToRGBA5551_SSE4(__m128i *dstp, const __m128i *srcp, u32 sseChunks) {
	const __m128i maskAG = _mm_set1_epi32(0x8000F800);
	const __m128i maskRB = _mm_set1_epi32(0x00F800F8);
	const __m128i mask = _mm_set1_epi32(0x0000FFFF);

	for (u32 i = 0; i < sseChunks; i += 2) {
		__m128i c1 = _mm_load_si128(&srcp[i + 0]);
		__m128i c2 = _mm_load_si128(&srcp[i + 1]);
		__m128i ag, rb;

		ag = _mm_and_si128(c1, maskAG);
		ag = _mm_or_si128(_mm_srli_epi32(ag, 16), _mm_srli_epi32(ag, 6));
		rb = _mm_and_si128(c1, maskRB);
		rb = _mm_or_si128(_mm_srli_epi32(rb, 3), _mm_srli_epi32(rb, 9));
		c1 = _mm_and_si128(_mm_or_si128(ag, rb), mask);

		ag = _mm_and_si128(c2, maskAG);
		ag = _mm_or_si128(_mm_srli_epi32(ag, 16), _mm_srli_epi32(ag, 6));
		rb = _mm_and_si128(c2, maskRB);
		rb = _mm_or_si128(_mm_srli_epi32(rb, 3), _mm_srli_epi32(rb, 9));
		c2 = _mm_and_si128(_mm_or_si128(ag, rb), mask);

		_mm_store_si128(&dstp[i / 2], _mm_packus_epi32(c1, c2));
	}
}
#endif

void ConvertRGBA8888ToRGBA5551(u16 *dst, const u32 *src, u32 numPixels) {
#if defined(_M_SSE)
	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst;
	u32 sseChunks = (numPixels / 4) & ~1;
	// SSE 4.1 required for _mm_packus_epi32.
	if (((intptr_t)src & 0xF) || ((intptr_t)dst & 0xF) || !cpu_info.bSSE4_1) {
		sseChunks = 0;
	} else {
		ConvertRGBA8888ToRGBA5551_SSE4(dstp, srcp, sseChunks);
	}

	// The remainder starts right after those done via SSE.
	u32 i = sseChunks * 4;
#else
	u32 i = 0;
#endif
	for (; i < numPixels; i++) {
		dst[i] = RGBA8888toRGBA5551(src[i]);
	}
}

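// Identical to the RGBA5551 helper above except for the BGRA byte order:
// only the R/B moves differ (R comes down from bits 19-23, B up from 3-7).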
#if defined(_M_SSE)
#if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
[[gnu::target("sse4.1")]]
#endif
static inline void ConvertBGRA8888ToRGBA5551_SSE4(__m128i *dstp, const __m128i *srcp, u32 sseChunks) {
	const __m128i maskAG = _mm_set1_epi32(0x8000F800);
	const __m128i maskRB = _mm_set1_epi32(0x00F800F8);
	const __m128i mask = _mm_set1_epi32(0x0000FFFF);

	for (u32 i = 0; i < sseChunks; i += 2) {
		__m128i c1 = _mm_load_si128(&srcp[i + 0]);
		__m128i c2 = _mm_load_si128(&srcp[i + 1]);
		__m128i ag, rb;

		ag = _mm_and_si128(c1, maskAG);
		ag = _mm_or_si128(_mm_srli_epi32(ag, 16), _mm_srli_epi32(ag, 6));
		rb = _mm_and_si128(c1, maskRB);
		rb = _mm_or_si128(_mm_srli_epi32(rb, 19), _mm_slli_epi32(rb, 7));
		c1 = _mm_and_si128(_mm_or_si128(ag, rb), mask);

		ag = _mm_and_si128(c2, maskAG);
		ag = _mm_or_si128(_mm_srli_epi32(ag, 16), _mm_srli_epi32(ag, 6));
		rb = _mm_and_si128(c2, maskRB);
		rb = _mm_or_si128(_mm_srli_epi32(rb, 19), _mm_slli_epi32(rb, 7));
		c2 = _mm_and_si128(_mm_or_si128(ag, rb), mask);

		_mm_store_si128(&dstp[i / 2], _mm_packus_epi32(c1, c2));
	}
}
#endif

void ConvertBGRA8888ToRGBA5551(u16 *dst, const u32 *src, u32 numPixels) {
#if defined(_M_SSE)
	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst;
	u32 sseChunks = (numPixels / 4) & ~1;
	// SSE 4.1 required for _mm_packus_epi32.
	if (((intptr_t)src & 0xF) || ((intptr_t)dst & 0xF) || !cpu_info.bSSE4_1) {
		sseChunks = 0;
	} else {
		ConvertBGRA8888ToRGBA5551_SSE4(dstp, srcp, sseChunks);
	}

	// The remainder starts right after those done via SSE.
	u32 i = sseChunks * 4;
#else
	u32 i = 0;
#endif
	for (; i < numPixels; i++) {
		dst[i] = BGRA8888toRGBA5551(src[i]);
	}
}

void ConvertBGRA8888ToRGB565(u16 *dst, const u32 *src, u32 numPixels) {
	for (u32 i = 0; i < numPixels; i++) {
		dst[i] = BGRA8888toRGB565(src[i]);
	}
}

void ConvertBGRA8888ToRGBA4444(u16 *dst, const u32 *src, u32 numPixels) {
	for (u32 i = 0; i < numPixels; i++) {
		dst[i] = BGRA8888toRGBA4444(src[i]);
	}
}

void ConvertRGBA8888ToRGB565(u16 *dst, const u32 *src, u32 numPixels) {
	for (u32 x = 0; x < numPixels; ++x) {
		dst[x] = RGBA8888toRGB565(src[x]);
	}
}

void ConvertRGBA8888ToRGBA4444(u16 *dst, const u32 *src, u32 numPixels) {
	for (u32 x = 0; x < numPixels; ++x) {
		dst[x] = RGBA8888toRGBA4444(src[x]);
	}
}

void ConvertRGBA8888ToRGB888(u8 *dst, const u32 *src, u32 numPixels) {
	for (uint32_t x = 0; x < numPixels; ++x) {
		memcpy(dst + x * 3, src + x, 3);
	}
}

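// Widening a 5-bit channel to 8 bits replicates the top bits into the bottom
// ones, so 0x1F maps to exactly 0xFF; the SSE path below does this with a
// paired shift-and-or, and the scalar tail's Convert5To8/Convert6To8 helpers
// perform the same replication.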
void ConvertRGB565ToRGBA8888(u32 *dst32, const u16 *src, u32 numPixels) {
#ifdef _M_SSE
	const __m128i mask5 = _mm_set1_epi16(0x001f);
	const __m128i mask6 = _mm_set1_epi16(0x003f);
	const __m128i mask8 = _mm_set1_epi16(0x00ff);

	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst32;
	u32 sseChunks = numPixels / 8;
	if (((intptr_t)src & 0xF) || ((intptr_t)dst32 & 0xF)) {
		sseChunks = 0;
	}
	for (u32 i = 0; i < sseChunks; ++i) {
		const __m128i c = _mm_load_si128(&srcp[i]);

		// Swizzle, resulting in RR00 RR00.
		__m128i r = _mm_and_si128(c, mask5);
		r = _mm_or_si128(_mm_slli_epi16(r, 3), _mm_srli_epi16(r, 2));
		r = _mm_and_si128(r, mask8);

		// This one becomes 00GG 00GG.
		__m128i g = _mm_and_si128(_mm_srli_epi16(c, 5), mask6);
		g = _mm_or_si128(_mm_slli_epi16(g, 2), _mm_srli_epi16(g, 4));
		g = _mm_slli_epi16(g, 8);

		// Almost done, we aim for BB00 BB00 again here.
		__m128i b = _mm_and_si128(_mm_srli_epi16(c, 11), mask5);
		b = _mm_or_si128(_mm_slli_epi16(b, 3), _mm_srli_epi16(b, 2));
		b = _mm_and_si128(b, mask8);

		// Always set alpha to 00FF 00FF.
		__m128i a = _mm_slli_epi16(mask8, 8);

		// Now combine them, RRGG RRGG and BBAA BBAA, and then interleave.
		const __m128i rg = _mm_or_si128(r, g);
		const __m128i ba = _mm_or_si128(b, a);
		_mm_store_si128(&dstp[i * 2 + 0], _mm_unpacklo_epi16(rg, ba));
		_mm_store_si128(&dstp[i * 2 + 1], _mm_unpackhi_epi16(rg, ba));
	}
	u32 i = sseChunks * 8;
#else
	u32 i = 0;
#endif

	u8 *dst = (u8 *)dst32;
	for (u32 x = i; x < numPixels; x++) {
		u16 col = src[x];
		dst[x * 4] = Convert5To8((col) & 0x1f);
		dst[x * 4 + 1] = Convert6To8((col >> 5) & 0x3f);
		dst[x * 4 + 2] = Convert5To8((col >> 11) & 0x1f);
		dst[x * 4 + 3] = 255;
	}
}

void ConvertRGBA5551ToRGBA8888(u32 *dst32, const u16 *src, u32 numPixels) {
#ifdef _M_SSE
	const __m128i mask5 = _mm_set1_epi16(0x001f);
	const __m128i mask8 = _mm_set1_epi16(0x00ff);

	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst32;
	u32 sseChunks = numPixels / 8;
	if (((intptr_t)src & 0xF) || ((intptr_t)dst32 & 0xF)) {
		sseChunks = 0;
	}
	for (u32 i = 0; i < sseChunks; ++i) {
		const __m128i c = _mm_load_si128(&srcp[i]);

		// Swizzle, resulting in RR00 RR00.
		__m128i r = _mm_and_si128(c, mask5);
		r = _mm_or_si128(_mm_slli_epi16(r, 3), _mm_srli_epi16(r, 2));
		r = _mm_and_si128(r, mask8);

		// This one becomes 00GG 00GG.
		__m128i g = _mm_and_si128(_mm_srli_epi16(c, 5), mask5);
		g = _mm_or_si128(_mm_slli_epi16(g, 3), _mm_srli_epi16(g, 2));
		g = _mm_slli_epi16(g, 8);

		// Almost done, we aim for BB00 BB00 again here.
		__m128i b = _mm_and_si128(_mm_srli_epi16(c, 10), mask5);
		b = _mm_or_si128(_mm_slli_epi16(b, 3), _mm_srli_epi16(b, 2));
		b = _mm_and_si128(b, mask8);

		// 1 bit A to 00AA 00AA.
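		// The arithmetic shift smears bit 15 across the whole 16-bit lane
		// (0x0000 or 0xFFFF), so after the left shift the high byte holds
		// 0x00 or 0xFF, exactly the expanded alpha.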
		__m128i a = _mm_srai_epi16(c, 15);
		a = _mm_slli_epi16(a, 8);

		// Now combine them, RRGG RRGG and BBAA BBAA, and then interleave.
		const __m128i rg = _mm_or_si128(r, g);
		const __m128i ba = _mm_or_si128(b, a);
		_mm_store_si128(&dstp[i * 2 + 0], _mm_unpacklo_epi16(rg, ba));
		_mm_store_si128(&dstp[i * 2 + 1], _mm_unpackhi_epi16(rg, ba));
	}
	u32 i = sseChunks * 8;
#else
	u32 i = 0;
#endif

	u8 *dst = (u8 *)dst32;
	for (u32 x = i; x < numPixels; x++) {
		u16 col = src[x];
		dst[x * 4] = Convert5To8((col) & 0x1f);
		dst[x * 4 + 1] = Convert5To8((col >> 5) & 0x1f);
		dst[x * 4 + 2] = Convert5To8((col >> 10) & 0x1f);
		dst[x * 4 + 3] = (col >> 15) ? 255 : 0;
	}
}

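// 4-bit channels expand by nibble duplication: (x << 4) | x yields the exact
// 8-bit value. The rg/ba shift-and-or below does this in-register; the scalar
// tail's Convert4To8 computes the same mapping.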
void ConvertRGBA4444ToRGBA8888(u32 *dst32, const u16 *src, u32 numPixels) {
#ifdef _M_SSE
	const __m128i mask4 = _mm_set1_epi16(0x000f);

	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst32;
	u32 sseChunks = numPixels / 8;
	if (((intptr_t)src & 0xF) || ((intptr_t)dst32 & 0xF)) {
		sseChunks = 0;
	}
	for (u32 i = 0; i < sseChunks; ++i) {
		const __m128i c = _mm_load_si128(&srcp[i]);

		// Let's just grab R000 R000, without swizzling yet.
		__m128i r = _mm_and_si128(c, mask4);
		// And then 00G0 00G0.
		__m128i g = _mm_and_si128(_mm_srli_epi16(c, 4), mask4);
		g = _mm_slli_epi16(g, 8);
		// Now B000 B000.
		__m128i b = _mm_and_si128(_mm_srli_epi16(c, 8), mask4);
		// And lastly 00A0 00A0. No mask needed, we have a wall.
		__m128i a = _mm_srli_epi16(c, 12);
		a = _mm_slli_epi16(a, 8);

		// We swizzle after combining - R0G0 R0G0 and B0A0 B0A0 -> RRGG RRGG and BBAA BBAA.
		__m128i rg = _mm_or_si128(r, g);
		__m128i ba = _mm_or_si128(b, a);
		rg = _mm_or_si128(rg, _mm_slli_epi16(rg, 4));
		ba = _mm_or_si128(ba, _mm_slli_epi16(ba, 4));

		// And then we can store.
		_mm_store_si128(&dstp[i * 2 + 0], _mm_unpacklo_epi16(rg, ba));
		_mm_store_si128(&dstp[i * 2 + 1], _mm_unpackhi_epi16(rg, ba));
	}
	u32 i = sseChunks * 8;
#else
	u32 i = 0;
#endif

	u8 *dst = (u8 *)dst32;
	for (u32 x = i; x < numPixels; x++) {
		u16 col = src[x];
		dst[x * 4] = Convert4To8(col & 0xf);
		dst[x * 4 + 1] = Convert4To8((col >> 4) & 0xf);
		dst[x * 4 + 2] = Convert4To8((col >> 8) & 0xf);
		dst[x * 4 + 3] = Convert4To8(col >> 12);
	}
}

void ConvertBGR565ToRGBA8888(u32 *dst32, const u16 *src, u32 numPixels) {
	u8 *dst = (u8 *)dst32;
	for (u32 x = 0; x < numPixels; x++) {
		u16 col = src[x];
		dst[x * 4] = Convert5To8((col >> 11) & 0x1f);
		dst[x * 4 + 1] = Convert6To8((col >> 5) & 0x3f);
		dst[x * 4 + 2] = Convert5To8((col) & 0x1f);
		dst[x * 4 + 3] = 255;
	}
}

void ConvertABGR1555ToRGBA8888(u32 *dst32, const u16 *src, u32 numPixels) {
	u8 *dst = (u8 *)dst32;
	for (u32 x = 0; x < numPixels; x++) {
		u16 col = src[x];
		dst[x * 4] = Convert5To8((col >> 11) & 0x1f);
		dst[x * 4 + 1] = Convert5To8((col >> 6) & 0x1f);
		dst[x * 4 + 2] = Convert5To8((col >> 1) & 0x1f);
		dst[x * 4 + 3] = (col & 1) ? 255 : 0;
	}
}

void ConvertABGR4444ToRGBA8888(u32 *dst32, const u16 *src, u32 numPixels) {
	u8 *dst = (u8 *)dst32;
	for (u32 x = 0; x < numPixels; x++) {
		u16 col = src[x];
		dst[x * 4] = Convert4To8(col >> 12);
		dst[x * 4 + 1] = Convert4To8((col >> 8) & 0xf);
		dst[x * 4 + 2] = Convert4To8((col >> 4) & 0xf);
		dst[x * 4 + 3] = Convert4To8(col & 0xf);
	}
}

void ConvertRGBA4444ToBGRA8888(u32 *dst, const u16 *src, u32 numPixels) {
	for (u32 x = 0; x < numPixels; x++) {
		u16 c = src[x];
		u32 r = Convert4To8(c & 0x000f);
		u32 g = Convert4To8((c >> 4) & 0x000f);
		u32 b = Convert4To8((c >> 8) & 0x000f);
		u32 a = Convert4To8((c >> 12) & 0x000f);

		dst[x] = (a << 24) | (r << 16) | (g << 8) | b;
	}
}

void ConvertRGBA5551ToBGRA8888(u32 *dst, const u16 *src, u32 numPixels) {
	for (u32 x = 0; x < numPixels; x++) {
		u16 c = src[x];
		u32 r = Convert5To8(c & 0x001f);
		u32 g = Convert5To8((c >> 5) & 0x001f);
		u32 b = Convert5To8((c >> 10) & 0x001f);
		// We force an arithmetic shift to get the sign bits.
		u32 a = SignExtend16ToU32(c) & 0xff000000;

		dst[x] = a | (r << 16) | (g << 8) | b;
	}
}

void ConvertRGB565ToBGRA8888(u32 *dst, const u16 *src, u32 numPixels) {
	for (u32 x = 0; x < numPixels; x++) {
		u16 c = src[x];
		u32 r = Convert5To8(c & 0x001f);
		u32 g = Convert6To8((c >> 5) & 0x003f);
		u32 b = Convert5To8((c >> 11) & 0x001f);

		dst[x] = 0xFF000000 | (r << 16) | (g << 8) | b;
	}
}

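// The remaining converters swizzle 16-bit pixels into other 16-bit formats.
// Note the scalar tails: two pixels are processed per 32-bit load/store by
// repeating each shift's mask in both halves of the word, with a final
// single-pixel fixup when numPixels is odd.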
void ConvertRGBA4444ToABGR4444(u16 *dst, const u16 *src, u32 numPixels) {
#ifdef _M_SSE
	const __m128i mask0040 = _mm_set1_epi16(0x00F0);

	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst;
	u32 sseChunks = numPixels / 8;
	if (((intptr_t)src & 0xF) || ((intptr_t)dst & 0xF)) {
		sseChunks = 0;
	}
	for (u32 i = 0; i < sseChunks; ++i) {
		const __m128i c = _mm_load_si128(&srcp[i]);
		__m128i v = _mm_srli_epi16(c, 12);
		v = _mm_or_si128(v, _mm_and_si128(_mm_srli_epi16(c, 4), mask0040));
		v = _mm_or_si128(v, _mm_slli_epi16(_mm_and_si128(c, mask0040), 4));
		v = _mm_or_si128(v, _mm_slli_epi16(c, 12));
		_mm_store_si128(&dstp[i], v);
	}
	// The remainder is done in chunks of 2, SSE was chunks of 8.
	u32 i = sseChunks * 8 / 2;
#elif PPSSPP_ARCH(ARM_NEON)
	const uint16x8_t mask0040 = vdupq_n_u16(0x00F0);

	if (((uintptr_t)dst & 15) == 0 && ((uintptr_t)src & 15) == 0) {
		u32 simdable = (numPixels / 8) * 8;
		for (u32 i = 0; i < simdable; i += 8) {
			uint16x8_t c = vld1q_u16(src);

			const uint16x8_t a = vshrq_n_u16(c, 12);
			const uint16x8_t b = vandq_u16(vshrq_n_u16(c, 4), mask0040);
			const uint16x8_t g = vshlq_n_u16(vandq_u16(c, mask0040), 4);
			const uint16x8_t r = vshlq_n_u16(c, 12);

			uint16x8_t res = vorrq_u16(vorrq_u16(r, g), vorrq_u16(b, a));
			vst1q_u16(dst, res);

			src += 8;
			dst += 8;
		}
		numPixels -= simdable;
	}
	u32 i = 0;  // already moved the pointers forward
#else
	u32 i = 0;
#endif

	const u32 *src32 = (const u32 *)src;
	u32 *dst32 = (u32 *)dst;
	for (; i < numPixels / 2; i++) {
		const u32 c = src32[i];
		dst32[i] = ((c >> 12) & 0x000F000F) |
			((c >> 4) & 0x00F000F0) |
			((c << 4) & 0x0F000F00) |
			((c << 12) & 0xF000F000);
	}

	if (numPixels & 1) {
		const u32 i = numPixels - 1;
		const u16 c = src[i];
		dst[i] = ((c >> 12) & 0x000F) |
			((c >> 4) & 0x00F0) |
			((c << 4) & 0x0F00) |
			((c << 12) & 0xF000);
	}
}

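// RGBA5551 -> ABGR1555: alpha moves from bit 15 down to bit 0 and the three
// 5-bit fields reverse order (B to bits 1-5, G to 6-10, R to 11-15).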
void ConvertRGBA5551ToABGR1555(u16 *dst, const u16 *src, u32 numPixels) {
#ifdef _M_SSE
	const __m128i maskB = _mm_set1_epi16(0x003E);
	const __m128i maskG = _mm_set1_epi16(0x07C0);

	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst;
	u32 sseChunks = numPixels / 8;
	if (((intptr_t)src & 0xF) || ((intptr_t)dst & 0xF)) {
		sseChunks = 0;
	}
	for (u32 i = 0; i < sseChunks; ++i) {
		const __m128i c = _mm_load_si128(&srcp[i]);
		__m128i v = _mm_srli_epi16(c, 15);
		v = _mm_or_si128(v, _mm_and_si128(_mm_srli_epi16(c, 9), maskB));
		v = _mm_or_si128(v, _mm_and_si128(_mm_slli_epi16(c, 1), maskG));
		v = _mm_or_si128(v, _mm_slli_epi16(c, 11));
		_mm_store_si128(&dstp[i], v);
	}
	// The remainder is done in chunks of 2, SSE was chunks of 8.
	u32 i = sseChunks * 8 / 2;
#elif PPSSPP_ARCH(ARM_NEON)
	const uint16x8_t maskB = vdupq_n_u16(0x003E);
	const uint16x8_t maskG = vdupq_n_u16(0x07C0);

	if (((uintptr_t)dst & 15) == 0 && ((uintptr_t)src & 15) == 0) {
		u32 simdable = (numPixels / 8) * 8;
		for (u32 i = 0; i < simdable; i += 8) {
			uint16x8_t c = vld1q_u16(src);

			const uint16x8_t a = vshrq_n_u16(c, 15);
			const uint16x8_t b = vandq_u16(vshrq_n_u16(c, 9), maskB);
			const uint16x8_t g = vandq_u16(vshlq_n_u16(c, 1), maskG);
			const uint16x8_t r = vshlq_n_u16(c, 11);

			uint16x8_t res = vorrq_u16(vorrq_u16(r, g), vorrq_u16(b, a));
			vst1q_u16(dst, res);

			src += 8;
			dst += 8;
		}
		numPixels -= simdable;
	}
	u32 i = 0;
#else
	u32 i = 0;
#endif

	const u32 *src32 = (const u32 *)src;
	u32 *dst32 = (u32 *)dst;
	for (; i < numPixels / 2; i++) {
		const u32 c = src32[i];
		dst32[i] = ((c >> 15) & 0x00010001) |
			((c >> 9) & 0x003E003E) |
			((c << 1) & 0x07C007C0) |
			((c << 11) & 0xF800F800);
	}

	if (numPixels & 1) {
		const u32 i = numPixels - 1;
		const u16 c = src[i];
		dst[i] = ((c >> 15) & 0x0001) |
			((c >> 9) & 0x003E) |
			((c << 1) & 0x07C0) |
			((c << 11) & 0xF800);
	}
}

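// RGB565 -> BGR565 swaps the two 5-bit end fields and leaves the 6-bit G
// field in place.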
void ConvertRGB565ToBGR565(u16 *dst, const u16 *src, u32 numPixels) {
#ifdef _M_SSE
	const __m128i maskG = _mm_set1_epi16(0x07E0);

	const __m128i *srcp = (const __m128i *)src;
	__m128i *dstp = (__m128i *)dst;
	u32 sseChunks = numPixels / 8;
	if (((intptr_t)src & 0xF) || ((intptr_t)dst & 0xF)) {
		sseChunks = 0;
	}
	for (u32 i = 0; i < sseChunks; ++i) {
		const __m128i c = _mm_load_si128(&srcp[i]);
		__m128i v = _mm_srli_epi16(c, 11);
		v = _mm_or_si128(v, _mm_and_si128(c, maskG));
		v = _mm_or_si128(v, _mm_slli_epi16(c, 11));
		_mm_store_si128(&dstp[i], v);
	}
	// The remainder is done in chunks of 2, SSE was chunks of 8.
	u32 i = sseChunks * 8 / 2;
#elif PPSSPP_ARCH(ARM_NEON)
	const uint16x8_t maskG = vdupq_n_u16(0x07E0);

	if (((uintptr_t)dst & 15) == 0 && ((uintptr_t)src & 15) == 0) {
		u32 simdable = (numPixels / 8) * 8;
		for (u32 i = 0; i < simdable; i += 8) {
			uint16x8_t c = vld1q_u16(src);

			const uint16x8_t b = vshrq_n_u16(c, 11);
			const uint16x8_t g = vandq_u16(c, maskG);
			const uint16x8_t r = vshlq_n_u16(c, 11);

			uint16x8_t res = vorrq_u16(vorrq_u16(r, g), b);
			vst1q_u16(dst, res);

			src += 8;
			dst += 8;
		}
		numPixels -= simdable;
	}

	u32 i = 0;
#else
	u32 i = 0;
#endif

	// TODO: Add a 64-bit loop too.
	const u32 *src32 = (const u32 *)src;
	u32 *dst32 = (u32 *)dst;
	for (; i < numPixels / 2; i++) {
		const u32 c = src32[i];
		dst32[i] = ((c >> 11) & 0x001F001F) |
			((c >> 0) & 0x07E007E0) |
			((c << 11) & 0xF800F800);
	}

	if (numPixels & 1) {
		const u32 i = numPixels - 1;
		const u16 c = src[i];
		dst[i] = ((c >> 11) & 0x001F) |
			((c >> 0) & 0x07E0) |
			((c << 11) & 0xF800);
	}
}

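// BGRA5551 -> ABGR1555 is a rotate-left-by-1 of each 16-bit pixel: the alpha
// bit wraps from bit 15 to bit 0 while the color fields all shift up by one.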
void ConvertBGRA5551ToABGR1555(u16 *dst, const u16 *src, u32 numPixels) {
	const u32 *src32 = (const u32 *)src;
	u32 *dst32 = (u32 *)dst;
	for (u32 i = 0; i < numPixels / 2; i++) {
		const u32 c = src32[i];
		dst32[i] = ((c >> 15) & 0x00010001) | ((c << 1) & 0xFFFEFFFE);
	}

	if (numPixels & 1) {
		const u32 i = numPixels - 1;
		const u16 c = src[i];
		dst[i] = (c >> 15) | (c << 1);
	}
}