Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
MorsGames
GitHub Repository: MorsGames/sm64plus
Path: blob/master/src/pc/mixer.c
7857 views
1
#include <stdbool.h>
2
#include <stdint.h>
3
#include <string.h>
4
#include <ultra64.h>
5
6
#include "mixer.h"
7
8
#ifdef __SSE4_1__
9
#include <immintrin.h>
10
#define HAS_SSE41 1
11
#define HAS_NEON 0
12
#elif __ARM_NEON
13
#include <arm_neon.h>
14
#define HAS_SSE41 0
15
#define HAS_NEON 1
16
#else
17
#define HAS_SSE41 0
18
#define HAS_NEON 0
19
#endif
20
21
#pragma GCC optimize ("unroll-loops")
22
23
#if HAS_SSE41
24
#define LOADLH(l, h) _mm_castpd_si128(_mm_loadh_pd(_mm_load_sd((const double *)(l)), (const double *)(h)))
25
#endif
26
27
#define ROUND_UP_64(v) (((v) + 63) & ~63)
28
#define ROUND_UP_32(v) (((v) + 31) & ~31)
29
#define ROUND_UP_16(v) (((v) + 15) & ~15)
30
#define ROUND_UP_8(v) (((v) + 7) & ~7)
31
#define ROUND_DOWN_16(v) ((v) & ~0xf)
32
33
#ifdef NEW_AUDIO_UCODE
34
#define BUF_SIZE 2880
35
#define BUF_U8(a) (rspa.buf.as_u8 + ((a) - 0x450))
36
#define BUF_S16(a) (rspa.buf.as_s16 + ((a) - 0x450) / sizeof(int16_t))
37
#else
38
#define BUF_SIZE 2512
39
#define BUF_U8(a) (rspa.buf.as_u8 + (a))
40
#define BUF_S16(a) (rspa.buf.as_s16 + (a) / sizeof(int16_t))
41
#endif
42
43
// Emulated RSP audio-microcode state: "registers" set by the aSet* commands
// plus the 4 KiB-ish DMEM work buffer all commands operate on.
static struct {
    uint16_t in;       // DMEM offset of the current input buffer
    uint16_t out;      // DMEM offset of the current output buffer
    uint16_t nbytes;   // byte count for the next processing command

#ifdef NEW_AUDIO_UCODE
    uint16_t vol[2];   // current dry volume, left/right
    uint16_t rate[2];  // per-chunk volume ramp rate, left/right
    uint16_t vol_wet;  // current wet (reverb) volume
    uint16_t rate_wet; // wet volume ramp rate
#else
    int16_t vol[2];    // current envelope volume, left/right

    uint16_t dry_right; // DMEM offset of the dry right output
    uint16_t wet_left;  // DMEM offset of the wet left output
    uint16_t wet_right; // DMEM offset of the wet right output

    int16_t target[2];  // envelope ramp target, left/right
    int32_t rate[2];    // envelope ramp rate, 16.16 fixed point, left/right

    int16_t vol_dry;    // master dry mix factor
    int16_t vol_wet;    // master wet mix factor
#endif

    // Saved decoder history used when restarting ADPCM decode at a loop point.
    ADPCM_STATE *adpcm_loop_state;

    // VADPCM codebook: up to 8 predictors, 2 taps, 8 coefficients each.
    int16_t adpcm_table[8][2][8];

#ifdef NEW_AUDIO_UCODE
    uint16_t filter_count;
    int16_t filter[8];
#endif

    // The emulated DMEM scratch buffer, viewable as bytes or 16-bit samples.
    union {
        int16_t as_s16[BUF_SIZE / sizeof(int16_t)];
        uint8_t as_u8[BUF_SIZE];
    } buf;
} rspa;
81
82
// 4-tap interpolation filter coefficients for aResampleImpl, indexed by the
// top 6 bits of the fractional sample position (64 phases). Values are Q15;
// the second half mirrors the first (phase i is the reverse of phase 63-i).
static int16_t resample_table[64][4] = {
    {0x0c39, 0x66ad, 0x0d46, 0xffdf}, {0x0b39, 0x6696, 0x0e5f, 0xffd8},
    {0x0a44, 0x6669, 0x0f83, 0xffd0}, {0x095a, 0x6626, 0x10b4, 0xffc8},
    {0x087d, 0x65cd, 0x11f0, 0xffbf}, {0x07ab, 0x655e, 0x1338, 0xffb6},
    {0x06e4, 0x64d9, 0x148c, 0xffac}, {0x0628, 0x643f, 0x15eb, 0xffa1},
    {0x0577, 0x638f, 0x1756, 0xff96}, {0x04d1, 0x62cb, 0x18cb, 0xff8a},
    {0x0435, 0x61f3, 0x1a4c, 0xff7e}, {0x03a4, 0x6106, 0x1bd7, 0xff71},
    {0x031c, 0x6007, 0x1d6c, 0xff64}, {0x029f, 0x5ef5, 0x1f0b, 0xff56},
    {0x022a, 0x5dd0, 0x20b3, 0xff48}, {0x01be, 0x5c9a, 0x2264, 0xff3a},
    {0x015b, 0x5b53, 0x241e, 0xff2c}, {0x0101, 0x59fc, 0x25e0, 0xff1e},
    {0x00ae, 0x5896, 0x27a9, 0xff10}, {0x0063, 0x5720, 0x297a, 0xff02},
    {0x001f, 0x559d, 0x2b50, 0xfef4}, {0xffe2, 0x540d, 0x2d2c, 0xfee8},
    {0xffac, 0x5270, 0x2f0d, 0xfedb}, {0xff7c, 0x50c7, 0x30f3, 0xfed0},
    {0xff53, 0x4f14, 0x32dc, 0xfec6}, {0xff2e, 0x4d57, 0x34c8, 0xfebd},
    {0xff0f, 0x4b91, 0x36b6, 0xfeb6}, {0xfef5, 0x49c2, 0x38a5, 0xfeb0},
    {0xfedf, 0x47ed, 0x3a95, 0xfeac}, {0xfece, 0x4611, 0x3c85, 0xfeab},
    {0xfec0, 0x4430, 0x3e74, 0xfeac}, {0xfeb6, 0x424a, 0x4060, 0xfeaf},
    {0xfeaf, 0x4060, 0x424a, 0xfeb6}, {0xfeac, 0x3e74, 0x4430, 0xfec0},
    {0xfeab, 0x3c85, 0x4611, 0xfece}, {0xfeac, 0x3a95, 0x47ed, 0xfedf},
    {0xfeb0, 0x38a5, 0x49c2, 0xfef5}, {0xfeb6, 0x36b6, 0x4b91, 0xff0f},
    {0xfebd, 0x34c8, 0x4d57, 0xff2e}, {0xfec6, 0x32dc, 0x4f14, 0xff53},
    {0xfed0, 0x30f3, 0x50c7, 0xff7c}, {0xfedb, 0x2f0d, 0x5270, 0xffac},
    {0xfee8, 0x2d2c, 0x540d, 0xffe2}, {0xfef4, 0x2b50, 0x559d, 0x001f},
    {0xff02, 0x297a, 0x5720, 0x0063}, {0xff10, 0x27a9, 0x5896, 0x00ae},
    {0xff1e, 0x25e0, 0x59fc, 0x0101}, {0xff2c, 0x241e, 0x5b53, 0x015b},
    {0xff3a, 0x2264, 0x5c9a, 0x01be}, {0xff48, 0x20b3, 0x5dd0, 0x022a},
    {0xff56, 0x1f0b, 0x5ef5, 0x029f}, {0xff64, 0x1d6c, 0x6007, 0x031c},
    {0xff71, 0x1bd7, 0x6106, 0x03a4}, {0xff7e, 0x1a4c, 0x61f3, 0x0435},
    {0xff8a, 0x18cb, 0x62cb, 0x04d1}, {0xff96, 0x1756, 0x638f, 0x0577},
    {0xffa1, 0x15eb, 0x643f, 0x0628}, {0xffac, 0x148c, 0x64d9, 0x06e4},
    {0xffb6, 0x1338, 0x655e, 0x07ab}, {0xffbf, 0x11f0, 0x65cd, 0x087d},
    {0xffc8, 0x10b4, 0x6626, 0x095a}, {0xffd0, 0x0f83, 0x6669, 0x0a44},
    {0xffd8, 0x0e5f, 0x6696, 0x0b39}, {0xffdf, 0x0d46, 0x66ad, 0x0c39}
};
116
117
// Saturate a 32-bit value into the signed 16-bit sample range.
static inline int16_t clamp16(int32_t v) {
    if (v > 0x7fff) {
        return 0x7fff;
    }
    return v < -0x8000 ? (int16_t)-0x8000 : (int16_t)v;
}
125
126
// Saturate a 64-bit value into the signed 32-bit range.
static inline int32_t clamp32(int64_t v) {
    const int64_t hi = 0x7fffffff;
    const int64_t lo = -0x7fffffff - 1;
    if (v > hi) {
        return (int32_t)hi;
    }
    return v < lo ? (int32_t)lo : (int32_t)v;
}
134
135
// Zero a DMEM region; the byte count is widened to a 16-byte multiple,
// matching the RSP command's granularity.
void aClearBufferImpl(uint16_t addr, int nbytes) {
    memset(BUF_U8(addr), 0, ROUND_UP_16(nbytes));
}
139
140
#ifdef NEW_AUDIO_UCODE
// Copy sample data from RAM into DMEM; count truncated DOWN to 16 bytes,
// mirroring the new microcode's transfer granularity.
void aLoadBufferImpl(const void *source_addr, uint16_t dest_addr, uint16_t nbytes) {
    memcpy(BUF_U8(dest_addr), source_addr, ROUND_DOWN_16(nbytes));
}

// Copy processed samples from DMEM back out to RAM (count truncated to 16).
void aSaveBufferImpl(uint16_t source_addr, int16_t *dest_addr, uint16_t nbytes) {
    memcpy(dest_addr, BUF_S16(source_addr), ROUND_DOWN_16(nbytes));
}
#else
// Copy sample data from RAM into DMEM at rspa.in; count rounded UP to 8
// bytes (old microcode granularity), so the source must be slightly padded.
void aLoadBufferImpl(const void *source_addr) {
    memcpy(BUF_U8(rspa.in), source_addr, ROUND_UP_8(rspa.nbytes));
}

// Copy processed samples from DMEM at rspa.out back to RAM (rounded up to 8).
void aSaveBufferImpl(int16_t *dest_addr) {
    memcpy(dest_addr, BUF_S16(rspa.out), ROUND_UP_8(rspa.nbytes));
}
#endif
157
158
// Load the VADPCM codebook into rspa.adpcm_table. The size argument is
// already in bytes (entries * 16, as the name says), not an entry count.
void aLoadADPCMImpl(int num_entries_times_16, const int16_t *book_source_addr) {
    memcpy(rspa.adpcm_table, book_source_addr, num_entries_times_16);
}
161
162
// Set the DMEM buffer "registers" used by subsequent commands. On the old
// microcode, A_AUX reuses the three operands as the auxiliary output
// pointers (dry right / wet left / wet right) instead.
void aSetBufferImpl(uint8_t flags, uint16_t in, uint16_t out, uint16_t nbytes) {
#ifndef NEW_AUDIO_UCODE
    if (flags & A_AUX) {
        rspa.dry_right = in;
        rspa.wet_left = out;
        rspa.wet_right = nbytes;
        return;
    }
#endif
    rspa.in = in;
    rspa.out = out;
    rspa.nbytes = nbytes;
}
175
176
#ifndef NEW_AUDIO_UCODE
177
void aSetVolumeImpl(uint8_t flags, int16_t v, int16_t t, int16_t r) {
178
if (flags & A_AUX) {
179
rspa.vol_dry = v;
180
rspa.vol_wet = r;
181
} else if (flags & A_VOL) {
182
if (flags & A_LEFT) {
183
rspa.vol[0] = v;
184
} else {
185
rspa.vol[1] = v;
186
}
187
} else {
188
if (flags & A_LEFT) {
189
rspa.target[0] = v;
190
rspa.rate[0] = (int32_t)((uint16_t)t << 16 | ((uint16_t)r));
191
} else {
192
rspa.target[1] = v;
193
rspa.rate[1] = (int32_t)((uint16_t)t << 16 | ((uint16_t)r));
194
}
195
}
196
}
197
#endif
198
199
#ifdef NEW_AUDIO_UCODE
200
void aInterleaveImpl(uint16_t dest, uint16_t left, uint16_t right, uint16_t c) {
201
int count = ROUND_UP_8(c) / sizeof(int16_t) / 4;
202
int16_t *l = BUF_S16(left);
203
int16_t *r = BUF_S16(right);
204
int16_t *d = BUF_S16(dest);
205
while (count > 0) {
206
int16_t l0 = *l++;
207
int16_t l1 = *l++;
208
int16_t l2 = *l++;
209
int16_t l3 = *l++;
210
int16_t r0 = *r++;
211
int16_t r1 = *r++;
212
int16_t r2 = *r++;
213
int16_t r3 = *r++;
214
*d++ = l0;
215
*d++ = r0;
216
*d++ = l1;
217
*d++ = r1;
218
*d++ = l2;
219
*d++ = r2;
220
*d++ = l3;
221
*d++ = r3;
222
--count;
223
}
224
}
225
#else
226
void aInterleaveImpl(uint16_t left, uint16_t right) {
227
int count = ROUND_UP_16(rspa.nbytes) / sizeof(int16_t) / 8;
228
int16_t *l = BUF_S16(left);
229
int16_t *r = BUF_S16(right);
230
int16_t *d = BUF_S16(rspa.out);
231
while (count > 0) {
232
int16_t l0 = *l++;
233
int16_t l1 = *l++;
234
int16_t l2 = *l++;
235
int16_t l3 = *l++;
236
int16_t l4 = *l++;
237
int16_t l5 = *l++;
238
int16_t l6 = *l++;
239
int16_t l7 = *l++;
240
int16_t r0 = *r++;
241
int16_t r1 = *r++;
242
int16_t r2 = *r++;
243
int16_t r3 = *r++;
244
int16_t r4 = *r++;
245
int16_t r5 = *r++;
246
int16_t r6 = *r++;
247
int16_t r7 = *r++;
248
*d++ = l0;
249
*d++ = r0;
250
*d++ = l1;
251
*d++ = r1;
252
*d++ = l2;
253
*d++ = r2;
254
*d++ = l3;
255
*d++ = r3;
256
*d++ = l4;
257
*d++ = r4;
258
*d++ = l5;
259
*d++ = r5;
260
*d++ = l6;
261
*d++ = r6;
262
*d++ = l7;
263
*d++ = r7;
264
--count;
265
}
266
}
267
#endif
268
269
// Move bytes within DMEM; memmove because the two regions may overlap.
// Byte count is widened to a 16-byte multiple like the RSP command.
void aDMEMMoveImpl(uint16_t in_addr, uint16_t out_addr, int nbytes) {
    memmove(BUF_U8(out_addr), BUF_U8(in_addr), ROUND_UP_16(nbytes));
}
273
274
// Remember the decoder history saved at a sample's loop point, used by
// aADPCMdecImpl when it is invoked with A_LOOP.
void aSetLoopImpl(ADPCM_STATE *adpcm_loop_state) {
    rspa.adpcm_loop_state = adpcm_loop_state;
}
277
278
// Decode VADPCM data from DMEM at rspa.in into 16-bit samples at rspa.out.
// Each 9-byte input frame (1 header byte + 8 nibble-packed bytes) expands to
// 16 samples via a 2-tap predictor from rspa.adpcm_table plus the decoded
// residuals. The first 16 output slots hold decoder history: zeros (A_INIT),
// the saved loop state (A_LOOP), or the caller's state. The last 16 decoded
// samples are written back to `state` for the next call.
void aADPCMdecImpl(uint8_t flags, ADPCM_STATE state) {
#if HAS_SSE41
    // Shuffle constants: tblrev reverses codebook order 2 for the residual
    // convolution; pos0/pos1 replicate input bytes into 16-bit lanes; mult
    // and mask extract the high/low nibbles into the top 4 bits of each lane.
    const __m128i tblrev = _mm_setr_epi8(12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1, -1, -1);
    const __m128i pos0 = _mm_set_epi8(3, -1, 3, -1, 2, -1, 2, -1, 1, -1, 1, -1, 0, -1, 0, -1);
    const __m128i pos1 = _mm_set_epi8(7, -1, 7, -1, 6, -1, 6, -1, 5, -1, 5, -1, 4, -1, 4, -1);
    const __m128i mult = _mm_set_epi16(0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01);
    const __m128i mask = _mm_set1_epi16((int16_t)0xf000);
#elif HAS_NEON
    // NEON equivalents of the shuffle constants above. table_prefix carries
    // the implicit 1<<11 unit coefficient for the residual's own sample.
    static const int8_t pos0_data[] = {-1, 0, -1, 0, -1, 1, -1, 1, -1, 2, -1, 2, -1, 3, -1, 3};
    static const int8_t pos1_data[] = {-1, 4, -1, 4, -1, 5, -1, 5, -1, 6, -1, 6, -1, 7, -1, 7};
    static const int16_t mult_data[] = {0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10};
    static const int16_t table_prefix_data[] = {0, 0, 0, 0, 0, 0, 0, 1 << 11};
    const int8x16_t pos0 = vld1q_s8(pos0_data);
    const int8x16_t pos1 = vld1q_s8(pos1_data);
    const int16x8_t mult = vld1q_s16(mult_data);
    const int16x8_t mask = vdupq_n_s16((int16_t)0xf000);
    const int16x8_t table_prefix = vld1q_s16(table_prefix_data);
#endif
    uint8_t *in = BUF_U8(rspa.in);
    int16_t *out = BUF_S16(rspa.out);
    int nbytes = ROUND_UP_32(rspa.nbytes);
    // Seed the 16-sample history block preceding the decoded output.
    if (flags & A_INIT) {
        memset(out, 0, 16 * sizeof(int16_t));
    } else if (flags & A_LOOP) {
        memcpy(out, rspa.adpcm_loop_state, 16 * sizeof(int16_t));
    } else {
        memcpy(out, state, 16 * sizeof(int16_t));
    }
    out += 16;
#if HAS_SSE41
    // Last two history samples broadcast as a packed (prev2, prev1) pair.
    __m128i prev_interleaved = _mm_set1_epi32((uint16_t)out[-2] | ((uint16_t)out[-1] << 16));
    //__m128i prev_interleaved = _mm_shuffle_epi32(_mm_loadu_si32(out - 2), 0); // GCC misses this?
#elif HAS_NEON
    int16x8_t result = vld1q_s16(out - 8);
#endif
    while (nbytes > 0) {
        // Frame header byte: high nibble = scale shift, low nibble = predictor.
        int shift = *in >> 4; // should be in 0..12
        int table_index = *in++ & 0xf; // should be in 0..7
        int16_t (*tbl)[8] = rspa.adpcm_table[table_index];
        int i;
#if HAS_SSE41
        // The _mm_loadu_si64 instruction was added in GCC 9, and results in the same
        // asm as the following instructions, so better be compatible with old GCC.
        //__m128i inv = _mm_loadu_si64(in);
        uint64_t v; memcpy(&v, in, 8);
        __m128i inv = _mm_set_epi64x(0, v);
        // Spread the 8 data bytes so each 4-bit residual lands in its own lane.
        __m128i invec[2] = {_mm_shuffle_epi8(inv, pos0), _mm_shuffle_epi8(inv, pos1)};
        __m128i tblvec0 = _mm_loadu_si128((const __m128i *)tbl[0]);
        __m128i tblvec1 = _mm_loadu_si128((const __m128i *)(tbl[1]));
        __m128i tbllo = _mm_unpacklo_epi16(tblvec0, tblvec1);
        __m128i tblhi = _mm_unpackhi_epi16(tblvec0, tblvec1);
        __m128i shiftcount = _mm_set_epi64x(0, 12 - shift); // _mm_cvtsi64_si128 does not exist on 32-bit x86
        __m128i tblvec1_rev[8];

        // Reversed order-2 codebook shifted by k, for the triangular
        // residual convolution (each output mixes in earlier residuals).
        tblvec1_rev[0] = _mm_insert_epi16(_mm_shuffle_epi8(tblvec1, tblrev), 1 << 11, 7);
        tblvec1_rev[1] = _mm_bsrli_si128(tblvec1_rev[0], 2);
        tblvec1_rev[2] = _mm_bsrli_si128(tblvec1_rev[0], 4);
        tblvec1_rev[3] = _mm_bsrli_si128(tblvec1_rev[0], 6);
        tblvec1_rev[4] = _mm_bsrli_si128(tblvec1_rev[0], 8);
        tblvec1_rev[5] = _mm_bsrli_si128(tblvec1_rev[0], 10);
        tblvec1_rev[6] = _mm_bsrli_si128(tblvec1_rev[0], 12);
        tblvec1_rev[7] = _mm_bsrli_si128(tblvec1_rev[0], 14);
        in += 8;
        for (i = 0; i < 2; i++) {
            // Predictor contribution from the previous two samples.
            __m128i acc0 = _mm_madd_epi16(prev_interleaved, tbllo);
            __m128i acc1 = _mm_madd_epi16(prev_interleaved, tblhi);
            __m128i muls[8];
            __m128i result;
            // Sign-extend the nibbles and apply the frame's scale shift.
            invec[i] = _mm_sra_epi16(_mm_and_si128(_mm_mullo_epi16(invec[i], mult), mask), shiftcount);

            muls[7] = _mm_madd_epi16(tblvec1_rev[0], invec[i]);
            muls[6] = _mm_madd_epi16(tblvec1_rev[1], invec[i]);
            muls[5] = _mm_madd_epi16(tblvec1_rev[2], invec[i]);
            muls[4] = _mm_madd_epi16(tblvec1_rev[3], invec[i]);
            muls[3] = _mm_madd_epi16(tblvec1_rev[4], invec[i]);
            muls[2] = _mm_madd_epi16(tblvec1_rev[5], invec[i]);
            muls[1] = _mm_madd_epi16(tblvec1_rev[6], invec[i]);
            muls[0] = _mm_madd_epi16(tblvec1_rev[7], invec[i]);

            acc0 = _mm_add_epi32(acc0, _mm_hadd_epi32(_mm_hadd_epi32(muls[0], muls[1]), _mm_hadd_epi32(muls[2], muls[3])));
            acc1 = _mm_add_epi32(acc1, _mm_hadd_epi32(_mm_hadd_epi32(muls[4], muls[5]), _mm_hadd_epi32(muls[6], muls[7])));

            // Drop the Q11 fixed-point scale and pack with saturation.
            acc0 = _mm_srai_epi32(acc0, 11);
            acc1 = _mm_srai_epi32(acc1, 11);

            result = _mm_packs_epi32(acc0, acc1);
            _mm_storeu_si128((__m128i *)out, result);
            out += 8;

            prev_interleaved = _mm_shuffle_epi32(result, _MM_SHUFFLE(3, 3, 3, 3));
        }
#elif HAS_NEON
        int8x8_t inv = vld1_s8((int8_t *)in);
        int16x8_t tblvec[2] = {vld1q_s16(tbl[0]), vld1q_s16(tbl[1])};
        // Spread input bytes so each 4-bit residual gets its own 16-bit lane.
        int16x8_t invec[2] = {vreinterpretq_s16_s8(vcombine_s8(vtbl1_s8(inv, vget_low_s8(pos0)),
                                                               vtbl1_s8(inv, vget_high_s8(pos0)))),
                              vreinterpretq_s16_s8(vcombine_s8(vtbl1_s8(inv, vget_low_s8(pos1)),
                                                               vtbl1_s8(inv, vget_high_s8(pos1))))};
        int16x8_t shiftcount = vdupq_n_s16(shift - 12); // negative means right shift
        int16x8_t tblvec1[8];

        in += 8;
        // Shifted views of the order-2 codebook for the residual convolution;
        // interleaved with the nibble extraction to help scheduling.
        tblvec1[0] = vextq_s16(table_prefix, tblvec[1], 7);
        invec[0] = vmulq_s16(invec[0], mult);
        tblvec1[1] = vextq_s16(table_prefix, tblvec[1], 6);
        invec[1] = vmulq_s16(invec[1], mult);
        tblvec1[2] = vextq_s16(table_prefix, tblvec[1], 5);
        tblvec1[3] = vextq_s16(table_prefix, tblvec[1], 4);
        invec[0] = vandq_s16(invec[0], mask);
        tblvec1[4] = vextq_s16(table_prefix, tblvec[1], 3);
        invec[1] = vandq_s16(invec[1], mask);
        tblvec1[5] = vextq_s16(table_prefix, tblvec[1], 2);
        tblvec1[6] = vextq_s16(table_prefix, tblvec[1], 1);
        invec[0] = vqshlq_s16(invec[0], shiftcount);
        invec[1] = vqshlq_s16(invec[1], shiftcount);
        tblvec1[7] = table_prefix;
        for (i = 0; i < 2; i++) {
            int32x4_t acc0;
            int32x4_t acc1;

            // Predictor contribution: last two samples of the previous result.
            acc1 = vmull_lane_s16(vget_high_s16(tblvec[0]), vget_high_s16(result), 2);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec[1]), vget_high_s16(result), 3);
            acc0 = vmull_lane_s16(vget_low_s16(tblvec[0]), vget_high_s16(result), 2);
            acc0 = vmlal_lane_s16(acc0, vget_low_s16(tblvec[1]), vget_high_s16(result), 3);

            acc0 = vmlal_lane_s16(acc0, vget_low_s16(tblvec1[0]), vget_low_s16(invec[i]), 0);
            acc0 = vmlal_lane_s16(acc0, vget_low_s16(tblvec1[1]), vget_low_s16(invec[i]), 1);
            acc0 = vmlal_lane_s16(acc0, vget_low_s16(tblvec1[2]), vget_low_s16(invec[i]), 2);
            acc0 = vmlal_lane_s16(acc0, vget_low_s16(tblvec1[3]), vget_low_s16(invec[i]), 3);

            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[0]), vget_low_s16(invec[i]), 0);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[1]), vget_low_s16(invec[i]), 1);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[2]), vget_low_s16(invec[i]), 2);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[3]), vget_low_s16(invec[i]), 3);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[4]), vget_high_s16(invec[i]), 0);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[5]), vget_high_s16(invec[i]), 1);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[6]), vget_high_s16(invec[i]), 2);
            acc1 = vmlal_lane_s16(acc1, vget_high_s16(tblvec1[7]), vget_high_s16(invec[i]), 3);

            // Drop the Q11 scale with saturating narrow, then store.
            result = vcombine_s16(vqshrn_n_s32(acc0, 11), vqshrn_n_s32(acc1, 11));
            vst1q_s16(out, result);
            out += 8;
        }
#else
        // Scalar reference path: decode two groups of 8 samples per frame.
        for (i = 0; i < 2; i++) {
            int16_t ins[8];
            int16_t prev1 = out[-1];
            int16_t prev2 = out[-2];
            int j, k;
            // Sign-extend each 4-bit residual (via the <<28 >>28 trick) and
            // apply the frame's scale shift.
            for (j = 0; j < 4; j++) {
                ins[j * 2] = (((*in >> 4) << 28) >> 28) << shift;
                ins[j * 2 + 1] = (((*in++ & 0xf) << 28) >> 28) << shift;
            }
            // Predict from the two previous samples plus the residual (Q11),
            // folding in earlier residuals of this group through tbl[1].
            for (j = 0; j < 8; j++) {
                int32_t acc = tbl[0][j] * prev2 + tbl[1][j] * prev1 + (ins[j] << 11);
                for (k = 0; k < j; k++) {
                    acc += tbl[1][((j - k) - 1)] * ins[k];
                }
                acc >>= 11;
                *out++ = clamp16(acc);
            }
        }
#endif
        nbytes -= 16 * sizeof(int16_t);
    }
    // Persist the final 16 samples as decoder history for the next call.
    memcpy(state, out - 16, 16 * sizeof(int16_t));
}
445
446
// Pitch-shift/resample samples from rspa.in to rspa.out using the 64-phase,
// 4-tap filter in resample_table. `pitch` is half the 16.16 step (it gets
// doubled); `state` persists the fractional position (slot 4), an input
// rewind offset (slot 5), the last 4 input samples (slots 0-3) and an
// 8-sample tail (slots 8-15) across calls.
void aResampleImpl(uint8_t flags, uint16_t pitch, RESAMPLE_STATE state) {
    int16_t tmp[16];
    int16_t *in_initial = BUF_S16(rspa.in);
    int16_t *in = in_initial;
    int16_t *out = BUF_S16(rspa.out);
    int nbytes = ROUND_UP_16(rspa.nbytes);
    uint32_t pitch_accumulator;
    int i;
#if !HAS_SSE41 && !HAS_NEON
    int16_t *tbl;
    int32_t sample;
#endif
    if (flags & A_INIT) {
        memset(tmp, 0, 5 * sizeof(int16_t));
    } else {
        memcpy(tmp, state, 16 * sizeof(int16_t));
    }
    // NOTE(review): flag bit 2 appears to restore the saved input tail and
    // rewind by the saved offset (state slot 5) — presumably A_OUT; confirm
    // against the ucode ABI.
    if (flags & 2) {
        memcpy(in - 8, tmp + 8, 8 * sizeof(int16_t));
        in -= tmp[5] / sizeof(int16_t);
    }
    // Prepend the 4 history samples needed by the 4-tap filter.
    in -= 4;
    pitch_accumulator = (uint16_t)tmp[4];
    memcpy(in, tmp, 4 * sizeof(int16_t));

#if HAS_SSE41
    // Eight parallel 16.16 position accumulators: high half = sample index,
    // low half = fractional phase.
    __m128i multiples = _mm_setr_epi16(0, 2, 4, 6, 8, 10, 12, 14);
    __m128i pitchvec = _mm_set1_epi16((int16_t)pitch);
    __m128i pitchvec_8_steps = _mm_set1_epi32((pitch << 1) * 8);
    __m128i pitchacclo_vec = _mm_set1_epi32((uint16_t)pitch_accumulator);
    __m128i pl = _mm_mullo_epi16(multiples, pitchvec);
    __m128i ph = _mm_mulhi_epu16(multiples, pitchvec);
    __m128i acc_a = _mm_add_epi32(_mm_unpacklo_epi16(pl, ph), pitchacclo_vec);
    __m128i acc_b = _mm_add_epi32(_mm_unpackhi_epi16(pl, ph), pitchacclo_vec);

    do {
        // Top 6 fractional bits select the filter phase for each output.
        __m128i tbl_positions = _mm_srli_epi16(_mm_packus_epi32(
            _mm_and_si128(acc_a, _mm_set1_epi32(0xffff)),
            _mm_and_si128(acc_b, _mm_set1_epi32(0xffff))), 10);

        __m128i in_positions = _mm_packus_epi32(_mm_srli_epi32(acc_a, 16), _mm_srli_epi32(acc_b, 16));
        __m128i tbl_entries[4];
        __m128i samples[4];

        /*for (i = 0; i < 4; i++) {
            tbl_entries[i] = _mm_castpd_si128(_mm_loadh_pd(_mm_load_sd(
                (const double *)resample_table[_mm_extract_epi16(tbl_positions, 2 * i)]),
                (const double *)resample_table[_mm_extract_epi16(tbl_positions, 2 * i + 1)]));
            samples[i] = _mm_castpd_si128(_mm_loadh_pd(_mm_load_sd(
                (const double *)&in[_mm_extract_epi16(in_positions, 2 * i)]),
                (const double *)&in[_mm_extract_epi16(in_positions, 2 * i + 1)]));
            samples[i] = _mm_mulhrs_epi16(samples[i], tbl_entries[i]);
        }*/
        tbl_entries[0] = LOADLH(resample_table[_mm_extract_epi16(tbl_positions, 0)], resample_table[_mm_extract_epi16(tbl_positions, 1)]);
        tbl_entries[1] = LOADLH(resample_table[_mm_extract_epi16(tbl_positions, 2)], resample_table[_mm_extract_epi16(tbl_positions, 3)]);
        tbl_entries[2] = LOADLH(resample_table[_mm_extract_epi16(tbl_positions, 4)], resample_table[_mm_extract_epi16(tbl_positions, 5)]);
        tbl_entries[3] = LOADLH(resample_table[_mm_extract_epi16(tbl_positions, 6)], resample_table[_mm_extract_epi16(tbl_positions, 7)]);
        samples[0] = LOADLH(&in[_mm_extract_epi16(in_positions, 0)], &in[_mm_extract_epi16(in_positions, 1)]);
        samples[1] = LOADLH(&in[_mm_extract_epi16(in_positions, 2)], &in[_mm_extract_epi16(in_positions, 3)]);
        samples[2] = LOADLH(&in[_mm_extract_epi16(in_positions, 4)], &in[_mm_extract_epi16(in_positions, 5)]);
        samples[3] = LOADLH(&in[_mm_extract_epi16(in_positions, 6)], &in[_mm_extract_epi16(in_positions, 7)]);
        // Q15 rounding multiply of each 4-sample window by its filter phase,
        // then horizontal-add the taps with saturation.
        samples[0] = _mm_mulhrs_epi16(samples[0], tbl_entries[0]);
        samples[1] = _mm_mulhrs_epi16(samples[1], tbl_entries[1]);
        samples[2] = _mm_mulhrs_epi16(samples[2], tbl_entries[2]);
        samples[3] = _mm_mulhrs_epi16(samples[3], tbl_entries[3]);

        _mm_storeu_si128((__m128i *)out, _mm_hadds_epi16(_mm_hadds_epi16(samples[0], samples[1]), _mm_hadds_epi16(samples[2], samples[3])));

        acc_a = _mm_add_epi32(acc_a, pitchvec_8_steps);
        acc_b = _mm_add_epi32(acc_b, pitchvec_8_steps);
        out += 8;
        nbytes -= 8 * sizeof(int16_t);
    } while (nbytes > 0);
    // Recover the final integer position and fractional phase from lane 0.
    in += (uint16_t)_mm_extract_epi16(acc_a, 1);
    pitch_accumulator = (uint16_t)_mm_extract_epi16(acc_a, 0);
#elif HAS_NEON
    // Same scheme as the SSE path with NEON intrinsics.
    static const uint16_t multiples_data[8] = {0, 2, 4, 6, 8, 10, 12, 14};
    uint16x8_t multiples = vld1q_u16(multiples_data);
    uint32x4_t pitchvec_8_steps = vdupq_n_u32((pitch << 1) * 8);
    uint32x4_t pitchacclo_vec = vdupq_n_u32((uint16_t)pitch_accumulator);
    uint32x4_t acc_a = vmlal_n_u16(pitchacclo_vec, vget_low_u16(multiples), pitch);
    uint32x4_t acc_b = vmlal_n_u16(pitchacclo_vec, vget_high_u16(multiples), pitch);

    do {
        uint16x8x2_t unzipped = vuzpq_u16(vreinterpretq_u16_u32(acc_a), vreinterpretq_u16_u32(acc_b));
        uint16x8_t tbl_positions = vshrq_n_u16(unzipped.val[0], 10);
        uint16x8_t in_positions = unzipped.val[1];
        int16x8_t tbl_entries[4];
        int16x8_t samples[4];
        int16x8x2_t unzipped1;
        int16x8x2_t unzipped2;

        tbl_entries[0] = vcombine_s16(vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 0)]), vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 1)]));
        tbl_entries[1] = vcombine_s16(vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 2)]), vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 3)]));
        tbl_entries[2] = vcombine_s16(vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 4)]), vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 5)]));
        tbl_entries[3] = vcombine_s16(vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 6)]), vld1_s16(resample_table[vgetq_lane_u16(tbl_positions, 7)]));
        samples[0] = vcombine_s16(vld1_s16(&in[vgetq_lane_u16(in_positions, 0)]), vld1_s16(&in[vgetq_lane_u16(in_positions, 1)]));
        samples[1] = vcombine_s16(vld1_s16(&in[vgetq_lane_u16(in_positions, 2)]), vld1_s16(&in[vgetq_lane_u16(in_positions, 3)]));
        samples[2] = vcombine_s16(vld1_s16(&in[vgetq_lane_u16(in_positions, 4)]), vld1_s16(&in[vgetq_lane_u16(in_positions, 5)]));
        samples[3] = vcombine_s16(vld1_s16(&in[vgetq_lane_u16(in_positions, 6)]), vld1_s16(&in[vgetq_lane_u16(in_positions, 7)]));
        // Q15 rounding multiply, then pairwise-add the 4 taps via unzips.
        samples[0] = vqrdmulhq_s16(samples[0], tbl_entries[0]);
        samples[1] = vqrdmulhq_s16(samples[1], tbl_entries[1]);
        samples[2] = vqrdmulhq_s16(samples[2], tbl_entries[2]);
        samples[3] = vqrdmulhq_s16(samples[3], tbl_entries[3]);

        unzipped1 = vuzpq_s16(samples[0], samples[1]);
        unzipped2 = vuzpq_s16(samples[2], samples[3]);
        samples[0] = vqaddq_s16(unzipped1.val[0], unzipped1.val[1]);
        samples[1] = vqaddq_s16(unzipped2.val[0], unzipped2.val[1]);
        unzipped1 = vuzpq_s16(samples[0], samples[1]);
        samples[0] = vqaddq_s16(unzipped1.val[0], unzipped1.val[1]);

        vst1q_s16(out, samples[0]);

        acc_a = vaddq_u32(acc_a, pitchvec_8_steps);
        acc_b = vaddq_u32(acc_b, pitchvec_8_steps);
        out += 8;
        nbytes -= 8 * sizeof(int16_t);
    } while (nbytes > 0);
    in += vgetq_lane_u16(vreinterpretq_u16_u32(acc_a), 1);
    pitch_accumulator = vgetq_lane_u16(vreinterpretq_u16_u32(acc_a), 0);
#else
    // Scalar reference: 4-tap Q15 filter per output sample, stepping the
    // 16.16 accumulator by 2*pitch and advancing the input by its carry.
    do {
        for (i = 0; i < 8; i++) {
            tbl = resample_table[pitch_accumulator * 64 >> 16];
            sample = ((in[0] * tbl[0] + 0x4000) >> 15) +
                     ((in[1] * tbl[1] + 0x4000) >> 15) +
                     ((in[2] * tbl[2] + 0x4000) >> 15) +
                     ((in[3] * tbl[3] + 0x4000) >> 15);
            *out++ = clamp16(sample);

            pitch_accumulator += (pitch << 1);
            in += pitch_accumulator >> 16;
            pitch_accumulator %= 0x10000;
        }
        nbytes -= 8 * sizeof(int16_t);
    } while (nbytes > 0);
#endif

    // Persist fractional phase, last 4 input samples, rewind offset and the
    // 8-sample aligned tail for the next call.
    state[4] = (int16_t)pitch_accumulator;
    memcpy(state, in, 4 * sizeof(int16_t));
    i = (in - in_initial + 4) & 7;
    in -= i;
    if (i != 0) {
        i = -8 - i;
    }
    state[5] = i;
    memcpy(state + 8, in, 8 * sizeof(int16_t));
}
595
596
#ifdef NEW_AUDIO_UCODE
597
// Envelope setup, part 1 (new microcode): wet volume (high byte of a 16-bit
// value), wet ramp rate, and the left/right dry ramp rates.
void aEnvSetup1Impl(uint8_t initial_vol_wet, uint16_t rate_wet, uint16_t rate_left, uint16_t rate_right) {
    rspa.vol_wet = (uint16_t)(initial_vol_wet << 8);
    rspa.rate_wet = rate_wet;
    rspa.rate[0] = rate_left;
    rspa.rate[1] = rate_right;
}
603
604
// Envelope setup, part 2 (new microcode): initial dry volumes per channel.
void aEnvSetup2Impl(uint16_t initial_vol_left, uint16_t initial_vol_right) {
    rspa.vol[0] = initial_vol_left;
    rspa.vol[1] = initial_vol_right;
}
608
609
// Envelope mixer (new microcode): scale the mono input by the per-channel
// dry volumes and accumulate into the dry outputs; also accumulate a
// wet (reverb) tap scaled by vol_wet, optionally with channels swapped
// and/or negated. Volumes ramp once per 8-sample group.
void aEnvMixerImpl(uint16_t in_addr, uint16_t n_samples, bool swap_reverb,
                   bool neg_left, bool neg_right,
                   uint16_t dry_left_addr, uint16_t dry_right_addr,
                   uint16_t wet_left_addr, uint16_t wet_right_addr)
{
    int16_t *in = BUF_S16(in_addr);
    int16_t *dry[2] = {BUF_S16(dry_left_addr), BUF_S16(dry_right_addr)};
    int16_t *wet[2] = {BUF_S16(wet_left_addr), BUF_S16(wet_right_addr)};
    // -1 toggles every bit: x ^ -1 == ~x, a cheap approximate negation.
    int16_t negs[2] = {neg_left ? -1 : 0, neg_right ? -1 : 0};
    int swapped[2] = {swap_reverb ? 1 : 0, swap_reverb ? 0 : 1};
    int n = ROUND_UP_16(n_samples);

    uint16_t vols[2] = {rspa.vol[0], rspa.vol[1]};
    uint16_t rates[2] = {rspa.rate[0], rspa.rate[1]};
    uint16_t vol_wet = rspa.vol_wet;
    uint16_t rate_wet = rspa.rate_wet;

    do {
        for (int i = 0; i < 8; i++) {
            int16_t samples[2] = {*in, *in}; in++;
            for (int j = 0; j < 2; j++) {
                // Scale by the 0.16 volume, then conditionally bit-negate.
                samples[j] = (samples[j] * vols[j] >> 16) ^ negs[j];
                *dry[j] = clamp16(*dry[j] + samples[j]); dry[j]++;
                // NOTE(review): when swap_reverb is set, the j=0 wet tap
                // reads samples[1] BEFORE it has been scaled at j=1 —
                // presumably matching the RSP microcode's ordering; confirm.
                *wet[j] = clamp16(*wet[j] + (samples[swapped[j]] * vol_wet >> 16)); wet[j]++;
            }
        }
        // Ramp the volumes once per 8-sample group.
        vols[0] += rates[0];
        vols[1] += rates[1];
        vol_wet += rate_wet;

        n -= 8;
    } while (n > 0);
}
642
#else
643
// Envelope mixer (old microcode): ramp the per-channel volume toward its
// target and mix the scaled input into the dry outputs (and, with A_AUX,
// the wet outputs). A_INIT seeds the ramp from rspa and writes the ramp
// parameters into `state`; otherwise they are reloaded from `state`, and
// the per-sample volumes are always persisted back at the end.
void aEnvMixerImpl(uint8_t flags, ENVMIX_STATE state) {
    int16_t *in = BUF_S16(rspa.in);
    int16_t *dry[2] = {BUF_S16(rspa.out), BUF_S16(rspa.dry_right)};
    int16_t *wet[2] = {BUF_S16(rspa.wet_left), BUF_S16(rspa.wet_right)};
    int nbytes = ROUND_UP_16(rspa.nbytes);

#if HAS_SSE41
    __m128 vols[2][2];
    __m128i dry_factor;
    __m128i wet_factor;
    __m128 target[2];
    __m128 rate[2];
    __m128i in_loaded;
    __m128i vol_s16;
    bool increasing[2];

    int c;

    if (flags & A_INIT) {
        // Spread the geometric ramp across 8 lanes: lane k starts at
        // vol * (1 + (rate-1) * (k+1)/8), a linear approximation of the
        // per-sample exponential ramp within one group.
        float vol_init[2] = {rspa.vol[0], rspa.vol[1]};
        float rate_float[2] = {(float)rspa.rate[0] * (1.0f / 65536.0f), (float)rspa.rate[1] * (1.0f / 65536.0f)};
        float step_diff[2] = {vol_init[0] * (rate_float[0] - 1.0f), vol_init[1] * (rate_float[1] - 1.0f)};

        for (c = 0; c < 2; c++) {
            vols[c][0] = _mm_add_ps(
                _mm_set_ps1(vol_init[c]),
                _mm_mul_ps(_mm_set1_ps(step_diff[c]), _mm_setr_ps(1.0f / 8.0f, 2.0f / 8.0f, 3.0f / 8.0f, 4.0f / 8.0f)));
            vols[c][1] = _mm_add_ps(
                _mm_set_ps1(vol_init[c]),
                _mm_mul_ps(_mm_set1_ps(step_diff[c]), _mm_setr_ps(5.0f / 8.0f, 6.0f / 8.0f, 7.0f / 8.0f, 8.0f / 8.0f)));

            increasing[c] = rate_float[c] >= 1.0f;
            target[c] = _mm_set1_ps(rspa.target[c]);
            rate[c] = _mm_set1_ps(rate_float[c]);
        }

        dry_factor = _mm_set1_epi16(rspa.vol_dry);
        wet_factor = _mm_set1_epi16(rspa.vol_wet);

        // Persist ramp parameters in the int16 state array (floats stored
        // raw across two slots each).
        memcpy(state + 32, &rate_float[0], 4);
        memcpy(state + 34, &rate_float[1], 4);
        state[36] = rspa.target[0];
        state[37] = rspa.target[1];
        state[38] = rspa.vol_dry;
        state[39] = rspa.vol_wet;
    } else {
        float floats[2];
        vols[0][0] = _mm_loadu_ps((const float *)state);
        vols[0][1] = _mm_loadu_ps((const float *)(state + 8));
        vols[1][0] = _mm_loadu_ps((const float *)(state + 16));
        vols[1][1] = _mm_loadu_ps((const float *)(state + 24));
        memcpy(floats, state + 32, 8);
        rate[0] = _mm_set1_ps(floats[0]);
        rate[1] = _mm_set1_ps(floats[1]);
        increasing[0] = floats[0] >= 1.0f;
        increasing[1] = floats[1] >= 1.0f;
        target[0] = _mm_set1_ps(state[36]);
        target[1] = _mm_set1_ps(state[37]);
        dry_factor = _mm_set1_epi16(state[38]);
        wet_factor = _mm_set1_epi16(state[39]);
    }
    do {
        in_loaded = _mm_loadu_si128((const __m128i *)in);
        in += 8;
        for (c = 0; c < 2; c++) {
            // Clamp the ramp at its target (min when rising, max when falling).
            if (increasing[c]) {
                vols[c][0] = _mm_min_ps(vols[c][0], target[c]);
                vols[c][1] = _mm_min_ps(vols[c][1], target[c]);
            } else {
                vols[c][0] = _mm_max_ps(vols[c][0], target[c]);
                vols[c][1] = _mm_max_ps(vols[c][1], target[c]);
            }

            vol_s16 = _mm_packs_epi32(_mm_cvtps_epi32(vols[c][0]), _mm_cvtps_epi32(vols[c][1]));
            // out += in * vol * factor, all Q15 rounding multiplies with
            // saturating accumulation.
            _mm_storeu_si128((__m128i *)dry[c],
                             _mm_adds_epi16(
                                 _mm_loadu_si128((const __m128i *)dry[c]),
                                 _mm_mulhrs_epi16(in_loaded, _mm_mulhrs_epi16(vol_s16, dry_factor))));
            dry[c] += 8;

            if (flags & A_AUX) {
                _mm_storeu_si128((__m128i *)wet[c],
                                 _mm_adds_epi16(
                                     _mm_loadu_si128((const __m128i *)wet[c]),
                                     _mm_mulhrs_epi16(in_loaded, _mm_mulhrs_epi16(vol_s16, wet_factor))));
                wet[c] += 8;
            }

            // Advance the geometric ramp by one 8-sample group.
            vols[c][0] = _mm_mul_ps(vols[c][0], rate[c]);
            vols[c][1] = _mm_mul_ps(vols[c][1], rate[c]);
        }

        nbytes -= 8 * sizeof(int16_t);
    } while (nbytes > 0);

    _mm_storeu_ps((float *)state, vols[0][0]);
    _mm_storeu_ps((float *)(state + 8), vols[0][1]);
    _mm_storeu_ps((float *)(state + 16), vols[1][0]);
    _mm_storeu_ps((float *)(state + 24), vols[1][1]);
#elif HAS_NEON
    float32x4_t vols[2][2];
    int16_t dry_factor;
    int16_t wet_factor;
    float32x4_t target[2];
    float rate[2];
    int16x8_t in_loaded;
    int16x8_t vol_s16;
    bool increasing[2];

    int c;

    if (flags & A_INIT) {
        // Same lane-spread linear ramp initialization as the SSE path.
        float vol_init[2] = {rspa.vol[0], rspa.vol[1]};
        float rate_float[2] = {(float)rspa.rate[0] * (1.0f / 65536.0f), (float)rspa.rate[1] * (1.0f / 65536.0f)};
        float step_diff[2] = {vol_init[0] * (rate_float[0] - 1.0f), vol_init[1] * (rate_float[1] - 1.0f)};
        static const float step_dividers_data[2][4] = {{1.0f / 8.0f, 2.0f / 8.0f, 3.0f / 8.0f, 4.0f / 8.0f},
                                                       {5.0f / 8.0f, 6.0f / 8.0f, 7.0f / 8.0f, 8.0f / 8.0f}};
        float32x4_t step_dividers[2] = {vld1q_f32(step_dividers_data[0]), vld1q_f32(step_dividers_data[1])};

        for (c = 0; c < 2; c++) {
            vols[c][0] = vaddq_f32(vdupq_n_f32(vol_init[c]), vmulq_n_f32(step_dividers[0], step_diff[c]));
            vols[c][1] = vaddq_f32(vdupq_n_f32(vol_init[c]), vmulq_n_f32(step_dividers[1], step_diff[c]));
            increasing[c] = rate_float[c] >= 1.0f;
            target[c] = vdupq_n_f32(rspa.target[c]);
            rate[c] = rate_float[c];
        }

        dry_factor = rspa.vol_dry;
        wet_factor = rspa.vol_wet;

        memcpy(state + 32, &rate_float[0], 4);
        memcpy(state + 34, &rate_float[1], 4);
        state[36] = rspa.target[0];
        state[37] = rspa.target[1];
        state[38] = rspa.vol_dry;
        state[39] = rspa.vol_wet;
    } else {
        vols[0][0] = vreinterpretq_f32_s16(vld1q_s16(state));
        vols[0][1] = vreinterpretq_f32_s16(vld1q_s16(state + 8));
        vols[1][0] = vreinterpretq_f32_s16(vld1q_s16(state + 16));
        vols[1][1] = vreinterpretq_f32_s16(vld1q_s16(state + 24));
        memcpy(&rate[0], state + 32, 4);
        memcpy(&rate[1], state + 34, 4);
        increasing[0] = rate[0] >= 1.0f;
        increasing[1] = rate[1] >= 1.0f;
        target[0] = vdupq_n_f32(state[36]);
        target[1] = vdupq_n_f32(state[37]);
        dry_factor = state[38];
        wet_factor = state[39];
    }

    do {
        in_loaded = vld1q_s16(in);
        in += 8;
        for (c = 0; c < 2; c++) {
            if (increasing[c]) {
                vols[c][0] = vminq_f32(vols[c][0], target[c]);
                vols[c][1] = vminq_f32(vols[c][1], target[c]);
            } else {
                vols[c][0] = vmaxq_f32(vols[c][0], target[c]);
                vols[c][1] = vmaxq_f32(vols[c][1], target[c]);
            }

            vol_s16 = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(vols[c][0])), vqmovn_s32(vcvtq_s32_f32(vols[c][1])));
            vst1q_s16(dry[c], vqaddq_s16(vld1q_s16(dry[c]), vqrdmulhq_s16(in_loaded, vqrdmulhq_n_s16(vol_s16, dry_factor))));
            dry[c] += 8;
            if (flags & A_AUX) {
                vst1q_s16(wet[c], vqaddq_s16(vld1q_s16(wet[c]), vqrdmulhq_s16(in_loaded, vqrdmulhq_n_s16(vol_s16, wet_factor))));
                wet[c] += 8;
            }
            vols[c][0] = vmulq_n_f32(vols[c][0], rate[c]);
            vols[c][1] = vmulq_n_f32(vols[c][1], rate[c]);
        }

        nbytes -= 8 * sizeof(int16_t);
    } while (nbytes > 0);

    vst1q_s16(state, vreinterpretq_s16_f32(vols[0][0]));
    vst1q_s16(state + 8, vreinterpretq_s16_f32(vols[0][1]));
    vst1q_s16(state + 16, vreinterpretq_s16_f32(vols[1][0]));
    vst1q_s16(state + 24, vreinterpretq_s16_f32(vols[1][1]));
#else
    int16_t target[2];
    int32_t rate[2];
    int16_t vol_dry, vol_wet;

    int32_t step_diff[2];
    int32_t vols[2][8];

    int c, i;

    if (flags & A_INIT) {
        target[0] = rspa.target[0];
        target[1] = rspa.target[1];
        rate[0] = rspa.rate[0];
        rate[1] = rspa.rate[1];
        vol_dry = rspa.vol_dry;
        vol_wet = rspa.vol_wet;
        // Per-slot linear ramp step, mirroring the SIMD paths' lane spread.
        // BUGFIX: step_diff[1] previously used rspa.vol[0] for the right
        // channel, inconsistent with the SSE/NEON paths (vol_init[c]).
        step_diff[0] = rspa.vol[0] * (rate[0] - 0x10000) / 8;
        step_diff[1] = rspa.vol[1] * (rate[1] - 0x10000) / 8;

        // Multiply instead of `vol << 16`: left-shifting a negative value is
        // undefined behavior; the result is identical for in-range inputs.
        for (i = 0; i < 8; i++) {
            vols[0][i] = clamp32((int64_t)rspa.vol[0] * 0x10000 + step_diff[0] * (i + 1));
            vols[1][i] = clamp32((int64_t)rspa.vol[1] * 0x10000 + step_diff[1] * (i + 1));
        }
    } else {
        memcpy(vols[0], state, 32);
        memcpy(vols[1], state + 16, 32);
        target[0] = state[32];
        target[1] = state[35];
        // Reassemble the 16.16 rates; shift in uint32_t to avoid UB when the
        // stored high half is negative (same bit pattern as before).
        rate[0] = (int32_t)(((uint32_t)(uint16_t)state[33] << 16) | (uint16_t)state[34]);
        rate[1] = (int32_t)(((uint32_t)(uint16_t)state[36] << 16) | (uint16_t)state[37]);
        vol_dry = state[38];
        vol_wet = state[39];
    }

    do {
        for (c = 0; c < 2; c++) {
            for (i = 0; i < 8; i++) {
                if ((rate[c] >> 16) > 0) {
                    // Increasing volume: clamp the ramp at the target.
                    if ((vols[c][i] >> 16) > target[c]) {
                        vols[c][i] = target[c] * 0x10000;
                    }
                } else {
                    // Decreasing volume: clamp at the target from below.
                    if ((vols[c][i] >> 16) < target[c]) {
                        vols[c][i] = target[c] * 0x10000;
                    }
                }
                // out += in * vol * factor, Q15 rounding at every stage.
                dry[c][i] = clamp16((dry[c][i] * 0x7fff + in[i] * (((vols[c][i] >> 16) * vol_dry + 0x4000) >> 15) + 0x4000) >> 15);
                if (flags & A_AUX) {
                    wet[c][i] = clamp16((wet[c][i] * 0x7fff + in[i] * (((vols[c][i] >> 16) * vol_wet + 0x4000) >> 15) + 0x4000) >> 15);
                }
                // Advance the geometric ramp by one 8-sample group.
                vols[c][i] = clamp32((int64_t)vols[c][i] * rate[c] >> 16);
            }

            dry[c] += 8;
            if (flags & A_AUX) {
                wet[c] += 8;
            }
        }

        nbytes -= 16;
        in += 8;
    } while (nbytes > 0);

    memcpy(state, vols[0], 32);
    memcpy(state + 16, vols[1], 32);
    state[32] = target[0];
    state[35] = target[1];
    state[33] = (int16_t)(rate[0] >> 16);
    state[34] = (int16_t)rate[0];
    state[36] = (int16_t)(rate[1] >> 16);
    state[37] = (int16_t)rate[1];
    state[38] = vol_dry;
    state[39] = vol_wet;
#endif
}
902
#endif
903
904
#ifdef NEW_AUDIO_UCODE
/*
 * Mix command: out[i] = saturate(out[i] + in[i] * gain), gain in signed Q15.
 * `count` is a byte count, rounded down to a multiple of 16 then up to a
 * multiple of 32 before processing.
 */
void aMixImpl(int16_t gain, uint16_t in_addr, uint16_t out_addr, uint16_t count) {
    int nbytes = ROUND_UP_32(ROUND_DOWN_16(count));
#else
/*
 * Mix command: out[i] = saturate(out[i] + in[i] * gain), gain in signed Q15.
 * The byte count comes from the sticky rspa.nbytes set by an earlier command.
 */
void aMixImpl(int16_t gain, uint16_t in_addr, uint16_t out_addr) {
    int nbytes = ROUND_UP_32(rspa.nbytes);
#endif
    int16_t *in = BUF_S16(in_addr);
    int16_t *out = BUF_S16(out_addr);
#if HAS_SSE41
    __m128i gain_vec = _mm_set1_epi16(gain);
#elif !HAS_NEON
    int i;
    int32_t sample;
#endif

#if !HAS_NEON
    /*
     * Special case: gain == -0x8000 is Q15 "-1.0", but the rounded multiply
     * misbehaves at that value (_mm_mulhrs_epi16 overflows when both operands
     * are -0x8000), so subtract the input directly instead.  This loop drains
     * nbytes to zero, so the generic loop below is then skipped.
     * NOTE(review): the NEON build has no such branch -- presumably
     * vqrdmulhq_n_s16's saturation makes its result acceptable; confirm.
     */
    if (gain == -0x8000) {
        while (nbytes > 0) {
#if HAS_SSE41
            __m128i out1, out2, in1, in2;
            out1 = _mm_loadu_si128((const __m128i *)out);
            out2 = _mm_loadu_si128((const __m128i *)(out + 8));
            in1 = _mm_loadu_si128((const __m128i *)in);
            in2 = _mm_loadu_si128((const __m128i *)(in + 8));

            /* Saturating subtract: out -= in. */
            out1 = _mm_subs_epi16(out1, in1);
            out2 = _mm_subs_epi16(out2, in2);

            _mm_storeu_si128((__m128i *)out, out1);
            _mm_storeu_si128((__m128i *)(out + 8), out2);

            out += 16;
            in += 16;
#else
            for (i = 0; i < 16; i++) {
                sample = *out - *in++;
                *out++ = clamp16(sample);
            }
#endif

            nbytes -= 16 * sizeof(int16_t);
        }
    }
#endif

    /* Generic path: 16 samples per iteration. */
    while (nbytes > 0) {
#if HAS_SSE41
        __m128i out1, out2, in1, in2;
        out1 = _mm_loadu_si128((const __m128i *)out);
        out2 = _mm_loadu_si128((const __m128i *)(out + 8));
        in1 = _mm_loadu_si128((const __m128i *)in);
        in2 = _mm_loadu_si128((const __m128i *)(in + 8));

        /* mulhrs = rounded Q15 multiply; adds saturate to [-0x8000, 0x7fff]. */
        out1 = _mm_adds_epi16(out1, _mm_mulhrs_epi16(in1, gain_vec));
        out2 = _mm_adds_epi16(out2, _mm_mulhrs_epi16(in2, gain_vec));

        _mm_storeu_si128((__m128i *)out, out1);
        _mm_storeu_si128((__m128i *)(out + 8), out2);

        out += 16;
        in += 16;
#elif HAS_NEON
        int16x8_t out1, out2, in1, in2;
        out1 = vld1q_s16(out);
        out2 = vld1q_s16(out + 8);
        in1 = vld1q_s16(in);
        in2 = vld1q_s16(in + 8);

        /* vqrdmulh = rounded doubling Q15 multiply; vqadd saturates. */
        out1 = vqaddq_s16(out1, vqrdmulhq_n_s16(in1, gain));
        out2 = vqaddq_s16(out2, vqrdmulhq_n_s16(in2, gain));

        vst1q_s16(out, out1);
        vst1q_s16(out + 8, out2);

        out += 16;
        in += 16;
#else
        for (i = 0; i < 16; i++) {
            /* Q15 scale with rounding: (out * ~1.0 + in * gain + 0.5) >> 15. */
            sample = ((*out * 0x7fff + *in++ * gain) + 0x4000) >> 15;
            *out++ = clamp16(sample);
        }
#endif

        nbytes -= 16 * sizeof(int16_t);
    }
}
991
992
#ifdef NEW_AUDIO_UCODE
993
/*
 * Decode "signed 8-bit" compressed data: each input byte becomes one 16-bit
 * sample (byte << 8).  The first 16 output samples are the decoder state --
 * zeroed on A_INIT, taken from rspa.adpcm_loop_state on A_LOOP, otherwise
 * restored from `state` -- and the last 16 samples are written back to
 * `state` for the next call.
 */
void aS8DecImpl(uint8_t flags, ADPCM_STATE state) {
    uint8_t *src = BUF_U8(rspa.in);
    int16_t *dst = BUF_S16(rspa.out);
    int remaining = ROUND_UP_32(rspa.nbytes);

    if (flags & A_INIT) {
        memset(dst, 0, 16 * sizeof(int16_t));
    } else if (flags & A_LOOP) {
        memcpy(dst, rspa.adpcm_loop_state, 16 * sizeof(int16_t));
    } else {
        memcpy(dst, state, 16 * sizeof(int16_t));
    }
    dst += 16;

    while (remaining > 0) {
        /* 16 samples per batch; the byte count is a multiple of 32. */
        for (int i = 0; i < 16; i++) {
            *dst++ = (int16_t)(*src++ << 8);
        }
        remaining -= 16 * sizeof(int16_t);
    }

    memcpy(state, dst - 16, 16 * sizeof(int16_t));
}
1029
1030
/*
 * Saturating element-wise add of the samples at `in_addr` into `out_addr`.
 * `count` is a byte count, rounded down to a multiple of 16 and up to a
 * multiple of 64; the do/while always runs at least one 16-sample batch.
 */
void aAddMixerImpl(uint16_t in_addr, uint16_t out_addr, uint16_t count) {
    int16_t *src = BUF_S16(in_addr);
    int16_t *dst = BUF_S16(out_addr);
    int remaining = ROUND_UP_64(ROUND_DOWN_16(count));

    do {
        for (int i = 0; i < 16; i++) {
            dst[i] = clamp16(dst[i] + src[i]);
        }
        src += 16;
        dst += 16;
        remaining -= 16 * sizeof(int16_t);
    } while (remaining > 0);
}
1056
1057
/*
 * Replicate the 128-byte block at `in_addr` into `out_addr` count+1 times
 * (the original do/while tested after the first copy, so count == 0 still
 * copies once).  The source block is snapshotted first so an overlapping
 * destination keeps reading the original data.
 */
void aDuplicateImpl(uint16_t in_addr, uint16_t out_addr, uint16_t count) {
    uint8_t *src = BUF_U8(in_addr);
    uint8_t *dst = BUF_U8(out_addr);
    uint8_t block[128];
    uint32_t copies = (uint32_t)count + 1;

    memcpy(block, src, sizeof(block));
    while (copies-- > 0) {
        memcpy(dst, block, sizeof(block));
        dst += sizeof(block);
    }
}
1068
1069
/*
 * Move `count` bytes (rounded up to a multiple of 32) from `in_addr` to
 * `out_addr`, repeated over t+1 consecutive regions (the original do/while
 * tested t after the first pass).  memmove tolerates overlapping regions.
 */
void aDMEMMove2Impl(uint8_t t, uint16_t in_addr, uint16_t out_addr, uint16_t count) {
    uint8_t *src = BUF_U8(in_addr);
    uint8_t *dst = BUF_U8(out_addr);
    int chunk = ROUND_UP_32(count);
    unsigned reps = (unsigned)t + 1;

    while (reps-- > 0) {
        memmove(dst, src, chunk);
        src += chunk;
        dst += chunk;
    }
}
1080
1081
void aResampleZohImpl(uint16_t pitch, uint16_t start_fract) {
1082
int16_t *in = BUF_S16(rspa.in);
1083
int16_t *out = BUF_S16(rspa.out);
1084
int nbytes = ROUND_UP_8(rspa.nbytes);
1085
uint32_t pos = start_fract;
1086
uint32_t pitch_add = pitch << 2;
1087
1088
do {
1089
*out++ = in[pos >> 17]; pos += pitch_add;
1090
*out++ = in[pos >> 17]; pos += pitch_add;
1091
*out++ = in[pos >> 17]; pos += pitch_add;
1092
*out++ = in[pos >> 17]; pos += pitch_add;
1093
1094
nbytes -= 4 * sizeof(int16_t);
1095
} while (nbytes > 0);
1096
}
1097
1098
/*
 * Halve the sample rate by keeping every other input sample (the
 * even-indexed ones).  Writes n_samples outputs, rounded up to a
 * multiple of 8.
 */
void aDownsampleHalfImpl(uint16_t n_samples, uint16_t in_addr, uint16_t out_addr) {
    int16_t *src = BUF_S16(in_addr);
    int16_t *dst = BUF_S16(out_addr);
    int remaining = ROUND_UP_8(n_samples);

    do {
        for (int i = 0; i < 8; i++) {
            dst[i] = src[2 * i];
        }
        src += 16;
        dst += 8;
        remaining -= 8;
    } while (remaining > 0);
}
1116
1117
void aFilterImpl(uint8_t flags, uint16_t count_or_buf, int16_t state_or_filter[8]) {
1118
if (flags > A_INIT) {
1119
rspa.filter_count = ROUND_UP_16(count_or_buf);
1120
memcpy(rspa.filter, state_or_filter, sizeof(rspa.filter));
1121
} else {
1122
int16_t tmp[16];
1123
int count = rspa.filter_count;
1124
int16_t *buf = BUF_S16(count_or_buf);
1125
1126
if (flags == A_INIT) {
1127
memset(tmp, 0, 8 * sizeof(int16_t));
1128
} else {
1129
memcpy(tmp, state_or_filter, 8 * sizeof(int16_t));
1130
}
1131
1132
do {
1133
memcpy(tmp + 8, buf, 8 * sizeof(int16_t));
1134
for (int i = 0; i < 8; i++) {
1135
int64_t sample = 0x4000; // round term
1136
int16_t in = tmp[8 + i];
1137
for (int j = 1; j <= 8; j++) {
1138
sample += in * tmp[8 + i - j];
1139
}
1140
buf[i] = clamp16((int32_t)(sample >> 15));
1141
}
1142
memcpy(tmp, tmp + 8, 8 * sizeof(int16_t));
1143
1144
buf += 8;
1145
count -= 8 * sizeof(int16_t);
1146
} while (count > 0);
1147
1148
memcpy(state_or_filter, tmp, 8 * sizeof(int16_t));
1149
}
1150
}
1151
1152
/*
 * Scale samples in place: sample = saturate((sample * g) >> 4), i.e. a
 * gain with 4 fractional bits.  `count` is a byte count, rounded up to a
 * multiple of 32.
 *
 * Fix: each iteration processes 8 int16 samples (16 bytes), but nbytes was
 * previously decremented by only 8, so the loop touched twice the requested
 * region.  Now decrements by 8 * sizeof(int16_t), consistent with every
 * other byte-counted loop in this file.
 */
void aHiLoGainImpl(uint8_t g, uint16_t count, uint16_t addr) {
    int16_t *samples = BUF_S16(addr);
    int nbytes = ROUND_UP_32(count);

    do {
        for (int i = 0; i < 8; i++) {
            samples[i] = clamp16((samples[i] * g) >> 4);
        }
        samples += 8;
        nbytes -= 8 * sizeof(int16_t);
    } while (nbytes > 0);
}
1169
1170
/*
 * Implementation of an unidentified audio command (0x25).  Snapshots a
 * 32-entry s16 table from in_addr + f, then multiplies the buffer at
 * out_addr by it element-wise, 32 samples per batch, over ROUND_UP_64(count)
 * bytes (do/while: at least one batch runs).
 * NOTE(review): the s16*s16 product is clamped without a >> 15, so any
 * |product| > 0x7fff pins at the rail -- presumably intentional for this
 * command, but verify against the reference RSP ucode.
 */
void aUnknown25Impl(uint8_t f, uint16_t count, uint16_t out_addr, uint16_t in_addr) {
    int nbytes = ROUND_UP_64(count);
    int16_t *in = BUF_S16(in_addr + f);
    int16_t *out = BUF_S16(out_addr);
    int16_t tbl[32];

    // Copy the table first so overlapping in/out regions read stable values.
    memcpy(tbl, in, 32 * sizeof(int16_t));
    do {
        for (int i = 0; i < 32; i++) {
            out[i] = clamp16(out[i] * tbl[i]);
        }
        out += 32;
        nbytes -= 32 * sizeof(int16_t);
    } while (nbytes > 0);
}
1185
#endif
1186
1187