GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <[email protected]>
 *
 * Copyright © 2012 Jussi Kivilinna <[email protected]>
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4
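
/*
 * cast_s1..cast_s4 are the shared CAST S-box tables exported by the
 * kernel's common CAST code (crypto/cast_common.c).
 */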

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)
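
/*
 * These offsets assume the layout of struct cast5_ctx in
 * include/crypto/cast5.h: 16 32-bit masking keys Km, then 16
 * rotation-key bytes Kr, then the rr flag, which is non-zero for keys
 * of 80 bits or less (12 rounds instead of 16, RFC 2144).
 */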

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %r15

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15
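
/*
 * RKM holds the broadcast masking key for the current round, RKR the
 * queue of rotation-key bytes, RKRF the current 5-bit rotation count
 * and RKRR its 32 - Kr complement; R32 and R1ST cache the constants 32
 * and 0x1f, RTMP is scratch.
 */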

#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d

#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl src ## bh, RID1d; \
	leaq s1(%rip), RID2; \
	movl (RID2,RID1,4), dst ## d; \
	movzbl src ## bl, RID2d; \
	leaq s2(%rip), RID1; \
	op1 (RID1,RID2,4), dst ## d; \
	shrq $16, src; \
	movzbl src ## bh, RID1d; \
	leaq s3(%rip), RID2; \
	op2 (RID2,RID1,4), dst ## d; \
	movzbl src ## bl, RID2d; \
	interleave_op(il_reg); \
	leaq s4(%rip), RID1; \
	op3 (RID1,RID2,4), dst ## d;
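
/*
 * lookup_32bit combines the four S-box lookups for one 32-bit value held
 * in src: two bytes are consumed via the bh/bl sub-registers, src is
 * shifted down by 16 bits, and the remaining two bytes are consumed.
 * op1/op2/op3 select the round-type-specific mix operations;
 * interleave_op lets the caller slot in the extra shift that exposes the
 * second 32-bit value packed in the same register.
 */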

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0 a, RKM, x; \
	vpslld RKRF, x, RTMP; \
	vpsrld RKRR, x, x; \
	vpor RTMP, x, x; \
	\
	vmovq x, gi1; \
	vpextrq $1, x, gi2;
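
/*
 * F_head combines the broadcast masking key with one half of each block
 * (op0 = add/xor/sub), rotates every 32-bit lane left by the current
 * rotation count (left shift by Kr or'd with right shift by 32 - Kr),
 * and spills the two 64-bit lanes to general-purpose registers for the
 * S-box lookups.
 */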

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32, RFS2; \
	orq RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32, RFS1; \
	orq RFS1, RFS3; \
	\
	vmovq RFS2, x; \
	vpinsrq $1, RFS3, x, x;
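
/*
 * F_tail runs the S-box mix for all four 32-bit values (two packed in
 * each general-purpose register), glues each pair of 32-bit results back
 * into a 64-bit word and reassembles them into the single 128-bit
 * vector x.
 */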

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor a1, RX, a1; \
	vpxor a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
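
/*
 * The three CAST5 round function types (RFC 2144):
 *   type 1: I = ((Km + D) <<< Kr), f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *   type 2: I = ((Km ^ D) <<< Kr), f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *   type 3: I = ((Km - D) <<< Kr), f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 * op0 is the vector Km/D combine; op1-op3 are the scalar S-box mixes.
 */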

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);

#define round(l, r, n, f) \
	vbroadcastss (km+(4*n))(CTX), RKM; \
	vpand R1ST, RKR, RKRF; \
	vpsubq RKRF, R32, RKRR; \
	vpsrldq $1, RKR, RKR; \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);
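
/*
 * One CAST5 round applied to all 16 blocks: broadcast the 32-bit masking
 * key Km[n], mask the next rotation-key byte down to 5 bits (RKRF),
 * derive 32 - Kr (RKRR) for the right-shift half of the rotate, advance
 * RKR to the following key byte, then run the type-f round function on
 * the four register pairs.
 */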

#define enc_preload_rkr() \
	vbroadcastss .L16_mask(%rip), RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss .L16_mask(%rip), RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR; \
	vpshufb .Lbswap128_mask(%rip), RKR, RKR;
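
/*
 * RKR caches all 16 rotation-key bytes, one consumed per round.  XORing
 * with .L16_mask adds 16 (mod 32) to every 5-bit rotation count (16 is
 * the top bit of the field, so xor equals add here); the extra 16-bit
 * rotation matches the order in which lookup_32bit pulls its S-box
 * indices out of each word.  For decryption the bytes are additionally
 * reversed so that Kr[15] is consumed first.
 */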

#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t1; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1;
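
/*
 * inpack_blocks byte-swaps each 32-bit word into the big-endian values
 * CAST5 operates on and transposes two registers holding four whole
 * 64-bit blocks into one register of left halves and one of right
 * halves; outunpack_blocks is the inverse.
 */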

.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.section .rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section .rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0

.section .rodata.cst4.16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16
.section .rodata.cst4.32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0
.section .rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0
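
/*
 * .Lbswap_mask converts each 32-bit word between memory byte order and
 * the big-endian values CAST5 works on; .Lbswap128_mask reverses a whole
 * 16-byte vector (used on RKR for decryption).  .L16_mask feeds the
 * rotation-count adjustment in the preload macros, .L32_mask supplies
 * the constant 32 for deriving the right-shift count, and .Lfirst_mask
 * (0x1f) isolates the current 5-bit rotation key.
 */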

.text

SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask(%rip), RKM;
	vmovd .Lfirst_mask(%rip), R1ST;
	vmovd .L32_mask(%rip), R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);
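
	/*
	 * rr is non-zero for keys of 80 bits or less, for which RFC 2144
	 * specifies 12 rounds; rounds 12-15 only run for longer keys.
	 */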

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

.L__skip_enc:
	popq %rbx;
	popq %r15;

	vmovdqa .Lbswap_mask(%rip), RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	RET;
SYM_FUNC_END(__cast5_enc_blk16)

SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask(%rip), RKM;
	vmovd .Lfirst_mask(%rip), R1ST;
	vmovd .L32_mask(%rip), R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);
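
	/*
	 * Decryption walks the rounds in reverse.  For 12-round (<= 80-bit)
	 * keys, rounds 15-12 are skipped; .L__skip_dec also drops the four
	 * unused rotation-key bytes from RKR so .L__dec_tail starts with
	 * Kr[11].
	 */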

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

.L__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask(%rip), RKM;
	popq %rbx;
	popq %r15;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	RET;

.L__skip_dec:
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
SYM_FUNC_END(__cast5_dec_blk16)

SYM_FUNC_START(cast5_ecb_enc_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;
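
	/*
	 * The Feistel halves come back swapped (CAST5 ciphertext is R16
	 * followed by L16, RFC 2144), so each RRx is stored where the
	 * matching RLx was loaded from.
	 */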

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ecb_enc_16way)

SYM_FUNC_START(cast5_ecb_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */

	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ecb_dec_16way)

SYM_FUNC_START(cast5_cbc_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* xor with src */
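	/*
	 * CBC chaining: each decrypted block is xored with the ciphertext
	 * block preceding it in src.  vmovq/vpshufd line ciphertext block 1
	 * up against decrypted block 2 inside RR1 while leaving block 1
	 * untouched; xoring block 1 with the IV is left to the caller.
	 */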
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r15;
	popq %r12;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_cbc_dec_16way)