GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Serpent Cipher 8-way parallel algorithm (x86_64/AVX)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <[email protected]>
 *
 * Copyright © 2011-2013 Jussi Kivilinna <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "serpent-avx-x86_64-asm_64.S"

.section	.rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.text

#define CTX %rdi

/**********************************************************************
  8-way AVX serpent
 **********************************************************************/
#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3
#define RE1 %xmm4

#define tp  %xmm5

#define RA2 %xmm6
#define RB2 %xmm7
#define RC2 %xmm8
#define RD2 %xmm9
#define RE2 %xmm10

#define RNOT %xmm11

#define RK0 %xmm12
#define RK1 %xmm13
#define RK2 %xmm14
#define RK3 %xmm15
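
/*
 * Note on register allocation: RA1..RE1 and RA2..RE2 are two parallel
 * sets of state registers, tp is scratch, RNOT holds all-ones (set via
 * vpcmpeqd below) so a vector NOT can be done as vpxor with RNOT, and
 * RK0..RK3 receive broadcast round-key words.  After the read_blocks()
 * transpose, each state register holds the same 32-bit word of four
 * different blocks, so every instruction processes four blocks at once
 * and the two register sets together give the 8-way parallelism.
 */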

#define S0_1(x0, x1, x2, x3, x4) \
	vpor x0, x3, tp; \
	vpxor x3, x0, x0; \
	vpxor x2, x3, x4; \
	vpxor RNOT, x4, x4; \
	vpxor x1, tp, x3; \
	vpand x0, x1, x1; \
	vpxor x4, x1, x1; \
	vpxor x0, x2, x2;
#define S0_2(x0, x1, x2, x3, x4) \
	vpxor x3, x0, x0; \
	vpor x0, x4, x4; \
	vpxor x2, x0, x0; \
	vpand x1, x2, x2; \
	vpxor x2, x3, x3; \
	vpxor RNOT, x1, x1; \
	vpxor x4, x2, x2; \
	vpxor x2, x1, x1;
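
/*
 * For reference, S0_1/S0_2 above correspond to the following scalar
 * sequence (a direct transliteration of the vector instructions; in
 * AT&T operand order "vpxor a, b, c" computes c = b ^ a):
 *
 *	tp = x3 | x0;  x0 ^= x3;  x4 = x3 ^ x2;  x4 = ~x4;
 *	x3 = tp ^ x1;  x1 &= x0;  x1 ^= x4;      x2 ^= x0;
 *	x0 ^= x3;      x4 |= x0;  x0 ^= x2;      x2 &= x1;
 *	x3 ^= x2;      x1 = ~x1;  x2 ^= x4;      x1 ^= x2;
 *
 * The remaining S-box macros (S1..S7 and the inverses SI0..SI7) follow
 * the same pattern with different Boolean circuits.
 */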

#define S1_1(x0, x1, x2, x3, x4) \
	vpxor x0, x1, tp; \
	vpxor x3, x0, x0; \
	vpxor RNOT, x3, x3; \
	vpand tp, x1, x4; \
	vpor tp, x0, x0; \
	vpxor x2, x3, x3; \
	vpxor x3, x0, x0; \
	vpxor x3, tp, x1;
#define S1_2(x0, x1, x2, x3, x4) \
	vpxor x4, x3, x3; \
	vpor x4, x1, x1; \
	vpxor x2, x4, x4; \
	vpand x0, x2, x2; \
	vpxor x1, x2, x2; \
	vpor x0, x1, x1; \
	vpxor RNOT, x0, x0; \
	vpxor x2, x0, x0; \
	vpxor x1, x4, x4;

#define S2_1(x0, x1, x2, x3, x4) \
	vpxor RNOT, x3, x3; \
	vpxor x0, x1, x1; \
	vpand x2, x0, tp; \
	vpxor x3, tp, tp; \
	vpor x0, x3, x3; \
	vpxor x1, x2, x2; \
	vpxor x1, x3, x3; \
	vpand tp, x1, x1;
#define S2_2(x0, x1, x2, x3, x4) \
	vpxor x2, tp, tp; \
	vpand x3, x2, x2; \
	vpor x1, x3, x3; \
	vpxor RNOT, tp, tp; \
	vpxor tp, x3, x3; \
	vpxor tp, x0, x4; \
	vpxor x2, tp, x0; \
	vpor x2, x1, x1;

#define S3_1(x0, x1, x2, x3, x4) \
	vpxor x3, x1, tp; \
	vpor x0, x3, x3; \
	vpand x0, x1, x4; \
	vpxor x2, x0, x0; \
	vpxor tp, x2, x2; \
	vpand x3, tp, x1; \
	vpxor x3, x2, x2; \
	vpor x4, x0, x0; \
	vpxor x3, x4, x4;
#define S3_2(x0, x1, x2, x3, x4) \
	vpxor x0, x1, x1; \
	vpand x3, x0, x0; \
	vpand x4, x3, x3; \
	vpxor x2, x3, x3; \
	vpor x1, x4, x4; \
	vpand x1, x2, x2; \
	vpxor x3, x4, x4; \
	vpxor x3, x0, x0; \
	vpxor x2, x3, x3;

#define S4_1(x0, x1, x2, x3, x4) \
	vpand x0, x3, tp; \
	vpxor x3, x0, x0; \
	vpxor x2, tp, tp; \
	vpor x3, x2, x2; \
	vpxor x1, x0, x0; \
	vpxor tp, x3, x4; \
	vpor x0, x2, x2; \
	vpxor x1, x2, x2;
#define S4_2(x0, x1, x2, x3, x4) \
	vpand x0, x1, x1; \
	vpxor x4, x1, x1; \
	vpand x2, x4, x4; \
	vpxor tp, x2, x2; \
	vpxor x0, x4, x4; \
	vpor x1, tp, x3; \
	vpxor RNOT, x1, x1; \
	vpxor x0, x3, x3;

#define S5_1(x0, x1, x2, x3, x4) \
	vpor x0, x1, tp; \
	vpxor tp, x2, x2; \
	vpxor RNOT, x3, x3; \
	vpxor x0, x1, x4; \
	vpxor x2, x0, x0; \
	vpand x4, tp, x1; \
	vpor x3, x4, x4; \
	vpxor x0, x4, x4;
#define S5_2(x0, x1, x2, x3, x4) \
	vpand x3, x0, x0; \
	vpxor x3, x1, x1; \
	vpxor x2, x3, x3; \
	vpxor x1, x0, x0; \
	vpand x4, x2, x2; \
	vpxor x2, x1, x1; \
	vpand x0, x2, x2; \
	vpxor x2, x3, x3;

#define S6_1(x0, x1, x2, x3, x4) \
	vpxor x0, x3, x3; \
	vpxor x2, x1, tp; \
	vpxor x0, x2, x2; \
	vpand x3, x0, x0; \
	vpor x3, tp, tp; \
	vpxor RNOT, x1, x4; \
	vpxor tp, x0, x0; \
	vpxor x2, tp, x1;
#define S6_2(x0, x1, x2, x3, x4) \
	vpxor x4, x3, x3; \
	vpxor x0, x4, x4; \
	vpand x0, x2, x2; \
	vpxor x1, x4, x4; \
	vpxor x3, x2, x2; \
	vpand x1, x3, x3; \
	vpxor x0, x3, x3; \
	vpxor x2, x1, x1;

#define S7_1(x0, x1, x2, x3, x4) \
	vpxor RNOT, x1, tp; \
	vpxor RNOT, x0, x0; \
	vpand x2, tp, x1; \
	vpxor x3, x1, x1; \
	vpor tp, x3, x3; \
	vpxor x2, tp, x4; \
	vpxor x3, x2, x2; \
	vpxor x0, x3, x3; \
	vpor x1, x0, x0;
#define S7_2(x0, x1, x2, x3, x4) \
	vpand x0, x2, x2; \
	vpxor x4, x0, x0; \
	vpxor x3, x4, x4; \
	vpand x0, x3, x3; \
	vpxor x1, x4, x4; \
	vpxor x4, x2, x2; \
	vpxor x1, x3, x3; \
	vpor x0, x4, x4; \
	vpxor x1, x4, x4;

#define SI0_1(x0, x1, x2, x3, x4) \
	vpxor x0, x1, x1; \
	vpor x1, x3, tp; \
	vpxor x1, x3, x4; \
	vpxor RNOT, x0, x0; \
	vpxor tp, x2, x2; \
	vpxor x0, tp, x3; \
	vpand x1, x0, x0; \
	vpxor x2, x0, x0;
#define SI0_2(x0, x1, x2, x3, x4) \
	vpand x3, x2, x2; \
	vpxor x4, x3, x3; \
	vpxor x3, x2, x2; \
	vpxor x3, x1, x1; \
	vpand x0, x3, x3; \
	vpxor x0, x1, x1; \
	vpxor x2, x0, x0; \
	vpxor x3, x4, x4;

#define SI1_1(x0, x1, x2, x3, x4) \
	vpxor x3, x1, x1; \
	vpxor x2, x0, tp; \
	vpxor RNOT, x2, x2; \
	vpor x1, x0, x4; \
	vpxor x3, x4, x4; \
	vpand x1, x3, x3; \
	vpxor x2, x1, x1; \
	vpand x4, x2, x2;
#define SI1_2(x0, x1, x2, x3, x4) \
	vpxor x1, x4, x4; \
	vpor x3, x1, x1; \
	vpxor tp, x3, x3; \
	vpxor tp, x2, x2; \
	vpor x4, tp, x0; \
	vpxor x4, x2, x2; \
	vpxor x0, x1, x1; \
	vpxor x1, x4, x4;

#define SI2_1(x0, x1, x2, x3, x4) \
	vpxor x1, x2, x2; \
	vpxor RNOT, x3, tp; \
	vpor x2, tp, tp; \
	vpxor x3, x2, x2; \
	vpxor x0, x3, x4; \
	vpxor x1, tp, x3; \
	vpor x2, x1, x1; \
	vpxor x0, x2, x2;
#define SI2_2(x0, x1, x2, x3, x4) \
	vpxor x4, x1, x1; \
	vpor x3, x4, x4; \
	vpxor x3, x2, x2; \
	vpxor x2, x4, x4; \
	vpand x1, x2, x2; \
	vpxor x3, x2, x2; \
	vpxor x4, x3, x3; \
	vpxor x0, x4, x4;

#define SI3_1(x0, x1, x2, x3, x4) \
	vpxor x1, x2, x2; \
	vpand x2, x1, tp; \
	vpxor x0, tp, tp; \
	vpor x1, x0, x0; \
	vpxor x3, x1, x4; \
	vpxor x3, x0, x0; \
	vpor tp, x3, x3; \
	vpxor x2, tp, x1;
#define SI3_2(x0, x1, x2, x3, x4) \
	vpxor x3, x1, x1; \
	vpxor x2, x0, x0; \
	vpxor x3, x2, x2; \
	vpand x1, x3, x3; \
	vpxor x0, x1, x1; \
	vpand x2, x0, x0; \
	vpxor x3, x4, x4; \
	vpxor x0, x3, x3; \
	vpxor x1, x0, x0;

#define SI4_1(x0, x1, x2, x3, x4) \
	vpxor x3, x2, x2; \
	vpand x1, x0, tp; \
	vpxor x2, tp, tp; \
	vpor x3, x2, x2; \
	vpxor RNOT, x0, x4; \
	vpxor tp, x1, x1; \
	vpxor x2, tp, x0; \
	vpand x4, x2, x2;
#define SI4_2(x0, x1, x2, x3, x4) \
	vpxor x0, x2, x2; \
	vpor x4, x0, x0; \
	vpxor x3, x0, x0; \
	vpand x2, x3, x3; \
	vpxor x3, x4, x4; \
	vpxor x1, x3, x3; \
	vpand x0, x1, x1; \
	vpxor x1, x4, x4; \
	vpxor x3, x0, x0;

#define SI5_1(x0, x1, x2, x3, x4) \
	vpor x2, x1, tp; \
	vpxor x1, x2, x2; \
	vpxor x3, tp, tp; \
	vpand x1, x3, x3; \
	vpxor x3, x2, x2; \
	vpor x0, x3, x3; \
	vpxor RNOT, x0, x0; \
	vpxor x2, x3, x3; \
	vpor x0, x2, x2;
#define SI5_2(x0, x1, x2, x3, x4) \
	vpxor tp, x1, x4; \
	vpxor x4, x2, x2; \
	vpand x0, x4, x4; \
	vpxor tp, x0, x0; \
	vpxor x3, tp, x1; \
	vpand x2, x0, x0; \
	vpxor x3, x2, x2; \
	vpxor x2, x0, x0; \
	vpxor x4, x2, x2; \
	vpxor x3, x4, x4;

#define SI6_1(x0, x1, x2, x3, x4) \
	vpxor x2, x0, x0; \
	vpand x3, x0, tp; \
	vpxor x3, x2, x2; \
	vpxor x2, tp, tp; \
	vpxor x1, x3, x3; \
	vpor x0, x2, x2; \
	vpxor x3, x2, x2; \
	vpand tp, x3, x3;
#define SI6_2(x0, x1, x2, x3, x4) \
	vpxor RNOT, tp, tp; \
	vpxor x1, x3, x3; \
	vpand x2, x1, x1; \
	vpxor tp, x0, x4; \
	vpxor x4, x3, x3; \
	vpxor x2, x4, x4; \
	vpxor x1, tp, x0; \
	vpxor x0, x2, x2;

#define SI7_1(x0, x1, x2, x3, x4) \
	vpand x0, x3, tp; \
	vpxor x2, x0, x0; \
	vpor x3, x2, x2; \
	vpxor x1, x3, x4; \
	vpxor RNOT, x0, x0; \
	vpor tp, x1, x1; \
	vpxor x0, x4, x4; \
	vpand x2, x0, x0; \
	vpxor x1, x0, x0;
#define SI7_2(x0, x1, x2, x3, x4) \
	vpand x2, x1, x1; \
	vpxor x2, tp, x3; \
	vpxor x3, x4, x4; \
	vpand x3, x2, x2; \
	vpor x0, x3, x3; \
	vpxor x4, x1, x1; \
	vpxor x4, x3, x3; \
	vpand x0, x4, x4; \
	vpxor x2, x4, x4;

#define get_key(i, j, t) \
	vbroadcastss (4*(i)+(j))*4(CTX), t;
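
/*
 * get_key(i, j, t) broadcasts 32-bit word j of round key i into all
 * four lanes of t.  The (4*(i)+(j))*4 byte offset assumes the expanded
 * key is a flat array of 32-bit words at the start of the context, in
 * C terms roughly:
 *
 *	t = broadcast32(((const u32 *)ctx)[4*i + j]);
 *
 * matching the expkey array at the start of struct serpent_ctx.
 */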

#define K2(x0, x1, x2, x3, x4, i) \
	get_key(i, 0, RK0); \
	get_key(i, 1, RK1); \
	get_key(i, 2, RK2); \
	get_key(i, 3, RK3); \
	vpxor RK0, x0 ## 1, x0 ## 1; \
	vpxor RK1, x1 ## 1, x1 ## 1; \
	vpxor RK2, x2 ## 1, x2 ## 1; \
	vpxor RK3, x3 ## 1, x3 ## 1; \
	vpxor RK0, x0 ## 2, x0 ## 2; \
	vpxor RK1, x1 ## 2, x1 ## 2; \
	vpxor RK2, x2 ## 2, x2 ## 2; \
	vpxor RK3, x3 ## 2, x3 ## 2;
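
/*
 * K2 XORs the four words of round key i into both register sets.  It
 * is the bare key-mixing step, used on its own for the initial
 * whitening (index 0) and for the extra key application after the
 * final S-box (index 32).
 */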

#define LK2(x0, x1, x2, x3, x4, i) \
	vpslld $13, x0 ## 1, x4 ## 1; \
	vpsrld $(32 - 13), x0 ## 1, x0 ## 1; \
	vpor x4 ## 1, x0 ## 1, x0 ## 1; \
	vpxor x0 ## 1, x1 ## 1, x1 ## 1; \
	vpslld $3, x2 ## 1, x4 ## 1; \
	vpsrld $(32 - 3), x2 ## 1, x2 ## 1; \
	vpor x4 ## 1, x2 ## 1, x2 ## 1; \
	vpxor x2 ## 1, x1 ## 1, x1 ## 1; \
	vpslld $13, x0 ## 2, x4 ## 2; \
	vpsrld $(32 - 13), x0 ## 2, x0 ## 2; \
	vpor x4 ## 2, x0 ## 2, x0 ## 2; \
	vpxor x0 ## 2, x1 ## 2, x1 ## 2; \
	vpslld $3, x2 ## 2, x4 ## 2; \
	vpsrld $(32 - 3), x2 ## 2, x2 ## 2; \
	vpor x4 ## 2, x2 ## 2, x2 ## 2; \
	vpxor x2 ## 2, x1 ## 2, x1 ## 2; \
	vpslld $1, x1 ## 1, x4 ## 1; \
	vpsrld $(32 - 1), x1 ## 1, x1 ## 1; \
	vpor x4 ## 1, x1 ## 1, x1 ## 1; \
	vpslld $3, x0 ## 1, x4 ## 1; \
	vpxor x2 ## 1, x3 ## 1, x3 ## 1; \
	vpxor x4 ## 1, x3 ## 1, x3 ## 1; \
	get_key(i, 1, RK1); \
	vpslld $1, x1 ## 2, x4 ## 2; \
	vpsrld $(32 - 1), x1 ## 2, x1 ## 2; \
	vpor x4 ## 2, x1 ## 2, x1 ## 2; \
	vpslld $3, x0 ## 2, x4 ## 2; \
	vpxor x2 ## 2, x3 ## 2, x3 ## 2; \
	vpxor x4 ## 2, x3 ## 2, x3 ## 2; \
	get_key(i, 3, RK3); \
	vpslld $7, x3 ## 1, x4 ## 1; \
	vpsrld $(32 - 7), x3 ## 1, x3 ## 1; \
	vpor x4 ## 1, x3 ## 1, x3 ## 1; \
	vpslld $7, x1 ## 1, x4 ## 1; \
	vpxor x1 ## 1, x0 ## 1, x0 ## 1; \
	vpxor x3 ## 1, x0 ## 1, x0 ## 1; \
	vpxor x3 ## 1, x2 ## 1, x2 ## 1; \
	vpxor x4 ## 1, x2 ## 1, x2 ## 1; \
	get_key(i, 0, RK0); \
	vpslld $7, x3 ## 2, x4 ## 2; \
	vpsrld $(32 - 7), x3 ## 2, x3 ## 2; \
	vpor x4 ## 2, x3 ## 2, x3 ## 2; \
	vpslld $7, x1 ## 2, x4 ## 2; \
	vpxor x1 ## 2, x0 ## 2, x0 ## 2; \
	vpxor x3 ## 2, x0 ## 2, x0 ## 2; \
	vpxor x3 ## 2, x2 ## 2, x2 ## 2; \
	vpxor x4 ## 2, x2 ## 2, x2 ## 2; \
	get_key(i, 2, RK2); \
	vpxor RK1, x1 ## 1, x1 ## 1; \
	vpxor RK3, x3 ## 1, x3 ## 1; \
	vpslld $5, x0 ## 1, x4 ## 1; \
	vpsrld $(32 - 5), x0 ## 1, x0 ## 1; \
	vpor x4 ## 1, x0 ## 1, x0 ## 1; \
	vpslld $22, x2 ## 1, x4 ## 1; \
	vpsrld $(32 - 22), x2 ## 1, x2 ## 1; \
	vpor x4 ## 1, x2 ## 1, x2 ## 1; \
	vpxor RK0, x0 ## 1, x0 ## 1; \
	vpxor RK2, x2 ## 1, x2 ## 1; \
	vpxor RK1, x1 ## 2, x1 ## 2; \
	vpxor RK3, x3 ## 2, x3 ## 2; \
	vpslld $5, x0 ## 2, x4 ## 2; \
	vpsrld $(32 - 5), x0 ## 2, x0 ## 2; \
	vpor x4 ## 2, x0 ## 2, x0 ## 2; \
	vpslld $22, x2 ## 2, x4 ## 2; \
	vpsrld $(32 - 22), x2 ## 2, x2 ## 2; \
	vpor x4 ## 2, x2 ## 2, x2 ## 2; \
	vpxor RK0, x0 ## 2, x0 ## 2; \
	vpxor RK2, x2 ## 2, x2 ## 2;
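
/*
 * LK2 is Serpent's linear transformation fused with the key mixing of
 * round i, applied to both register sets with the round-key loads
 * interleaved into the arithmetic.  Per 32-bit word slice it computes:
 *
 *	x0 = rol32(x0, 13);  x2 = rol32(x2, 3);
 *	x1 ^= x0 ^ x2;       x3 ^= x2 ^ (x0 << 3);
 *	x1 = rol32(x1, 1);   x3 = rol32(x3, 7);
 *	x0 ^= x1 ^ x3;       x2 ^= x3 ^ (x1 << 7);
 *	x0 = rol32(x0, 5);   x2 = rol32(x2, 22);
 *	x0 ^= k[4*i]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; x3 ^= k[4*i+3];
 *
 * Each rol32 is built from a vpslld/vpsrld/vpor triple, since AVX has
 * no vector rotate instruction.
 */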

#define KL2(x0, x1, x2, x3, x4, i) \
	vpxor RK0, x0 ## 1, x0 ## 1; \
	vpxor RK2, x2 ## 1, x2 ## 1; \
	vpsrld $5, x0 ## 1, x4 ## 1; \
	vpslld $(32 - 5), x0 ## 1, x0 ## 1; \
	vpor x4 ## 1, x0 ## 1, x0 ## 1; \
	vpxor RK3, x3 ## 1, x3 ## 1; \
	vpxor RK1, x1 ## 1, x1 ## 1; \
	vpsrld $22, x2 ## 1, x4 ## 1; \
	vpslld $(32 - 22), x2 ## 1, x2 ## 1; \
	vpor x4 ## 1, x2 ## 1, x2 ## 1; \
	vpxor x3 ## 1, x2 ## 1, x2 ## 1; \
	vpxor RK0, x0 ## 2, x0 ## 2; \
	vpxor RK2, x2 ## 2, x2 ## 2; \
	vpsrld $5, x0 ## 2, x4 ## 2; \
	vpslld $(32 - 5), x0 ## 2, x0 ## 2; \
	vpor x4 ## 2, x0 ## 2, x0 ## 2; \
	vpxor RK3, x3 ## 2, x3 ## 2; \
	vpxor RK1, x1 ## 2, x1 ## 2; \
	vpsrld $22, x2 ## 2, x4 ## 2; \
	vpslld $(32 - 22), x2 ## 2, x2 ## 2; \
	vpor x4 ## 2, x2 ## 2, x2 ## 2; \
	vpxor x3 ## 2, x2 ## 2, x2 ## 2; \
	vpxor x3 ## 1, x0 ## 1, x0 ## 1; \
	vpslld $7, x1 ## 1, x4 ## 1; \
	vpxor x1 ## 1, x0 ## 1, x0 ## 1; \
	vpxor x4 ## 1, x2 ## 1, x2 ## 1; \
	vpsrld $1, x1 ## 1, x4 ## 1; \
	vpslld $(32 - 1), x1 ## 1, x1 ## 1; \
	vpor x4 ## 1, x1 ## 1, x1 ## 1; \
	vpxor x3 ## 2, x0 ## 2, x0 ## 2; \
	vpslld $7, x1 ## 2, x4 ## 2; \
	vpxor x1 ## 2, x0 ## 2, x0 ## 2; \
	vpxor x4 ## 2, x2 ## 2, x2 ## 2; \
	vpsrld $1, x1 ## 2, x4 ## 2; \
	vpslld $(32 - 1), x1 ## 2, x1 ## 2; \
	vpor x4 ## 2, x1 ## 2, x1 ## 2; \
	vpsrld $7, x3 ## 1, x4 ## 1; \
	vpslld $(32 - 7), x3 ## 1, x3 ## 1; \
	vpor x4 ## 1, x3 ## 1, x3 ## 1; \
	vpxor x0 ## 1, x1 ## 1, x1 ## 1; \
	vpslld $3, x0 ## 1, x4 ## 1; \
	vpxor x4 ## 1, x3 ## 1, x3 ## 1; \
	vpsrld $7, x3 ## 2, x4 ## 2; \
	vpslld $(32 - 7), x3 ## 2, x3 ## 2; \
	vpor x4 ## 2, x3 ## 2, x3 ## 2; \
	vpxor x0 ## 2, x1 ## 2, x1 ## 2; \
	vpslld $3, x0 ## 2, x4 ## 2; \
	vpxor x4 ## 2, x3 ## 2, x3 ## 2; \
	vpsrld $13, x0 ## 1, x4 ## 1; \
	vpslld $(32 - 13), x0 ## 1, x0 ## 1; \
	vpor x4 ## 1, x0 ## 1, x0 ## 1; \
	vpxor x2 ## 1, x1 ## 1, x1 ## 1; \
	vpxor x2 ## 1, x3 ## 1, x3 ## 1; \
	vpsrld $3, x2 ## 1, x4 ## 1; \
	vpslld $(32 - 3), x2 ## 1, x2 ## 1; \
	vpor x4 ## 1, x2 ## 1, x2 ## 1; \
	vpsrld $13, x0 ## 2, x4 ## 2; \
	vpslld $(32 - 13), x0 ## 2, x0 ## 2; \
	vpor x4 ## 2, x0 ## 2, x0 ## 2; \
	vpxor x2 ## 2, x1 ## 2, x1 ## 2; \
	vpxor x2 ## 2, x3 ## 2, x3 ## 2; \
	vpsrld $3, x2 ## 2, x4 ## 2; \
	vpslld $(32 - 3), x2 ## 2, x2 ## 2; \
	vpor x4 ## 2, x2 ## 2, x2 ## 2;
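
/*
 * KL2 is the inverse of LK2 for the decryption path: it first XORs in
 * round key i (already broadcast into RK0..RK3 by the preceding SP or
 * K2) and then undoes the linear transformation, using right rotations
 * (vpsrld/vpslld/vpor triples) and the same XOR network in reverse
 * order.
 */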

#define S(SBOX, x0, x1, x2, x3, x4) \
	SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
	SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
	SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \
	SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2);
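
/*
 * S() and SP() below rely on token pasting: the x0..x4 arguments are
 * register-name prefixes, so x0 ## 1 and x0 ## 2 expand to e.g. RA1
 * and RA2, applying the chosen S-box to both four-block register sets.
 * SP() additionally interleaves the vbroadcastss loads of round key i
 * with the S-box arithmetic, preloading RK0..RK3 for the KL2 that
 * follows it.
 */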

#define SP(SBOX, x0, x1, x2, x3, x4, i) \
	get_key(i, 0, RK0); \
	SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
	get_key(i, 2, RK2); \
	SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
	get_key(i, 3, RK3); \
	SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \
	get_key(i, 1, RK1); \
	SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x3; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1; \
	vpunpcklqdq x3, t2, x2; \
	vpunpckhqdq x3, t2, x3;

#define read_blocks(x0, x1, x2, x3, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define write_blocks(x0, x1, x2, x3, t0, t1, t2) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
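
/*
 * transpose_4x4 is a 4x4 transpose of 32-bit words: treating the four
 * input registers as rows of four words, the unpack steps leave x0
 * holding word 0 of all four blocks, x1 word 1, and so on.
 * read_blocks and write_blocks are the same transpose; since the
 * transpose is its own inverse, the former switches into sliced form
 * on entry and the latter switches back on exit.
 */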

SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

	vpcmpeqd RNOT, RNOT, RNOT;

	read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
	read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);

	K2(RA, RB, RC, RD, RE, 0);
	S(S0, RA, RB, RC, RD, RE); LK2(RC, RB, RD, RA, RE, 1);
	S(S1, RC, RB, RD, RA, RE); LK2(RE, RD, RA, RC, RB, 2);
	S(S2, RE, RD, RA, RC, RB); LK2(RB, RD, RE, RC, RA, 3);
	S(S3, RB, RD, RE, RC, RA); LK2(RC, RA, RD, RB, RE, 4);
	S(S4, RC, RA, RD, RB, RE); LK2(RA, RD, RB, RE, RC, 5);
	S(S5, RA, RD, RB, RE, RC); LK2(RC, RA, RD, RE, RB, 6);
	S(S6, RC, RA, RD, RE, RB); LK2(RD, RB, RA, RE, RC, 7);
	S(S7, RD, RB, RA, RE, RC); LK2(RC, RA, RE, RD, RB, 8);
	S(S0, RC, RA, RE, RD, RB); LK2(RE, RA, RD, RC, RB, 9);
	S(S1, RE, RA, RD, RC, RB); LK2(RB, RD, RC, RE, RA, 10);
	S(S2, RB, RD, RC, RE, RA); LK2(RA, RD, RB, RE, RC, 11);
	S(S3, RA, RD, RB, RE, RC); LK2(RE, RC, RD, RA, RB, 12);
	S(S4, RE, RC, RD, RA, RB); LK2(RC, RD, RA, RB, RE, 13);
	S(S5, RC, RD, RA, RB, RE); LK2(RE, RC, RD, RB, RA, 14);
	S(S6, RE, RC, RD, RB, RA); LK2(RD, RA, RC, RB, RE, 15);
	S(S7, RD, RA, RC, RB, RE); LK2(RE, RC, RB, RD, RA, 16);
	S(S0, RE, RC, RB, RD, RA); LK2(RB, RC, RD, RE, RA, 17);
	S(S1, RB, RC, RD, RE, RA); LK2(RA, RD, RE, RB, RC, 18);
	S(S2, RA, RD, RE, RB, RC); LK2(RC, RD, RA, RB, RE, 19);
	S(S3, RC, RD, RA, RB, RE); LK2(RB, RE, RD, RC, RA, 20);
	S(S4, RB, RE, RD, RC, RA); LK2(RE, RD, RC, RA, RB, 21);
	S(S5, RE, RD, RC, RA, RB); LK2(RB, RE, RD, RA, RC, 22);
	S(S6, RB, RE, RD, RA, RC); LK2(RD, RC, RE, RA, RB, 23);
	S(S7, RD, RC, RE, RA, RB); LK2(RB, RE, RA, RD, RC, 24);
	S(S0, RB, RE, RA, RD, RC); LK2(RA, RE, RD, RB, RC, 25);
	S(S1, RA, RE, RD, RB, RC); LK2(RC, RD, RB, RA, RE, 26);
	S(S2, RC, RD, RB, RA, RE); LK2(RE, RD, RC, RA, RB, 27);
	S(S3, RE, RD, RC, RA, RB); LK2(RA, RB, RD, RE, RC, 28);
	S(S4, RA, RB, RD, RE, RC); LK2(RB, RD, RE, RC, RA, 29);
	S(S5, RB, RD, RE, RC, RA); LK2(RA, RB, RD, RC, RE, 30);
	S(S6, RA, RB, RD, RC, RE); LK2(RD, RE, RB, RC, RA, 31);
	S(S7, RD, RE, RB, RC, RA); K2(RA, RB, RC, RD, RE, 32);

	write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
	write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);

	RET;
SYM_FUNC_END(__serpent_enc_blk8_avx)
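
/*
 * The body above is Serpent's 32 rounds: K2 applies the round-0 key,
 * each S()/LK2() pair is one round (S-box, then linear transformation
 * fused with the next round key), and the last round replaces the
 * linear transformation with the final key application K2(..., 32).
 * The register arguments rotate from round to round so that data never
 * has to be moved between registers to restore a canonical order.
 */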

SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2: decrypted blocks
	 */

	vpcmpeqd RNOT, RNOT, RNOT;

	read_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
	read_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);

	K2(RA, RB, RC, RD, RE, 32);
	SP(SI7, RA, RB, RC, RD, RE, 31); KL2(RB, RD, RA, RE, RC, 31);
	SP(SI6, RB, RD, RA, RE, RC, 30); KL2(RA, RC, RE, RB, RD, 30);
	SP(SI5, RA, RC, RE, RB, RD, 29); KL2(RC, RD, RA, RE, RB, 29);
	SP(SI4, RC, RD, RA, RE, RB, 28); KL2(RC, RA, RB, RE, RD, 28);
	SP(SI3, RC, RA, RB, RE, RD, 27); KL2(RB, RC, RD, RE, RA, 27);
	SP(SI2, RB, RC, RD, RE, RA, 26); KL2(RC, RA, RE, RD, RB, 26);
	SP(SI1, RC, RA, RE, RD, RB, 25); KL2(RB, RA, RE, RD, RC, 25);
	SP(SI0, RB, RA, RE, RD, RC, 24); KL2(RE, RC, RA, RB, RD, 24);
	SP(SI7, RE, RC, RA, RB, RD, 23); KL2(RC, RB, RE, RD, RA, 23);
	SP(SI6, RC, RB, RE, RD, RA, 22); KL2(RE, RA, RD, RC, RB, 22);
	SP(SI5, RE, RA, RD, RC, RB, 21); KL2(RA, RB, RE, RD, RC, 21);
	SP(SI4, RA, RB, RE, RD, RC, 20); KL2(RA, RE, RC, RD, RB, 20);
	SP(SI3, RA, RE, RC, RD, RB, 19); KL2(RC, RA, RB, RD, RE, 19);
	SP(SI2, RC, RA, RB, RD, RE, 18); KL2(RA, RE, RD, RB, RC, 18);
	SP(SI1, RA, RE, RD, RB, RC, 17); KL2(RC, RE, RD, RB, RA, 17);
	SP(SI0, RC, RE, RD, RB, RA, 16); KL2(RD, RA, RE, RC, RB, 16);
	SP(SI7, RD, RA, RE, RC, RB, 15); KL2(RA, RC, RD, RB, RE, 15);
	SP(SI6, RA, RC, RD, RB, RE, 14); KL2(RD, RE, RB, RA, RC, 14);
	SP(SI5, RD, RE, RB, RA, RC, 13); KL2(RE, RC, RD, RB, RA, 13);
	SP(SI4, RE, RC, RD, RB, RA, 12); KL2(RE, RD, RA, RB, RC, 12);
	SP(SI3, RE, RD, RA, RB, RC, 11); KL2(RA, RE, RC, RB, RD, 11);
	SP(SI2, RA, RE, RC, RB, RD, 10); KL2(RE, RD, RB, RC, RA, 10);
	SP(SI1, RE, RD, RB, RC, RA, 9); KL2(RA, RD, RB, RC, RE, 9);
	SP(SI0, RA, RD, RB, RC, RE, 8); KL2(RB, RE, RD, RA, RC, 8);
	SP(SI7, RB, RE, RD, RA, RC, 7); KL2(RE, RA, RB, RC, RD, 7);
	SP(SI6, RE, RA, RB, RC, RD, 6); KL2(RB, RD, RC, RE, RA, 6);
	SP(SI5, RB, RD, RC, RE, RA, 5); KL2(RD, RA, RB, RC, RE, 5);
	SP(SI4, RD, RA, RB, RC, RE, 4); KL2(RD, RB, RE, RC, RA, 4);
	SP(SI3, RD, RB, RE, RC, RA, 3); KL2(RE, RD, RA, RC, RB, 3);
	SP(SI2, RE, RD, RA, RC, RB, 2); KL2(RD, RB, RC, RA, RE, 2);
	SP(SI1, RD, RB, RC, RA, RE, 1); KL2(RE, RB, RC, RA, RD, 1);
	S(SI0, RE, RB, RC, RA, RD); K2(RC, RD, RB, RE, RA, 0);

	write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
	write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);

	RET;
SYM_FUNC_END(__serpent_dec_blk8_avx)
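
/*
 * Decryption runs the same structure in reverse with the inverse
 * S-boxes SI7..SI0.  The final SI0/K2 pair leaves the plaintext words
 * in a permuted register order, which is why the output comment and
 * the write_blocks calls use RC, RD, RB, RE rather than RA..RD.
 */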

SYM_TYPED_FUNC_START(serpent_ecb_enc_8way_avx)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __serpent_enc_blk8_avx;

	store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	RET;
SYM_FUNC_END(serpent_ecb_enc_8way_avx)
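
/*
 * For reference, a sketch of the C-side declarations these exported
 * helpers are expected to match (the exact context type lives in the
 * C glue code; a flat expanded-key array is assumed, see get_key):
 *
 *	asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
 *						 const u8 *src);
 *	asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
 *						 const u8 *src);
 *	asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
 *						 const u8 *src);
 *
 * with dst and src each addressing eight consecutive 16-byte blocks.
 */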

SYM_TYPED_FUNC_START(serpent_ecb_dec_8way_avx)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __serpent_dec_blk8_avx;

	store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

	FRAME_END
	RET;
SYM_FUNC_END(serpent_ecb_dec_8way_avx)

SYM_TYPED_FUNC_START(serpent_cbc_dec_8way_avx)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __serpent_dec_blk8_avx;

	store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);

	FRAME_END
	RET;
SYM_FUNC_END(serpent_cbc_dec_8way_avx)
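
/*
 * store_cbc_8way (from glue_helper-asm-avx.S) performs the CBC
 * chaining on the way out: each decrypted block after the first is
 * XORed with the preceding ciphertext block re-read from src, which is
 * why src (%rdx) is passed alongside dst.  Chaining the first block
 * with the IV is assumed to be left to the C caller.
 */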