/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// AES-GCM implementation for x86_64 CPUs that support the following CPU
// features: VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2
//
// Copyright 2024 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License"). You may obtain a copy
// of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <linux/linkage.h>

.section .rodata
.p2align 6

// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
	.octa	0x000102030405060708090a0b0c0d0e0f

// This is the GHASH reducing polynomial without its constant term, i.e.
// x^128 + x^7 + x^2 + x, represented using the backwards mapping
// between bits and polynomial coefficients.
//
// Alternatively, it can be interpreted as the naturally-ordered
// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
// "reversed" GHASH reducing polynomial without its x^128 term.
.Lgfpoly:
	.octa	0xc2000000000000000000000000000001

// Same as above, but with the (1 << 64) bit set.
.Lgfpoly_and_internal_carrybit:
	.octa	0xc2000000000000010000000000000001

// Values needed to prepare the initial vector of counter blocks.
.Lctr_pattern:
	.octa	0
	.octa	1
	.octa	2
	.octa	3

// The number of AES blocks per vector, as a 128-bit value.
.Linc_4blocks:
	.octa	4

// Number of powers of the hash key stored in the key struct. The powers are
// stored from highest (H^NUM_H_POWERS) to lowest (H^1).
#define NUM_H_POWERS		16

// Offset to AES key length (in bytes) in the key struct
#define OFFSETOF_AESKEYLEN	0

// Offset to AES round keys in the key struct
#define OFFSETOF_AESROUNDKEYS	16

// Offset to start of hash key powers array in the key struct
#define OFFSETOF_H_POWERS	320

// Offset to end of hash key powers array in the key struct.
//
// This is immediately followed by three zeroized padding blocks, which are
// included so that partial vectors can be handled more easily. E.g. if two
// blocks remain, we load the 4 values [H^2, H^1, 0, 0]. The most padding
// blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
#define OFFSETOFEND_H_POWERS	(OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
.text

// The _ghash_mul_step macro does one step of GHASH multiplication of the
// 128-bit lanes of \a by the corresponding 128-bit lanes of \b and stores the
// reduced products in \dst. \t0, \t1, and \t2 are temporary registers of the
// same size as \a and \b. To complete all steps, this must be invoked with
// \i=0 through \i=9. The division into steps allows users of this macro to
// optionally interleave the computation with other instructions. Users of this
// macro must preserve the parameter registers across steps.
//
// The multiplications are done in GHASH's representation of the finite field
// GF(2^128). Elements of GF(2^128) are represented as binary polynomials
// (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial
// G. The GCM specification uses G = x^128 + x^7 + x^2 + x + 1. Addition is
// just XOR, while multiplication is more complex and has two parts: (a) do
// carryless multiplication of two 128-bit input polynomials to get a 256-bit
// intermediate product polynomial, and (b) reduce the intermediate product to
// 128 bits by adding multiples of G that cancel out terms in it. (Adding
// multiples of G doesn't change which field element the polynomial represents.)
//
// Unfortunately, the GCM specification maps bits to/from polynomial
// coefficients backwards from the natural order. In each byte it specifies the
// highest bit to be the lowest order polynomial coefficient, *not* the highest!
// This makes it nontrivial to work with the GHASH polynomials. We could
// reflect the bits, but x86 doesn't have an instruction that does that.
//
// Instead, we operate on the values without bit-reflecting them. This *mostly*
// just works, since XOR and carryless multiplication are symmetric with respect
// to bit order, but it has some consequences. First, due to GHASH's byte
// order, by skipping bit reflection, *byte* reflection becomes necessary to
// give the polynomial terms a consistent order. E.g., considering an N-bit
// value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0
// through N-1 of the byte-reflected value represent the coefficients of x^(N-1)
// through x^0, whereas bits 0 through N-1 of the non-byte-reflected value
// represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8) which can't be worked
// with. Fortunately, x86's vpshufb instruction can do byte reflection.
//
// Second, forgoing the bit reflection causes an extra multiple of x (still
// using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each
// multiplication. This is because an M-bit by N-bit carryless multiplication
// really produces a (M+N-1)-bit product, but in practice it's zero-extended to
// M+N bits. In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits
// to polynomial coefficients backwards, this zero-extension actually changes
// the product by introducing an extra factor of x. Therefore, users of this
// macro must ensure that one of the inputs has an extra factor of x^-1, i.e.
// the multiplicative inverse of x, to cancel out the extra x.
//
// Third, the backwards coefficients convention is just confusing to work with,
// since it makes "low" and "high" in the polynomial math mean the opposite of
// their normal meaning in computer programming. This can be solved by using an
// alternative interpretation: the polynomial coefficients are understood to be
// in the natural order, and the multiplication is actually \a * \b * x^-128 mod
// x^128 + x^127 + x^126 + x^121 + 1. This doesn't change the inputs, outputs,
// or the implementation at all; it just changes the mathematical interpretation
// of what each instruction is doing. Starting from here, we'll use this
// alternative interpretation, as it's easier to understand the code that way.
//
// Moving onto the implementation, the vpclmulqdq instruction does 64 x 64 =>
// 128-bit carryless multiplication, so we break the 128 x 128 multiplication
// into parts as follows (the _L and _H suffixes denote low and high 64 bits):
//
//	LO = a_L * b_L
//	MI = (a_L * b_H) + (a_H * b_L)
//	HI = a_H * b_H
//
// The 256-bit product is x^128*HI + x^64*MI + LO. LO, MI, and HI are 128-bit.
// Note that MI "overlaps" with LO and HI. We don't consolidate MI into LO and
// HI right away, since the way the reduction works makes that unnecessary.
//
// For the reduction, we cancel out the low 128 bits by adding multiples of G =
// x^128 + x^127 + x^126 + x^121 + 1. This is done by two iterations, each of
// which cancels out the next lowest 64 bits. Consider a value x^64*A + B,
// where A and B are 128-bit. Adding B_L*G to that value gives:
//
//	x^64*A + B + B_L*G
//	= x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)
//	= x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L
//	= x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L
//	= x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))
//
// So: if we sum A, B with its halves swapped, and the low half of B times x^63
// + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the
// original value x^64*A + B. I.e., the low 64 bits got canceled out.
//
// We just need to apply this twice: first to fold LO into MI, and second to
// fold the updated MI into HI.
//
// The needed three-argument XORs are done using the vpternlogd instruction with
// immediate 0x96, since this is faster than two vpxord instructions.
//
// A potential optimization, assuming that b is fixed per-key (if a is fixed
// per-key it would work the other way around), is to use one iteration of the
// reduction described above to precompute a value c such that x^64*c = b mod G,
// and then multiply a_L by c (and implicitly by x^64) instead of by b:
//
//	MI = (a_L * c_L) + (a_H * b_L)
//	HI = (a_L * c_H) + (a_H * b_H)
//
// This would eliminate the LO part of the intermediate product, which would
// eliminate the need to fold LO into MI. This would save two instructions,
// including a vpclmulqdq. However, we currently don't use this optimization
// because it would require twice as many per-key precomputed values.
//
// Using Karatsuba multiplication instead of "schoolbook" multiplication
// similarly would save a vpclmulqdq but does not seem to be worth it.
.macro	_ghash_mul_step	i, a, b, dst, gfpoly, t0, t1, t2
.if \i == 0
	vpclmulqdq	$0x00, \a, \b, \t0	  // LO = a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	  // MI_0 = a_L * b_H
.elseif \i == 1
	vpclmulqdq	$0x10, \a, \b, \t2	  // MI_1 = a_H * b_L
.elseif \i == 2
	vpxord		\t2, \t1, \t1		  // MI = MI_0 + MI_1
.elseif \i == 3
	vpclmulqdq	$0x01, \t0, \gfpoly, \t2  // LO_L*(x^63 + x^62 + x^57)
.elseif \i == 4
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
.elseif \i == 5
	vpternlogd	$0x96, \t2, \t0, \t1	  // Fold LO into MI
.elseif \i == 6
	vpclmulqdq	$0x11, \a, \b, \dst	  // HI = a_H * b_H
.elseif \i == 7
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
.elseif \i == 9
	vpternlogd	$0x96, \t0, \t1, \dst	  // Fold MI into HI
.endif
.endm
// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
// the reduced products in \dst. See _ghash_mul_step for full explanation.
.macro	_ghash_mul	a, b, dst, gfpoly, t0, t1, t2
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_mul_step	\i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
.endr
.endm
// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
// *unreduced* products to \lo, \mi, and \hi. The caller is responsible for the
// final reduction (see _ghash_reduce).
.macro	_ghash_mul_noreduce	a, b, lo, mi, hi, t0, t1, t2, t3
	vpclmulqdq	$0x00, \a, \b, \t0	// a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	// a_L * b_H
	vpclmulqdq	$0x10, \a, \b, \t2	// a_H * b_L
	vpclmulqdq	$0x11, \a, \b, \t3	// a_H * b_H
	vpxord		\t0, \lo, \lo
	vpternlogd	$0x96, \t2, \t1, \mi	// mi ^= t1 ^ t2 (3-way XOR)
	vpxord		\t3, \hi, \hi
.endm
// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
// reduced products in \hi. See _ghash_mul_step for explanation of reduction.
.macro	_ghash_reduce	lo, mi, hi, gfpoly, t0
	vpclmulqdq	$0x01, \lo, \gfpoly, \t0	// LO_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \lo, \lo			// Swap halves of LO
	vpternlogd	$0x96, \t0, \lo, \mi		// Fold LO into MI
	vpclmulqdq	$0x01, \mi, \gfpoly, \t0	// MI_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \mi, \mi			// Swap halves of MI
	vpternlogd	$0x96, \t0, \mi, \hi		// Fold MI into HI
.endm
// This is a specialized version of _ghash_mul that computes \a * \a, i.e. it
// squares \a. It skips computing MI = (a_L * a_H) + (a_H * a_L) = 0.
.macro	_ghash_square	a, dst, gfpoly, t0, t1
	vpclmulqdq	$0x00, \a, \a, \t0	 // LO = a_L * a_L
	vpclmulqdq	$0x11, \a, \a, \dst	 // HI = a_H * a_H
	vpclmulqdq	$0x01, \t0, \gfpoly, \t1 // LO_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t0, \t0		 // Swap halves of LO
	vpxord		\t0, \t1, \t1		 // Fold LO into MI
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0 // MI_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t1, \t1		 // Swap halves of MI
	vpternlogd	$0x96, \t0, \t1, \dst	 // Fold MI into HI
.endm
// void aes_gcm_precompute_vaes_avx512(struct aes_gcm_key_vaes_avx512 *key);
//
// Given the expanded AES key |key->base.aes_key|, derive the GHASH subkey and
// initialize |key->h_powers| and |key->padding|.
SYM_FUNC_START(aes_gcm_precompute_vaes_avx512)

	// Function arguments
	.set	KEY,		%rdi

	// Additional local variables.
	// %zmm[0-2] and %rax are used as temporaries.
	.set	POWERS_PTR,	%rsi
	.set	RNDKEYLAST_PTR,	%rdx
	.set	H_CUR,		%zmm3
	.set	H_CUR_YMM,	%ymm3
	.set	H_CUR_XMM,	%xmm3
	.set	H_INC,		%zmm4
	.set	H_INC_YMM,	%ymm4
	.set	H_INC_XMM,	%xmm4
	.set	GFPOLY,		%zmm5
	.set	GFPOLY_YMM,	%ymm5
	.set	GFPOLY_XMM,	%xmm5

	// Get pointer to lowest set of key powers (located at end of array).
	lea		OFFSETOFEND_H_POWERS-64(KEY), POWERS_PTR

	// Encrypt an all-zeroes block to get the raw hash subkey.
	// RNDKEYLAST_PTR is computed from the key length: the number of AES
	// rounds is keylen/4 + 6, and each round key is 16 bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), %eax
	lea		OFFSETOF_AESROUNDKEYS+6*16(KEY,%rax,4), RNDKEYLAST_PTR
	vmovdqu		OFFSETOF_AESROUNDKEYS(KEY), %xmm0  // Round key 0 == zeroes block XOR round key 0
	add		$OFFSETOF_AESROUNDKEYS+16, KEY
1:
	vaesenc		(KEY), %xmm0, %xmm0
	add		$16, KEY
	cmp		KEY, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	(RNDKEYLAST_PTR), %xmm0, %xmm0

	// Reflect the bytes of the raw hash subkey.
	vpshufb		.Lbswap_mask(%rip), %xmm0, H_CUR_XMM

	// Zeroize the padding blocks.
	vpxor		%xmm0, %xmm0, %xmm0
	vmovdqu		%ymm0, 64(POWERS_PTR)
	vmovdqu		%xmm0, 64+2*16(POWERS_PTR)

	// Finish preprocessing the first key power, H^1. Since this GHASH
	// implementation operates directly on values with the backwards bit
	// order specified by the GCM standard, it's necessary to preprocess the
	// raw key as follows. First, reflect its bytes. Second, multiply it
	// by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards
	// interpretation of polynomial coefficients), which can also be
	// interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121
	// + 1 using the alternative, natural interpretation of polynomial
	// coefficients. For details, see the comment above _ghash_mul_step.
	//
	// Either way, for the multiplication the concrete operation performed
	// is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2
	// << 120) | 1 if a 1 bit was carried out. However, there's no 128-bit
	// wide shift instruction, so instead double each of the two 64-bit
	// halves and incorporate the internal carry bit into the value XOR'd.
	vpshufd		$0xd3, H_CUR_XMM, %xmm0
	vpsrad		$31, %xmm0, %xmm0
	vpaddq		H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
	// H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
	vpternlogd	$0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM

	// Load the gfpoly constant.
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Square H^1 to get H^2.
	//
	// Note that as with H^1, all higher key powers also need an extra
	// factor of x^-1 (or x using the natural interpretation). Nothing
	// special needs to be done to make this happen, though: H^1 * H^1 would
	// end up with two factors of x^-1, but the multiplication consumes one.
	// So the product H^2 ends up with the desired one factor of x^-1.
	_ghash_square	H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, %xmm0, %xmm1

	// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
	vinserti128	$1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
	vinserti128	$1, H_INC_XMM, H_INC_YMM, H_INC_YMM

	// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
	_ghash_mul	H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
			%ymm0, %ymm1, %ymm2
	vinserti64x4	$1, H_CUR_YMM, H_INC, H_CUR
	vshufi64x2	$0, H_INC, H_INC, H_INC

	// Store the lowest set of key powers.
	vmovdqu8	H_CUR, (POWERS_PTR)

	// Compute and store the remaining key powers.
	// Repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
	// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
	mov		$3, %eax	// 3 more vectors of 4 powers each (16 total)
.Lprecompute_next:
	sub		$64, POWERS_PTR
	_ghash_mul	H_INC, H_CUR, H_CUR, GFPOLY, %zmm0, %zmm1, %zmm2
	vmovdqu8	H_CUR, (POWERS_PTR)
	dec		%eax
	jnz		.Lprecompute_next

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_precompute_vaes_avx512)
// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
// the result in \dst_xmm. This implicitly zeroizes the other lanes of dst.
.macro	_horizontal_xor	src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
	vextracti32x4	$1, \src, \t0_xmm
	vextracti32x4	$2, \src, \t1_xmm
	vextracti32x4	$3, \src, \t2_xmm
	vpxord		\t0_xmm, \src_xmm, \dst_xmm
	vpternlogd	$0x96, \t1_xmm, \t2_xmm, \dst_xmm	// 3-way XOR
.endm
// Do one step of the GHASH update of the data blocks given in the vector
// registers GHASHDATA[0-3]. \i specifies the step to do, 0 through 9. The
// division into steps allows users of this macro to optionally interleave the
// computation with other instructions. This macro uses the vector register
// GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;
// H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and
// GHASHTMP[0-2] as temporaries. This macro handles the byte-reflection of the
// data blocks. The parameter registers must be preserved across steps.
//
// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
// operations are vectorized operations on 512-bit vectors of 128-bit blocks.
// The vectorized terms correspond to the following non-vectorized terms:
//
//	H_POW4*(GHASHDATA0 + GHASH_ACC) => H^16*(blk0 + GHASH_ACC_XMM),
//	H^15*(blk1 + 0), H^14*(blk2 + 0), and H^13*(blk3 + 0)
//	H_POW3*GHASHDATA1 => H^12*blk4, H^11*blk5, H^10*blk6, and H^9*blk7
//	H_POW2*GHASHDATA2 => H^8*blk8, H^7*blk9, H^6*blk10, and H^5*blk11
//	H_POW1*GHASHDATA3 => H^4*blk12, H^3*blk13, H^2*blk14, and H^1*blk15
//
// More concretely, this code does:
//	- Do vectorized "schoolbook" multiplications to compute the intermediate
//	  256-bit product of each block and its corresponding hash key power.
//	- Sum (XOR) the intermediate 256-bit products across vectors.
//	- Do a vectorized reduction of these 256-bit intermediate values to
//	  128-bits each.
//	- Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
//
// See _ghash_mul_step for the full explanation of the operations performed for
// each individual finite field multiplication and reduction.
.macro	_ghash_step_4x	i
.if \i == 0
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASH_ACC, GHASHDATA0, GHASHDATA0
	vpshufb		BSWAP_MASK, GHASHDATA1, GHASHDATA1
	vpshufb		BSWAP_MASK, GHASHDATA2, GHASHDATA2
.elseif \i == 1
	vpshufb		BSWAP_MASK, GHASHDATA3, GHASHDATA3
	vpclmulqdq	$0x00, H_POW4, GHASHDATA0, GHASH_ACC	// LO_0
	vpclmulqdq	$0x00, H_POW3, GHASHDATA1, GHASHTMP0	// LO_1
	vpclmulqdq	$0x00, H_POW2, GHASHDATA2, GHASHTMP1	// LO_2
.elseif \i == 2
	vpxord		GHASHTMP0, GHASH_ACC, GHASH_ACC		// sum(LO_{1,0})
	vpclmulqdq	$0x00, H_POW1, GHASHDATA3, GHASHTMP2	// LO_3
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC	// LO = sum(LO_{3,2,1,0})
	vpclmulqdq	$0x01, H_POW4, GHASHDATA0, GHASHTMP0	// MI_0
.elseif \i == 3
	vpclmulqdq	$0x01, H_POW3, GHASHDATA1, GHASHTMP1	// MI_1
	vpclmulqdq	$0x01, H_POW2, GHASHDATA2, GHASHTMP2	// MI_2
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{2,1,0})
	vpclmulqdq	$0x01, H_POW1, GHASHDATA3, GHASHTMP1	// MI_3
.elseif \i == 4
	vpclmulqdq	$0x10, H_POW4, GHASHDATA0, GHASHTMP2	// MI_4
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{4,3,2,1,0})
	vpclmulqdq	$0x10, H_POW3, GHASHDATA1, GHASHTMP1	// MI_5
	vpclmulqdq	$0x10, H_POW2, GHASHDATA2, GHASHTMP2	// MI_6
.elseif \i == 5
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{6,5,4,3,2,1,0})
	vpclmulqdq	$0x01, GHASH_ACC, GFPOLY, GHASHTMP2	// LO_L*(x^63 + x^62 + x^57)
	vpclmulqdq	$0x10, H_POW1, GHASHDATA3, GHASHTMP1	// MI_7
	vpxord		GHASHTMP1, GHASHTMP0, GHASHTMP0	// MI = sum(MI_{7,6,5,4,3,2,1,0})
.elseif \i == 6
	vpshufd		$0x4e, GHASH_ACC, GHASH_ACC	// Swap halves of LO
	vpclmulqdq	$0x11, H_POW4, GHASHDATA0, GHASHDATA0	// HI_0
	vpclmulqdq	$0x11, H_POW3, GHASHDATA1, GHASHDATA1	// HI_1
	vpclmulqdq	$0x11, H_POW2, GHASHDATA2, GHASHDATA2	// HI_2
.elseif \i == 7
	vpternlogd	$0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0	// Fold LO into MI
	vpclmulqdq	$0x11, H_POW1, GHASHDATA3, GHASHDATA3	// HI_3
	vpternlogd	$0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})
	vpclmulqdq	$0x01, GHASHTMP0, GFPOLY, GHASHTMP1	// MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpxord		GHASHDATA3, GHASHDATA0, GHASH_ACC	// HI = sum(HI_{3,2,1,0})
	vpshufd		$0x4e, GHASHTMP0, GHASHTMP0	// Swap halves of MI
	vpternlogd	$0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC	// Fold MI into HI
.elseif \i == 9
	_horizontal_xor	GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
.endif
.endm
// Update GHASH with four vectors of data blocks. See _ghash_step_4x for full
// explanation.
.macro	_ghash_4x
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_step_4x	\i
.endr
.endm
// void aes_gcm_aad_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//				       u8 ghash_acc[16],
//				       const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|. On the first call, |ghash_acc| must be all
// zeroes. |aadlen| must be a multiple of 16, except on the last call where it
// can be any length. The caller must do any buffering needed to ensure this.
//
// This handles large amounts of AAD efficiently, while also keeping overhead
// low for small amounts which is the common case. TLS and IPsec use less than
// one block of AAD, but (uncommonly) other use cases may use much more.
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx512)

	// Function arguments
	.set	KEY,		%rdi
	.set	GHASH_ACC_PTR,	%rsi
	.set	AAD,		%rdx
	.set	AADLEN,		%ecx
	.set	AADLEN64,	%rcx	// Zero-extend AADLEN before using!

	// Additional local variables.
	// %rax and %k1 are used as temporary registers.
	.set	GHASHDATA0,	%zmm0
	.set	GHASHDATA0_XMM,	%xmm0
	.set	GHASHDATA1,	%zmm1
	.set	GHASHDATA1_XMM,	%xmm1
	.set	GHASHDATA2,	%zmm2
	.set	GHASHDATA2_XMM,	%xmm2
	.set	GHASHDATA3,	%zmm3
	.set	BSWAP_MASK,	%zmm4
	.set	BSWAP_MASK_XMM,	%xmm4
	.set	GHASH_ACC,	%zmm5
	.set	GHASH_ACC_XMM,	%xmm5
	.set	H_POW4,		%zmm6
	.set	H_POW3,		%zmm7
	.set	H_POW2,		%zmm8
	.set	H_POW1,		%zmm9
	.set	H_POW1_XMM,	%xmm9
	.set	GFPOLY,		%zmm10
	.set	GFPOLY_XMM,	%xmm10
	.set	GHASHTMP0,	%zmm11
	.set	GHASHTMP1,	%zmm12
	.set	GHASHTMP2,	%zmm13

	// Load the GHASH accumulator.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM

	// Check for the common case of AADLEN <= 16, as well as AADLEN == 0.
	cmp		$16, AADLEN
	jg		.Laad_more_than_16bytes
	test		AADLEN, AADLEN
	jz		.Laad_done

	// Fast path: update GHASH with 1 <= AADLEN <= 16 bytes of AAD.
	vmovdqu		.Lbswap_mask(%rip), BSWAP_MASK_XMM
	vmovdqu		.Lgfpoly(%rip), GFPOLY_XMM
	mov		$-1, %eax
	bzhi		AADLEN, %eax, %eax	// Byte mask with AADLEN one bits
	kmovd		%eax, %k1
	vmovdqu8	(AAD), GHASHDATA0_XMM{%k1}{z}	// Zero-padded load
	vmovdqu		OFFSETOFEND_H_POWERS-16(KEY), H_POW1_XMM	// H^1
	vpshufb		BSWAP_MASK_XMM, GHASHDATA0_XMM, GHASHDATA0_XMM
	vpxor		GHASHDATA0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	_ghash_mul	H_POW1_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM, GFPOLY_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	jmp		.Laad_done

.Laad_more_than_16bytes:
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// If AADLEN >= 256, update GHASH with 256 bytes of AAD at a time.
	sub		$256, AADLEN
	jl		.Laad_loop_4x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4	// [H^16, H^15, H^14, H^13]
	vmovdqu8	OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3	// [H^12, H^11, H^10, H^9]
	vmovdqu8	OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2	// [H^8, H^7, H^6, H^5]
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1	// [H^4, H^3, H^2, H^1]
.Laad_loop_4x:
	vmovdqu8	0*64(AAD), GHASHDATA0
	vmovdqu8	1*64(AAD), GHASHDATA1
	vmovdqu8	2*64(AAD), GHASHDATA2
	vmovdqu8	3*64(AAD), GHASHDATA3
	_ghash_4x
	add		$256, AAD
	sub		$256, AADLEN
	jge		.Laad_loop_4x
.Laad_loop_4x_done:

	// If AADLEN >= 64, update GHASH with 64 bytes of AAD at a time.
	add		$192, AADLEN
	jl		.Laad_loop_1x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1	// [H^4, H^3, H^2, H^1]
.Laad_loop_1x:
	vmovdqu8	(AAD), GHASHDATA0
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor	GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	add		$64, AAD
	sub		$64, AADLEN
	jge		.Laad_loop_1x
.Laad_loop_1x_done:

	// Update GHASH with the remaining 0 <= AADLEN < 64 bytes of AAD.
	add		$64, AADLEN
	jz		.Laad_done
	mov		$-1, %rax
	bzhi		AADLEN64, %rax, %rax	// Byte mask with AADLEN one bits
	kmovq		%rax, %k1
	vmovdqu8	(AAD), GHASHDATA0{%k1}{z}	// Zero-padded load
	neg		AADLEN64
	and		$~15, AADLEN64	// -round_up(AADLEN, 16)
	// Select hash key powers so the highest-numbered blocks pair with the
	// lowest powers; zeroized padding blocks follow H^1 in the key struct.
	vmovdqu8	OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor	GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM

.Laad_done:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)
// Do one non-last round of AES encryption on the blocks in %zmm[0-3] using the
// round key that has been broadcast to all 128-bit lanes of \round_key.
.macro	_vaesenc_4x	round_key
	vaesenc		\round_key, %zmm0, %zmm0
	vaesenc		\round_key, %zmm1, %zmm1
	vaesenc		\round_key, %zmm2, %zmm2
	vaesenc		\round_key, %zmm3, %zmm3
.endm
// Start the AES encryption of four vectors of counter blocks.
.macro	_ctr_begin_4x

	// Increment LE_CTR four times to generate four vectors of little-endian
	// counter blocks, swap each to big-endian, and store them in %zmm[0-3].
	vpshufb		BSWAP_MASK, LE_CTR, %zmm0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm1
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm2
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm3
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR

	// AES "round zero": XOR in the zero-th round key.
	vpxord		RNDKEY0, %zmm0, %zmm0
	vpxord		RNDKEY0, %zmm1, %zmm1
	vpxord		RNDKEY0, %zmm2, %zmm2
	vpxord		RNDKEY0, %zmm3, %zmm3
.endm
// Do the last AES round for four vectors of counter blocks %zmm[0-3], XOR
// source data with the resulting keystream, and write the result to DST and
// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)
.macro	_aesenclast_and_xor_4x
	// XOR the source data with the last round key, saving the result in
	// GHASHDATA[0-3]. This reduces latency by taking advantage of the
	// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
	vpxord		0*64(SRC), RNDKEYLAST, GHASHDATA0
	vpxord		1*64(SRC), RNDKEYLAST, GHASHDATA1
	vpxord		2*64(SRC), RNDKEYLAST, GHASHDATA2
	vpxord		3*64(SRC), RNDKEYLAST, GHASHDATA3

	// Do the last AES round. This handles the XOR with the source data
	// too, as per the optimization described above.
	vaesenclast	GHASHDATA0, %zmm0, GHASHDATA0
	vaesenclast	GHASHDATA1, %zmm1, GHASHDATA1
	vaesenclast	GHASHDATA2, %zmm2, GHASHDATA2
	vaesenclast	GHASHDATA3, %zmm3, GHASHDATA3

	// Store the en/decrypted data to DST.
	vmovdqu8	GHASHDATA0, 0*64(DST)
	vmovdqu8	GHASHDATA1, 1*64(DST)
	vmovdqu8	GHASHDATA2, 2*64(DST)
	vmovdqu8	GHASHDATA3, 3*64(DST)
.endm
674
// void aes_gcm_{enc,dec}_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//					      const u32 le_ctr[4], u8 ghash_acc[16],
//					      const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one).  The function computes the
// next portion of the CTR keystream, XOR's it with |datalen| bytes from |src|,
// and writes the resulting encrypted or decrypted data to |dst|.  It also
// updates the GHASH accumulator |ghash_acc| using the next |datalen| ciphertext
// bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length.  The caller must do any buffering needed to ensure this.  Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format.  This
// function loads the counter from |le_ctr| and increments the loaded counter as
// needed, but it does *not* store the updated counter back to |le_ctr|.  The
// caller must update |le_ctr| if any more data segments follow.  Internally,
// only the low 32-bit word of the counter is incremented, following the GCM
// standard.
.macro	_aes_gcm_update	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	SRC,		%rcx
	.set	DST,		%r8
	.set	DATALEN,	%r9d
	.set	DATALEN64,	%r9	// Zero-extend DATALEN before using!

	// Additional local variables

	// %rax and %k1 are used as temporary registers.  LE_CTR_PTR is also
	// available as a temporary register after the counter is loaded.

	// AES key length in bytes
	.set	AESKEYLEN,	%r10d
	.set	AESKEYLEN64,	%r10

	// Pointer to the last AES round key for the chosen AES variant
	.set	RNDKEYLAST_PTR,	%r11

	// In the main loop, %zmm[0-3] are used as AES input and output.
	// Elsewhere they are used as temporary registers.

	// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
	.set	GHASHDATA0,	%zmm4
	.set	GHASHDATA0_XMM,	%xmm4
	.set	GHASHDATA1,	%zmm5
	.set	GHASHDATA1_XMM,	%xmm5
	.set	GHASHDATA2,	%zmm6
	.set	GHASHDATA2_XMM,	%xmm6
	.set	GHASHDATA3,	%zmm7

	// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
	// using vpshufb, copied to all 128-bit lanes.
	.set	BSWAP_MASK,	%zmm8

	// RNDKEY temporarily holds the next AES round key.
	.set	RNDKEY,		%zmm9

	// GHASH_ACC is the accumulator variable for GHASH.  When fully reduced,
	// only the lowest 128-bit lane can be nonzero.  When not fully reduced,
	// more than one lane may be used, and they need to be XOR'd together.
	.set	GHASH_ACC,	%zmm10
	.set	GHASH_ACC_XMM,	%xmm10

	// LE_CTR_INC is the vector of 32-bit words that need to be added to a
	// vector of little-endian counter blocks to advance it forwards.
	.set	LE_CTR_INC,	%zmm11

	// LE_CTR contains the next set of little-endian counter blocks.
	.set	LE_CTR,		%zmm12

	// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
	// copied to all 128-bit lanes.  RNDKEY0 is the zero-th round key,
	// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
	.set	RNDKEY0,	%zmm13
	.set	RNDKEYLAST,	%zmm14
	.set	RNDKEY_M9,	%zmm15
	.set	RNDKEY_M8,	%zmm16
	.set	RNDKEY_M7,	%zmm17
	.set	RNDKEY_M6,	%zmm18
	.set	RNDKEY_M5,	%zmm19
	.set	RNDKEY_M4,	%zmm20
	.set	RNDKEY_M3,	%zmm21
	.set	RNDKEY_M2,	%zmm22
	.set	RNDKEY_M1,	%zmm23

	// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x.  These
	// cannot coincide with anything used for AES encryption, since for
	// performance reasons GHASH and AES encryption are interleaved.
	.set	GHASHTMP0,	%zmm24
	.set	GHASHTMP1,	%zmm25
	.set	GHASHTMP2,	%zmm26

	// H_POW[4-1] contain the powers of the hash key H^16...H^1.  The
	// descending numbering reflects the order of the key powers.
	.set	H_POW4,		%zmm27
	.set	H_POW3,		%zmm28
	.set	H_POW2,		%zmm29
	.set	H_POW1,		%zmm30

	// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
	.set	GFPOLY,		%zmm31

	// Load some constants.
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator and the starting counter.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM
	vbroadcasti32x4	(LE_CTR_PTR), LE_CTR

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Make RNDKEYLAST_PTR point to the last AES round key.  This is the
	// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
	// respectively.  Then load the zero-th and last round keys.
	lea		OFFSETOF_AESROUNDKEYS+6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
	vbroadcasti32x4	OFFSETOF_AESROUNDKEYS(KEY), RNDKEY0
	vbroadcasti32x4	(RNDKEYLAST_PTR), RNDKEYLAST

	// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
	vpaddd		.Lctr_pattern(%rip), LE_CTR, LE_CTR

	// Load 4 into all 128-bit lanes of LE_CTR_INC.
	vbroadcasti32x4	.Linc_4blocks(%rip), LE_CTR_INC

	// If there are at least 256 bytes of data, then continue into the loop
	// that processes 256 bytes of data at a time.  Otherwise skip it.
	//
	// Pre-subtracting 256 from DATALEN saves an instruction from the main
	// loop and also ensures that at least one write always occurs to
	// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
	sub		$256, DATALEN
	jl		.Lcrypt_loop_4x_done\@

	// Load powers of the hash key.
	vmovdqu8	OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
	vmovdqu8	OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
	vmovdqu8	OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1

	// Main loop: en/decrypt and hash 4 vectors at a time.
	//
	// When possible, interleave the AES encryption of the counter blocks
	// with the GHASH update of the ciphertext blocks.  This improves
	// performance on many CPUs because the execution ports used by the VAES
	// instructions often differ from those used by vpclmulqdq and other
	// instructions used in GHASH.  For example, many Intel CPUs dispatch
	// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.
	//
	// The interleaving is easiest to do during decryption, since during
	// decryption the ciphertext blocks are immediately available.  For
	// encryption, instead encrypt the first set of blocks, then hash those
	// blocks while encrypting the next set of blocks, repeat that as
	// needed, and finally hash the last set of blocks.

.if \enc
	// Encrypt the first 4 vectors of plaintext blocks.  Leave the resulting
	// ciphertext in GHASHDATA[0-3] for GHASH.
	_ctr_begin_4x
	lea		OFFSETOF_AESROUNDKEYS+16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	_vaesenc_4x	RNDKEY
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	_aesenclast_and_xor_4x
	add		$256, SRC
	add		$256, DST
	sub		$256, DATALEN
	jl		.Lghash_last_ciphertext_4x\@
.endif

	// Cache as many additional AES round keys as possible.
.irp i, 9,8,7,6,5,4,3,2,1
	vbroadcasti32x4	-\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr

.Lcrypt_loop_4x\@:

	// If decrypting, load more ciphertext blocks into GHASHDATA[0-3].  If
	// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
.if !\enc
	vmovdqu8	0*64(SRC), GHASHDATA0
	vmovdqu8	1*64(SRC), GHASHDATA1
	vmovdqu8	2*64(SRC), GHASHDATA2
	vmovdqu8	3*64(SRC), GHASHDATA3
.endif

	// Start the AES encryption of the counter blocks.
	_ctr_begin_4x
	cmp		$24, AESKEYLEN
	jl		128f		// AES-128?
	je		192f		// AES-192?
	// AES-256
	vbroadcasti32x4	-13*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-12*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
192:
	vbroadcasti32x4	-11*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-10*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
128:

	// Finish the AES encryption of the counter blocks in %zmm[0-3],
	// interleaved with the GHASH update of the ciphertext blocks in
	// GHASHDATA[0-3].
.irp i, 9,8,7,6,5,4,3,2,1
	_ghash_step_4x	(9 - \i)
	_vaesenc_4x	RNDKEY_M\i
.endr
	_ghash_step_4x	9
	_aesenclast_and_xor_4x
	add		$256, SRC
	add		$256, DST
	sub		$256, DATALEN
	jge		.Lcrypt_loop_4x\@

.if \enc
.Lghash_last_ciphertext_4x\@:
	// Update GHASH with the last set of ciphertext blocks.
	_ghash_4x
.endif

.Lcrypt_loop_4x_done\@:

	// Undo the extra subtraction by 256 and check whether data remains.
	add		$256, DATALEN
	jz		.Ldone\@

	// The data length isn't a multiple of 256 bytes.  Process the remaining
	// data of length 1 <= DATALEN < 256, up to one 64-byte vector at a
	// time.  Going one vector at a time may seem inefficient compared to
	// having separate code paths for each possible number of vectors
	// remaining.  However, using a loop keeps the code size down, and it
	// performs surprising well; modern CPUs will start executing the next
	// iteration before the previous one finishes and also predict the
	// number of loop iterations.  For a similar reason, we roll up the AES
	// rounds.
	//
	// On the last iteration, the remaining length may be less than 64
	// bytes.  Handle this using masking.
	//
	// Since there are enough key powers available for all remaining data,
	// there is no need to do a GHASH reduction after each iteration.
	// Instead, multiply each remaining block by its own key power, and only
	// do a GHASH reduction at the very end.

	// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
	// is the number of blocks that remain.
	.set	POWERS_PTR, LE_CTR_PTR	// LE_CTR_PTR is free to be reused.
	mov		DATALEN, %eax
	neg		%rax
	and		$~15, %rax	// -round_up(DATALEN, 16)
	lea		OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR

	// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
	.set	LO, GHASHDATA0
	.set	LO_XMM, GHASHDATA0_XMM
	.set	MI, GHASHDATA1
	.set	MI_XMM, GHASHDATA1_XMM
	.set	HI, GHASHDATA2
	.set	HI_XMM, GHASHDATA2_XMM
	vpxor		LO_XMM, LO_XMM, LO_XMM
	vpxor		MI_XMM, MI_XMM, MI_XMM
	vpxor		HI_XMM, HI_XMM, HI_XMM

.Lcrypt_loop_1x\@:

	// Select the appropriate mask for this iteration: all 1's if
	// DATALEN >= 64, otherwise DATALEN 1's.  Do this branchlessly using the
	// bzhi instruction from BMI2.  (This relies on DATALEN <= 255.)
	mov		$-1, %rax
	bzhi		DATALEN64, %rax, %rax
	kmovq		%rax, %k1

	// Encrypt a vector of counter blocks.  This does not need to be masked.
	vpshufb		BSWAP_MASK, LE_CTR, %zmm0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpxord		RNDKEY0, %zmm0, %zmm0
	lea		OFFSETOF_AESROUNDKEYS+16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	vaesenc		RNDKEY, %zmm0, %zmm0
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	RNDKEYLAST, %zmm0, %zmm0

	// XOR the data with the appropriate number of keystream bytes.
	vmovdqu8	(SRC), %zmm1{%k1}{z}
	vpxord		%zmm1, %zmm0, %zmm0
	vmovdqu8	%zmm0, (DST){%k1}

	// Update GHASH with the ciphertext block(s), without reducing.
	//
	// In the case of DATALEN < 64, the ciphertext is zero-padded to 64
	// bytes.  (If decrypting, it's done by the above masked load.  If
	// encrypting, it's done by the below masked register-to-register move.)
	// Note that if DATALEN <= 48, there will be additional padding beyond
	// the padding of the last block specified by GHASH itself; i.e., there
	// may be whole block(s) that get processed by the GHASH multiplication
	// and reduction instructions but should not actually be included in the
	// GHASH.  However, any such blocks are all-zeroes, and the values that
	// they're multiplied with are also all-zeroes.  Therefore they just add
	// 0 * 0 = 0 to the final GHASH result, which makes no difference.
	vmovdqu8	(POWERS_PTR), H_POW1
.if \enc
	vmovdqu8	%zmm0, %zmm1{%k1}{z}
.endif
	vpshufb		BSWAP_MASK, %zmm1, %zmm0
	vpxord		GHASH_ACC, %zmm0, %zmm0
	_ghash_mul_noreduce	H_POW1, %zmm0, LO, MI, HI, \
				GHASHDATA3, %zmm1, %zmm2, %zmm3
	vpxor		GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM

	add		$64, POWERS_PTR
	add		$64, SRC
	add		$64, DST
	sub		$64, DATALEN
	jg		.Lcrypt_loop_1x\@

	// Finally, do the GHASH reduction.
	_ghash_reduce	LO, MI, HI, GFPOLY, %zmm0
	_horizontal_xor	HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2

.Ldone\@:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
.endm
1016
1017
// void aes_gcm_enc_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//				      const u32 le_ctr[4], u8 ghash_acc[16],
//				      u64 total_aadlen, u64 total_datalen);
// bool aes_gcm_dec_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//				      const u32 le_ctr[4],
//				      const u8 ghash_acc[16],
//				      u64 total_aadlen, u64 total_datalen,
//				      const u8 tag[16], int taglen);
//
// This macro generates one of the above two functions (with \enc selecting
// which one).  Both functions finish computing the GCM authentication tag by
// updating GHASH with the lengths block and encrypting the GHASH accumulator.
// |total_aadlen| and |total_datalen| must be the total length of the additional
// authenticated data and the en/decrypted data in bytes, respectively.
//
// The encryption function then stores the full-length (16-byte) computed
// authentication tag to |ghash_acc|.  The decryption function instead loads the
// expected authentication tag (the one that was transmitted) from the 16-byte
// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
// computed tag in constant time, and returns true if and only if they match.
.macro	_aes_gcm_final	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	TOTAL_AADLEN,	%rcx
	.set	TOTAL_DATALEN,	%r8
	.set	TAG,		%r9
	.set	TAGLEN,		%r10d	// Originally at 8(%rsp)

	// Additional local variables.
	// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.
	.set	AESKEYLEN,	%r11d
	.set	AESKEYLEN64,	%r11
	.set	GFPOLY,		%xmm4
	.set	BSWAP_MASK,	%xmm5
	.set	LE_CTR,		%xmm6
	.set	GHASH_ACC,	%xmm7
	.set	H_POW1,		%xmm8

	// Load some constants.
	vmovdqa		.Lgfpoly(%rip), GFPOLY
	vmovdqa		.Lbswap_mask(%rip), BSWAP_MASK

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Set up a counter block with 1 in the low 32-bit word.  This is the
	// counter that produces the ciphertext needed to encrypt the auth tag.
	// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
	vpblendd	$0xe, (LE_CTR_PTR), GFPOLY, LE_CTR

	// Build the lengths block and XOR it with the GHASH accumulator.
	// Although the lengths block is defined as the AAD length followed by
	// the en/decrypted data length, both in big-endian byte order, a byte
	// reflection of the full block is needed because of the way we compute
	// GHASH (see _ghash_mul_step).  By using little-endian values in the
	// opposite order, we avoid having to reflect any bytes here.
	vmovq		TOTAL_DATALEN, %xmm0
	vpinsrq		$1, TOTAL_AADLEN, %xmm0, %xmm0
	vpsllq		$3, %xmm0, %xmm0	// Bytes to bits
	vpxor		(GHASH_ACC_PTR), %xmm0, GHASH_ACC

	// Load the first hash key power (H^1), which is stored last.
	vmovdqu8	OFFSETOFEND_H_POWERS-16(KEY), H_POW1

.if !\enc
	// Prepare a mask of TAGLEN one bits.
	movl		8(%rsp), TAGLEN
	mov		$-1, %eax
	bzhi		TAGLEN, %eax, %eax
	kmovd		%eax, %k1
.endif

	// Make %rax point to the last AES round key for the chosen AES variant.
	lea		OFFSETOF_AESROUNDKEYS+6*16(KEY,AESKEYLEN64,4), %rax

	// Start the AES encryption of the counter block by swapping the counter
	// block to big-endian and XOR-ing it with the zero-th AES round key.
	vpshufb		BSWAP_MASK, LE_CTR, %xmm0
	vpxor		OFFSETOF_AESROUNDKEYS(KEY), %xmm0, %xmm0

	// Complete the AES encryption and multiply GHASH_ACC by H^1.
	// Interleave the AES and GHASH instructions to improve performance.
	cmp		$24, AESKEYLEN
	jl		128f	// AES-128?
	je		192f	// AES-192?
	// AES-256
	vaesenc		-13*16(%rax), %xmm0, %xmm0
	vaesenc		-12*16(%rax), %xmm0, %xmm0
192:
	vaesenc		-11*16(%rax), %xmm0, %xmm0
	vaesenc		-10*16(%rax), %xmm0, %xmm0
128:
.irp i, 0,1,2,3,4,5,6,7,8
	_ghash_mul_step	\i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3
	vaesenc		(\i-9)*16(%rax), %xmm0, %xmm0
.endr
	_ghash_mul_step	9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3

	// Undo the byte reflection of the GHASH accumulator.
	vpshufb		BSWAP_MASK, GHASH_ACC, GHASH_ACC

	// Do the last AES round and XOR the resulting keystream block with the
	// GHASH accumulator to produce the full computed authentication tag.
	//
	// Reduce latency by taking advantage of the property vaesenclast(key,
	// a) ^ b == vaesenclast(key ^ b, a).  I.e., XOR GHASH_ACC into the last
	// round key, instead of XOR'ing the final AES output with GHASH_ACC.
	//
	// enc_final then returns the computed auth tag, while dec_final
	// compares it with the transmitted one and returns a bool.  To compare
	// the tags, dec_final XORs them together and uses vptest to check
	// whether the result is all-zeroes.  This should be constant-time.
	// dec_final applies the vaesenclast optimization to this additional
	// value XOR'd too, using vpternlogd to XOR the last round key, GHASH
	// accumulator, and transmitted auth tag together in one instruction.
.if \enc
	vpxor		(%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, GHASH_ACC
	vmovdqu		GHASH_ACC, (GHASH_ACC_PTR)
.else
	vmovdqu		(TAG), %xmm1
	vpternlogd	$0x96, (%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, %xmm0
	xor		%eax, %eax
	vmovdqu8	%xmm0, %xmm0{%k1}{z}	// Truncate to TAGLEN bytes
	vptest		%xmm0, %xmm0
	sete		%al
.endif
	// No need for vzeroupper here, since only used xmm registers were used.
	RET
.endm
1153
1154
// Instantiate the update and final macros as the four exported functions.
// Each function body is entirely generated by the corresponding macro, with
// \enc = 1 selecting the encryption variant and \enc = 0 the decryption one.
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx512)
	_aes_gcm_update	1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx512)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx512)
	_aes_gcm_update	0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx512)

SYM_FUNC_START(aes_gcm_enc_final_vaes_avx512)
	_aes_gcm_final	1
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx512)
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx512)
	_aes_gcm_final	0
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx512)