/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// VAES and VPCLMULQDQ optimized AES-GCM for x86_64
//
// Copyright 2024 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License"). You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//------------------------------------------------------------------------------
//
// This file implements AES-GCM (Galois/Counter Mode) for x86_64 CPUs that
// support VAES (vector AES), VPCLMULQDQ (vector carryless multiplication), and
// either AVX512 or AVX10. Some of the functions, notably the encryption and
// decryption update functions which are the most performance-critical, are
// provided in two variants generated from a macro: one using 256-bit vectors
// (suffix: vaes_avx10_256) and one using 512-bit vectors (vaes_avx10_512). The
// other, "shared" functions (vaes_avx10) use at most 256-bit vectors.
//
// The functions that use 512-bit vectors are intended for CPUs that support
// 512-bit vectors *and* where using them doesn't cause significant
// downclocking. They require the following CPU features:
//
// VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/512)
//
// The other functions require the following CPU features:
//
// VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/256)
//
// All functions use the "System V" ABI. The Windows ABI is not supported.
//
// Note that we use "avx10" in the names of the functions as a shorthand to
// really mean "AVX10 or a certain set of AVX512 features". Due to Intel's
// introduction of AVX512 and then its replacement by AVX10, there doesn't seem
// to be a simple way to name things that makes sense on all CPUs.
//
// Note that the macros that support both 256-bit and 512-bit vectors could
// fairly easily be changed to support 128-bit too. However, this would *not*
// be sufficient to allow the code to run on CPUs without AVX512 or AVX10,
// because the code heavily uses several features of these extensions other than
// the vector length: the increase in the number of SIMD registers from 16 to
// 32, masking support, and new instructions such as vpternlogd (which can do a
// three-argument XOR). These features are very useful for AES-GCM.

#include <linux/linkage.h>

.section .rodata
.p2align 6

// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
.octa 0x000102030405060708090a0b0c0d0e0f

// This is the GHASH reducing polynomial without its constant term, i.e.
// x^128 + x^7 + x^2 + x, represented using the backwards mapping
// between bits and polynomial coefficients.
//
// Alternatively, it can be interpreted as the naturally-ordered
// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
// "reversed" GHASH reducing polynomial without its x^128 term.
.Lgfpoly:
.octa 0xc2000000000000000000000000000001
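// (For reference: reading the constant above in the natural bit order, the
// set bits are 0, 121, 126, and 127, which is exactly
// x^127 + x^126 + x^121 + 1.)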

// Same as above, but with the (1 << 64) bit set.
.Lgfpoly_and_internal_carrybit:
.octa 0xc2000000000000010000000000000001

// The below constants are used for incrementing the counter blocks.
// ctr_pattern points to the four 128-bit values [0, 1, 2, 3].
// inc_2blocks and inc_4blocks point to the single 128-bit values 2 and
// 4. Note that the same '2' is reused in ctr_pattern and inc_2blocks.
.Lctr_pattern:
.octa 0
.octa 1
.Linc_2blocks:
.octa 2
.octa 3
.Linc_4blocks:
.octa 4

// Number of powers of the hash key stored in the key struct. The powers are
// stored from highest (H^NUM_H_POWERS) to lowest (H^1).
#define NUM_H_POWERS 16

// Offset to AES key length (in bytes) in the key struct
#define OFFSETOF_AESKEYLEN 480

// Offset to start of hash key powers array in the key struct
#define OFFSETOF_H_POWERS 512

// Offset to end of hash key powers array in the key struct.
//
// This is immediately followed by three zeroized padding blocks, which are
// included so that partial vectors can be handled more easily. E.g. if VL=64
// and two blocks remain, we load the 4 values [H^2, H^1, 0, 0]. The most
// padding blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
#define OFFSETOFEND_H_POWERS (OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
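// For reference, with the values above: OFFSETOFEND_H_POWERS = 512 + 16*16
// = 768, and the three zeroized padding blocks occupy bytes 768..815 of the
// key struct.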

.text

// Set the vector length in bytes. This sets the VL variable and defines
// register aliases V0-V31 that map to the ymm or zmm registers.
.macro _set_veclen vl
.set VL, \vl
.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if VL == 32
.set V\i, %ymm\i
.elseif VL == 64
.set V\i, %zmm\i
.else
.error "Unsupported vector length"
.endif
.endr
.endm

// The _ghash_mul_step macro does one step of GHASH multiplication of the
// 128-bit lanes of \a by the corresponding 128-bit lanes of \b and stores the
// reduced products in \dst. \t0, \t1, and \t2 are temporary registers of the
// same size as \a and \b. To complete all steps, this must be invoked with \i=0
// through \i=9. The division into steps allows users of this macro to
// optionally interleave the computation with other instructions. Users of this
// macro must preserve the parameter registers across steps.
//
// The multiplications are done in GHASH's representation of the finite field
// GF(2^128). Elements of GF(2^128) are represented as binary polynomials
// (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial
// G. The GCM specification uses G = x^128 + x^7 + x^2 + x + 1. Addition is
// just XOR, while multiplication is more complex and has two parts: (a) do
// carryless multiplication of two 128-bit input polynomials to get a 256-bit
// intermediate product polynomial, and (b) reduce the intermediate product to
// 128 bits by adding multiples of G that cancel out terms in it. (Adding
// multiples of G doesn't change which field element the polynomial represents.)
//
// Unfortunately, the GCM specification maps bits to/from polynomial
// coefficients backwards from the natural order. In each byte it specifies the
// highest bit to be the lowest order polynomial coefficient, *not* the highest!
// This makes it nontrivial to work with the GHASH polynomials. We could
// reflect the bits, but x86 doesn't have an instruction that does that.
//
// Instead, we operate on the values without bit-reflecting them. This *mostly*
// just works, since XOR and carryless multiplication are symmetric with respect
// to bit order, but it has some consequences. First, due to GHASH's byte
// order, by skipping bit reflection, *byte* reflection becomes necessary to
// give the polynomial terms a consistent order. E.g., considering an N-bit
// value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0
// through N-1 of the byte-reflected value represent the coefficients of x^(N-1)
// through x^0, whereas bits 0 through N-1 of the non-byte-reflected value
// represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8) which can't be worked
// with. Fortunately, x86's vpshufb instruction can do byte reflection.
//
// Second, forgoing the bit reflection causes an extra multiple of x (still
// using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each
// multiplication. This is because an M-bit by N-bit carryless multiplication
// really produces a (M+N-1)-bit product, but in practice it's zero-extended to
// M+N bits. In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits
// to polynomial coefficients backwards, this zero-extension actually changes
// the product by introducing an extra factor of x. Therefore, users of this
// macro must ensure that one of the inputs has an extra factor of x^-1, i.e.
// the multiplicative inverse of x, to cancel out the extra x.
//
// Third, the backwards coefficients convention is just confusing to work with,
// since it makes "low" and "high" in the polynomial math mean the opposite of
// their normal meaning in computer programming. This can be solved by using an
// alternative interpretation: the polynomial coefficients are understood to be
// in the natural order, and the multiplication is actually \a * \b * x^-128 mod
// x^128 + x^127 + x^126 + x^121 + 1. This doesn't change the inputs, outputs,
// or the implementation at all; it just changes the mathematical interpretation
// of what each instruction is doing. Starting from here, we'll use this
// alternative interpretation, as it's easier to understand the code that way.
//
// Moving onto the implementation, the vpclmulqdq instruction does 64 x 64 =>
// 128-bit carryless multiplication, so we break the 128 x 128 multiplication
// into parts as follows (the _L and _H suffixes denote low and high 64 bits):
//
// LO = a_L * b_L
// MI = (a_L * b_H) + (a_H * b_L)
// HI = a_H * b_H
//
// The 256-bit product is x^128*HI + x^64*MI + LO. LO, MI, and HI are 128-bit.
// Note that MI "overlaps" with LO and HI. We don't consolidate MI into LO and
// HI right away, since the way the reduction works makes that unnecessary.
//
// For the reduction, we cancel out the low 128 bits by adding multiples of G =
// x^128 + x^127 + x^126 + x^121 + 1. This is done by two iterations, each of
// which cancels out the next lowest 64 bits. Consider a value x^64*A + B,
// where A and B are 128-bit. Adding B_L*G to that value gives:
//
// x^64*A + B + B_L*G
// = x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)
// = x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L
// = x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L
// = x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))
//
// So: if we sum A, B with its halves swapped, and the low half of B times x^63
// + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the
// original value x^64*A + B. I.e., the low 64 bits got canceled out.
//
// We just need to apply this twice: first to fold LO into MI, and second to
// fold the updated MI into HI.
//
// The needed three-argument XORs are done using the vpternlogd instruction with
// immediate 0x96, since this is faster than two vpxord instructions.
//
// A potential optimization, assuming that b is fixed per-key (if a is fixed
// per-key it would work the other way around), is to use one iteration of the
// reduction described above to precompute a value c such that x^64*c = b mod G,
// and then multiply a_L by c (and implicitly by x^64) instead of by b:
//
// MI = (a_L * c_L) + (a_H * b_L)
// HI = (a_L * c_H) + (a_H * b_H)
//
// This would eliminate the LO part of the intermediate product, which would
// eliminate the need to fold LO into MI. This would save two instructions,
// including a vpclmulqdq. However, we currently don't use this optimization
// because it would require twice as many per-key precomputed values.
//
// Using Karatsuba multiplication instead of "schoolbook" multiplication
// similarly would save a vpclmulqdq but does not seem to be worth it.
.macro _ghash_mul_step i, a, b, dst, gfpoly, t0, t1, t2
.if \i == 0
vpclmulqdq $0x00, \a, \b, \t0 // LO = a_L * b_L
vpclmulqdq $0x01, \a, \b, \t1 // MI_0 = a_L * b_H
.elseif \i == 1
vpclmulqdq $0x10, \a, \b, \t2 // MI_1 = a_H * b_L
.elseif \i == 2
vpxord \t2, \t1, \t1 // MI = MI_0 + MI_1
.elseif \i == 3
vpclmulqdq $0x01, \t0, \gfpoly, \t2 // LO_L*(x^63 + x^62 + x^57)
.elseif \i == 4
vpshufd $0x4e, \t0, \t0 // Swap halves of LO
.elseif \i == 5
vpternlogd $0x96, \t2, \t0, \t1 // Fold LO into MI
.elseif \i == 6
vpclmulqdq $0x11, \a, \b, \dst // HI = a_H * b_H
.elseif \i == 7
vpclmulqdq $0x01, \t1, \gfpoly, \t0 // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
vpshufd $0x4e, \t1, \t1 // Swap halves of MI
.elseif \i == 9
vpternlogd $0x96, \t0, \t1, \dst // Fold MI into HI
.endif
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
// the reduced products in \dst. See _ghash_mul_step for full explanation.
.macro _ghash_mul a, b, dst, gfpoly, t0, t1, t2
.irp i, 0,1,2,3,4,5,6,7,8,9
_ghash_mul_step \i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
.endr
.endm
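
// For intuition, the following is an illustrative Python reference model of
// what _ghash_mul computes in the "natural" interpretation described above
// (documentation only; it is not used by the build):
//
//        def clmul(x, y):                 # carryless multiplication
//            r = 0
//            while y:
//                if y & 1:
//                    r ^= x
//                x <<= 1
//                y >>= 1
//            return r
//
//        def ghash_mul(a, b):             # a * b * x^-128 mod POLY
//            POLY = (1 << 128) | (1 << 127) | (1 << 126) | (1 << 121) | 1
//            p = clmul(a, b)              # 256-bit intermediate product
//            for _ in range(128):         # divide out x^128, canceling the
//                if p & 1:                #  low bits with multiples of POLY
//                    p ^= POLY
//                p >>= 1
//            return p
//
// The assembly computes the same result, but splits the carryless
// multiplication into four vpclmulqdq instructions and cancels the low bits
// 64 at a time as explained above.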

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
// *unreduced* products to \lo, \mi, and \hi.
.macro _ghash_mul_noreduce a, b, lo, mi, hi, t0, t1, t2, t3
vpclmulqdq $0x00, \a, \b, \t0 // a_L * b_L
vpclmulqdq $0x01, \a, \b, \t1 // a_L * b_H
vpclmulqdq $0x10, \a, \b, \t2 // a_H * b_L
vpclmulqdq $0x11, \a, \b, \t3 // a_H * b_H
vpxord \t0, \lo, \lo
vpternlogd $0x96, \t2, \t1, \mi
vpxord \t3, \hi, \hi
.endm

// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
// reduced products in \hi. See _ghash_mul_step for explanation of reduction.
.macro _ghash_reduce lo, mi, hi, gfpoly, t0
vpclmulqdq $0x01, \lo, \gfpoly, \t0
vpshufd $0x4e, \lo, \lo
vpternlogd $0x96, \t0, \lo, \mi
vpclmulqdq $0x01, \mi, \gfpoly, \t0
vpshufd $0x4e, \mi, \mi
vpternlogd $0x96, \t0, \mi, \hi
.endm
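
// Note that the reduction is linear over GF(2), so reducing the XOR-sum of
// several unreduced products once at the end gives the same result as
// reducing each product individually and XOR'ing the reduced results. The
// callers below rely on this to defer the reduction across multiple blocks.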

// void aes_gcm_precompute_##suffix(struct aes_gcm_key_avx10 *key);
//
// Given the expanded AES key |key->aes_key|, this function derives the GHASH
// subkey and initializes |key->ghash_key_powers| with powers of it.
//
// The number of key powers initialized is NUM_H_POWERS, and they are stored in
// the order H^NUM_H_POWERS to H^1. The zeroized padding blocks after the key
// powers themselves are also initialized.
//
// This macro supports both VL=32 and VL=64. _set_veclen must have been invoked
// with the desired length. In the VL=32 case, the function computes twice as
// many key powers as are actually used by the VL=32 GCM update functions.
// This is done to keep the key format the same regardless of vector length.
.macro _aes_gcm_precompute

// Function arguments
.set KEY, %rdi

// Additional local variables. V0-V2 and %rax are used as temporaries.
.set POWERS_PTR, %rsi
.set RNDKEYLAST_PTR, %rdx
.set H_CUR, V3
.set H_CUR_YMM, %ymm3
.set H_CUR_XMM, %xmm3
.set H_INC, V4
.set H_INC_YMM, %ymm4
.set H_INC_XMM, %xmm4
.set GFPOLY, V5
.set GFPOLY_YMM, %ymm5
.set GFPOLY_XMM, %xmm5

// Get pointer to lowest set of key powers (located at end of array).
lea OFFSETOFEND_H_POWERS-VL(KEY), POWERS_PTR

// Encrypt an all-zeroes block to get the raw hash subkey.
movl OFFSETOF_AESKEYLEN(KEY), %eax
lea 6*16(KEY,%rax,4), RNDKEYLAST_PTR
vmovdqu (KEY), %xmm0 // Zero-th round key XOR all-zeroes block
add $16, KEY
1:
vaesenc (KEY), %xmm0, %xmm0
add $16, KEY
cmp KEY, RNDKEYLAST_PTR
jne 1b
vaesenclast (RNDKEYLAST_PTR), %xmm0, %xmm0

// Reflect the bytes of the raw hash subkey.
vpshufb .Lbswap_mask(%rip), %xmm0, H_CUR_XMM

// Zeroize the padding blocks.
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %ymm0, VL(POWERS_PTR)
vmovdqu %xmm0, VL+2*16(POWERS_PTR)

// Finish preprocessing the first key power, H^1. Since this GHASH
// implementation operates directly on values with the backwards bit
// order specified by the GCM standard, it's necessary to preprocess the
// raw key as follows. First, reflect its bytes. Second, multiply it
// by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards
// interpretation of polynomial coefficients), which can also be
// interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121
// + 1 using the alternative, natural interpretation of polynomial
// coefficients. For details, see the comment above _ghash_mul_step.
//
// Either way, for the multiplication the concrete operation performed
// is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2
// << 120) | 1 if a 1 bit was carried out. However, there's no 128-bit
// wide shift instruction, so instead double each of the two 64-bit
// halves and incorporate the internal carry bit into the value XOR'd.
vpshufd $0xd3, H_CUR_XMM, %xmm0
vpsrad $31, %xmm0, %xmm0
vpaddq H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
// H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
vpternlogd $0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM

// Load the gfpoly constant.
vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY

// Square H^1 to get H^2.
//
// Note that as with H^1, all higher key powers also need an extra
// factor of x^-1 (or x using the natural interpretation). Nothing
// special needs to be done to make this happen, though: H^1 * H^1 would
// end up with two factors of x^-1, but the multiplication consumes one.
// So the product H^2 ends up with the desired one factor of x^-1.
_ghash_mul H_CUR_XMM, H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, \
%xmm0, %xmm1, %xmm2

// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
vinserti128 $1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
vinserti128 $1, H_INC_XMM, H_INC_YMM, H_INC_YMM

.if VL == 64
// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
_ghash_mul H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
%ymm0, %ymm1, %ymm2
vinserti64x4 $1, H_CUR_YMM, H_INC, H_CUR
vshufi64x2 $0, H_INC, H_INC, H_INC
.endif

// Store the lowest set of key powers.
vmovdqu8 H_CUR, (POWERS_PTR)

// Compute and store the remaining key powers. With VL=32, repeatedly
// multiply [H^(i+1), H^i] by [H^2, H^2] to get [H^(i+3), H^(i+2)].
// With VL=64, repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
mov $(NUM_H_POWERS*16/VL) - 1, %eax
.Lprecompute_next\@:
sub $VL, POWERS_PTR
_ghash_mul H_INC, H_CUR, H_CUR, GFPOLY, V0, V1, V2
vmovdqu8 H_CUR, (POWERS_PTR)
dec %eax
jnz .Lprecompute_next\@

vzeroupper // This is needed after using ymm or zmm registers.
RET
.endm
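
// For reference, the net effect of the macro above, viewed one 16-byte block
// at a time, can be summarized as (illustrative only, not an additional API):
//
//        raw  = AES-Encrypt(aes_key, all-zeroes block)
//        T[1] = byte_reflect(raw) * x mod x^128 + x^127 + x^126 + x^121 + 1
//        T[i] = ghash_mul(T[1], T[i-1])   for i = 2..NUM_H_POWERS
//
// where ghash_mul is the operation implemented by _ghash_mul, and
// T[NUM_H_POWERS], ..., T[1] are stored at OFFSETOF_H_POWERS in that order,
// followed by the three zeroized padding blocks.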

// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
// the result in \dst_xmm. This implicitly zeroizes the other lanes of dst.
.macro _horizontal_xor src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
vextracti32x4 $1, \src, \t0_xmm
.if VL == 32
vpxord \t0_xmm, \src_xmm, \dst_xmm
.elseif VL == 64
vextracti32x4 $2, \src, \t1_xmm
vextracti32x4 $3, \src, \t2_xmm
vpxord \t0_xmm, \src_xmm, \dst_xmm
vpternlogd $0x96, \t1_xmm, \t2_xmm, \dst_xmm
.else
.error "Unsupported vector length"
.endif
.endm

// Do one step of the GHASH update of the data blocks given in the vector
// registers GHASHDATA[0-3]. \i specifies the step to do, 0 through 9. The
// division into steps allows users of this macro to optionally interleave the
// computation with other instructions. This macro uses the vector register
// GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;
// H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and
// GHASHTMP[0-2] as temporaries. This macro handles the byte-reflection of the
// data blocks. The parameter registers must be preserved across steps.
//
// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
// operations are vectorized operations on vectors of 16-byte blocks. E.g.,
// with VL=32 there are 2 blocks per vector and the vectorized terms correspond
// to the following non-vectorized terms:
//
// H_POW4*(GHASHDATA0 + GHASH_ACC) => H^8*(blk0 + GHASH_ACC_XMM) and H^7*(blk1 + 0)
// H_POW3*GHASHDATA1 => H^6*blk2 and H^5*blk3
// H_POW2*GHASHDATA2 => H^4*blk4 and H^3*blk5
// H_POW1*GHASHDATA3 => H^2*blk6 and H^1*blk7
//
// With VL=64, we use 4 blocks/vector, H^16 through H^1, and blk0 through blk15.
//
// More concretely, this code does:
// - Do vectorized "schoolbook" multiplications to compute the intermediate
//   256-bit product of each block and its corresponding hash key power.
//   There are 4*VL/16 of these intermediate products.
// - Sum (XOR) the intermediate 256-bit products across vectors. This leaves
//   VL/16 256-bit intermediate values.
// - Do a vectorized reduction of these 256-bit intermediate values to
//   128-bits each. This leaves VL/16 128-bit intermediate values.
// - Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
//
// See _ghash_mul_step for the full explanation of the operations performed for
// each individual finite field multiplication and reduction.
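//
// For reference, the grouped form above is the sequential GHASH recurrence
// with the multiplications batched: with VL=32,
//
//        H^8*(blk0 + GHASH_ACC) + H^7*blk1 + ... + H^1*blk7
//          = ((((GHASH_ACC + blk0)*H + blk1)*H + ...)*H + blk7)*H
//
// evaluated in GF(2^128), so the result matches hashing the same blocks one
// at a time.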
.macro _ghash_step_4x i
.if \i == 0
vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0
vpxord GHASH_ACC, GHASHDATA0, GHASHDATA0
vpshufb BSWAP_MASK, GHASHDATA1, GHASHDATA1
vpshufb BSWAP_MASK, GHASHDATA2, GHASHDATA2
.elseif \i == 1
vpshufb BSWAP_MASK, GHASHDATA3, GHASHDATA3
vpclmulqdq $0x00, H_POW4, GHASHDATA0, GHASH_ACC // LO_0
vpclmulqdq $0x00, H_POW3, GHASHDATA1, GHASHTMP0 // LO_1
vpclmulqdq $0x00, H_POW2, GHASHDATA2, GHASHTMP1 // LO_2
.elseif \i == 2
vpxord GHASHTMP0, GHASH_ACC, GHASH_ACC // sum(LO_{1,0})
vpclmulqdq $0x00, H_POW1, GHASHDATA3, GHASHTMP2 // LO_3
vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC // LO = sum(LO_{3,2,1,0})
vpclmulqdq $0x01, H_POW4, GHASHDATA0, GHASHTMP0 // MI_0
.elseif \i == 3
vpclmulqdq $0x01, H_POW3, GHASHDATA1, GHASHTMP1 // MI_1
vpclmulqdq $0x01, H_POW2, GHASHDATA2, GHASHTMP2 // MI_2
vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{2,1,0})
vpclmulqdq $0x01, H_POW1, GHASHDATA3, GHASHTMP1 // MI_3
.elseif \i == 4
vpclmulqdq $0x10, H_POW4, GHASHDATA0, GHASHTMP2 // MI_4
vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{4,3,2,1,0})
vpclmulqdq $0x10, H_POW3, GHASHDATA1, GHASHTMP1 // MI_5
vpclmulqdq $0x10, H_POW2, GHASHDATA2, GHASHTMP2 // MI_6
.elseif \i == 5
vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{6,5,4,3,2,1,0})
vpclmulqdq $0x01, GHASH_ACC, GFPOLY, GHASHTMP2 // LO_L*(x^63 + x^62 + x^57)
vpclmulqdq $0x10, H_POW1, GHASHDATA3, GHASHTMP1 // MI_7
vpxord GHASHTMP1, GHASHTMP0, GHASHTMP0 // MI = sum(MI_{7,6,5,4,3,2,1,0})
.elseif \i == 6
vpshufd $0x4e, GHASH_ACC, GHASH_ACC // Swap halves of LO
vpclmulqdq $0x11, H_POW4, GHASHDATA0, GHASHDATA0 // HI_0
vpclmulqdq $0x11, H_POW3, GHASHDATA1, GHASHDATA1 // HI_1
vpclmulqdq $0x11, H_POW2, GHASHDATA2, GHASHDATA2 // HI_2
.elseif \i == 7
vpternlogd $0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0 // Fold LO into MI
vpclmulqdq $0x11, H_POW1, GHASHDATA3, GHASHDATA3 // HI_3
vpternlogd $0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})
vpclmulqdq $0x01, GHASHTMP0, GFPOLY, GHASHTMP1 // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
vpxord GHASHDATA3, GHASHDATA0, GHASH_ACC // HI = sum(HI_{3,2,1,0})
vpshufd $0x4e, GHASHTMP0, GHASHTMP0 // Swap halves of MI
vpternlogd $0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC // Fold MI into HI
.elseif \i == 9
_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
.endif
.endm

// Do one non-last round of AES encryption on the counter blocks in V0-V3 using
// the round key that has been broadcast to all 128-bit lanes of \round_key.
.macro _vaesenc_4x round_key
vaesenc \round_key, V0, V0
vaesenc \round_key, V1, V1
vaesenc \round_key, V2, V2
vaesenc \round_key, V3, V3
.endm

// Start the AES encryption of four vectors of counter blocks.
.macro _ctr_begin_4x

// Increment LE_CTR four times to generate four vectors of little-endian
// counter blocks, swap each to big-endian, and store them in V0-V3.
vpshufb BSWAP_MASK, LE_CTR, V0
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
vpshufb BSWAP_MASK, LE_CTR, V1
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
vpshufb BSWAP_MASK, LE_CTR, V2
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
vpshufb BSWAP_MASK, LE_CTR, V3
vpaddd LE_CTR_INC, LE_CTR, LE_CTR

// AES "round zero": XOR in the zero-th round key.
vpxord RNDKEY0, V0, V0
vpxord RNDKEY0, V1, V1
vpxord RNDKEY0, V2, V2
vpxord RNDKEY0, V3, V3
.endm

// Do the last AES round for four vectors of counter blocks V0-V3, XOR source
// data with the resulting keystream, and write the result to DST and
// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)
.macro _aesenclast_and_xor_4x
// XOR the source data with the last round key, saving the result in
// GHASHDATA[0-3]. This reduces latency by taking advantage of the
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
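// (This identity holds because the final step of AESENCLAST is an XOR with
// the round key, so a value XOR'd into the round key beforehand simply ends
// up XOR'd into the output.)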
vpxord 0*VL(SRC), RNDKEYLAST, GHASHDATA0
vpxord 1*VL(SRC), RNDKEYLAST, GHASHDATA1
vpxord 2*VL(SRC), RNDKEYLAST, GHASHDATA2
vpxord 3*VL(SRC), RNDKEYLAST, GHASHDATA3

// Do the last AES round. This handles the XOR with the source data
// too, as per the optimization described above.
vaesenclast GHASHDATA0, V0, GHASHDATA0
vaesenclast GHASHDATA1, V1, GHASHDATA1
vaesenclast GHASHDATA2, V2, GHASHDATA2
vaesenclast GHASHDATA3, V3, GHASHDATA3

// Store the en/decrypted data to DST.
vmovdqu8 GHASHDATA0, 0*VL(DST)
vmovdqu8 GHASHDATA1, 1*VL(DST)
vmovdqu8 GHASHDATA2, 2*VL(DST)
vmovdqu8 GHASHDATA3, 3*VL(DST)
.endm

// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,
//                                        const u32 le_ctr[4], u8 ghash_acc[16],
//                                        const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one). This macro supports both
// VL=32 and VL=64. _set_veclen must have been invoked with the desired length.
//
// This function computes the next portion of the CTR keystream, XOR's it with
// |datalen| bytes from |src|, and writes the resulting encrypted or decrypted
// data to |dst|. It also updates the GHASH accumulator |ghash_acc| using the
// next |datalen| ciphertext bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length. The caller must do any buffering needed to ensure this. Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format. For a new
// message, the low word of the counter must be 2. This function loads the
// counter from |le_ctr| and increments the loaded counter as needed, but it
// does *not* store the updated counter back to |le_ctr|. The caller must
// update |le_ctr| if any more data segments follow. Internally, only the low
// 32-bit word of the counter is incremented, following the GCM standard.
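//
// (The low counter word value 1 is reserved for encrypting the GHASH result
// into the final authentication tag; see _aes_gcm_final below. That is why a
// new message starts the update functions at counter value 2.)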
.macro _aes_gcm_update enc

// Function arguments
.set KEY, %rdi
.set LE_CTR_PTR, %rsi
.set GHASH_ACC_PTR, %rdx
.set SRC, %rcx
.set DST, %r8
.set DATALEN, %r9d
.set DATALEN64, %r9 // Zero-extend DATALEN before using!

// Additional local variables

// %rax and %k1 are used as temporary registers. LE_CTR_PTR is also
// available as a temporary register after the counter is loaded.

// AES key length in bytes
.set AESKEYLEN, %r10d
.set AESKEYLEN64, %r10

// Pointer to the last AES round key for the chosen AES variant
.set RNDKEYLAST_PTR, %r11

// In the main loop, V0-V3 are used as AES input and output. Elsewhere
// they are used as temporary registers.

// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
.set GHASHDATA0, V4
.set GHASHDATA0_XMM, %xmm4
.set GHASHDATA1, V5
.set GHASHDATA1_XMM, %xmm5
.set GHASHDATA2, V6
.set GHASHDATA2_XMM, %xmm6
.set GHASHDATA3, V7

// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
// using vpshufb, copied to all 128-bit lanes.
.set BSWAP_MASK, V8

// RNDKEY temporarily holds the next AES round key.
.set RNDKEY, V9

// GHASH_ACC is the accumulator variable for GHASH. When fully reduced,
// only the lowest 128-bit lane can be nonzero. When not fully reduced,
// more than one lane may be used, and they need to be XOR'd together.
.set GHASH_ACC, V10
.set GHASH_ACC_XMM, %xmm10

// LE_CTR_INC is the vector of 32-bit words that need to be added to a
// vector of little-endian counter blocks to advance it forwards.
.set LE_CTR_INC, V11

// LE_CTR contains the next set of little-endian counter blocks.
.set LE_CTR, V12

// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
// copied to all 128-bit lanes. RNDKEY0 is the zero-th round key,
// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
.set RNDKEY0, V13
.set RNDKEYLAST, V14
.set RNDKEY_M9, V15
.set RNDKEY_M8, V16
.set RNDKEY_M7, V17
.set RNDKEY_M6, V18
.set RNDKEY_M5, V19
.set RNDKEY_M4, V20
.set RNDKEY_M3, V21
.set RNDKEY_M2, V22
.set RNDKEY_M1, V23

// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x. These
// cannot coincide with anything used for AES encryption, since for
// performance reasons GHASH and AES encryption are interleaved.
.set GHASHTMP0, V24
.set GHASHTMP1, V25
.set GHASHTMP2, V26

// H_POW[4-1] contain the powers of the hash key H^(4*VL/16)...H^1. The
// descending numbering reflects the order of the key powers.
.set H_POW4, V27
.set H_POW3, V28
.set H_POW2, V29
.set H_POW1, V30

// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
.set GFPOLY, V31

// Load some constants.
vbroadcasti32x4 .Lbswap_mask(%rip), BSWAP_MASK
vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY

// Load the GHASH accumulator and the starting counter.
vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM
vbroadcasti32x4 (LE_CTR_PTR), LE_CTR

// Load the AES key length in bytes.
movl OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

// Make RNDKEYLAST_PTR point to the last AES round key. This is the
// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
// respectively. Then load the zero-th and last round keys.
lea 6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
vbroadcasti32x4 (KEY), RNDKEY0
vbroadcasti32x4 (RNDKEYLAST_PTR), RNDKEYLAST

// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
vpaddd .Lctr_pattern(%rip), LE_CTR, LE_CTR

// Initialize LE_CTR_INC to contain VL/16 in all 128-bit lanes.
.if VL == 32
vbroadcasti32x4 .Linc_2blocks(%rip), LE_CTR_INC
.elseif VL == 64
vbroadcasti32x4 .Linc_4blocks(%rip), LE_CTR_INC
.else
.error "Unsupported vector length"
.endif

// If there are at least 4*VL bytes of data, then continue into the loop
// that processes 4*VL bytes of data at a time. Otherwise skip it.
//
// Pre-subtracting 4*VL from DATALEN saves an instruction from the main
// loop and also ensures that at least one write always occurs to
// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
add $-4*VL, DATALEN // shorter than 'sub 4*VL' when VL=32
jl .Lcrypt_loop_4x_done\@

// Load powers of the hash key.
vmovdqu8 OFFSETOFEND_H_POWERS-4*VL(KEY), H_POW4
vmovdqu8 OFFSETOFEND_H_POWERS-3*VL(KEY), H_POW3
vmovdqu8 OFFSETOFEND_H_POWERS-2*VL(KEY), H_POW2
vmovdqu8 OFFSETOFEND_H_POWERS-1*VL(KEY), H_POW1

// Main loop: en/decrypt and hash 4 vectors at a time.
//
// When possible, interleave the AES encryption of the counter blocks
// with the GHASH update of the ciphertext blocks. This improves
// performance on many CPUs because the execution ports used by the VAES
// instructions often differ from those used by vpclmulqdq and other
// instructions used in GHASH. For example, many Intel CPUs dispatch
// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.
//
// The interleaving is easiest to do during decryption, since during
// decryption the ciphertext blocks are immediately available. For
// encryption, instead encrypt the first set of blocks, then hash those
// blocks while encrypting the next set of blocks, repeat that as
// needed, and finally hash the last set of blocks.

.if \enc
// Encrypt the first 4 vectors of plaintext blocks. Leave the resulting
// ciphertext in GHASHDATA[0-3] for GHASH.
_ctr_begin_4x
lea 16(KEY), %rax
1:
vbroadcasti32x4 (%rax), RNDKEY
_vaesenc_4x RNDKEY
add $16, %rax
cmp %rax, RNDKEYLAST_PTR
jne 1b
_aesenclast_and_xor_4x
sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
sub $-4*VL, DST
add $-4*VL, DATALEN
jl .Lghash_last_ciphertext_4x\@
.endif

// Cache as many additional AES round keys as possible.
.irp i, 9,8,7,6,5,4,3,2,1
vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr

.Lcrypt_loop_4x\@:

// If decrypting, load more ciphertext blocks into GHASHDATA[0-3]. If
// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
.if !\enc
vmovdqu8 0*VL(SRC), GHASHDATA0
vmovdqu8 1*VL(SRC), GHASHDATA1
vmovdqu8 2*VL(SRC), GHASHDATA2
vmovdqu8 3*VL(SRC), GHASHDATA3
.endif

// Start the AES encryption of the counter blocks.
_ctr_begin_4x
cmp $24, AESKEYLEN
jl 128f // AES-128?
je 192f // AES-192?
// AES-256
vbroadcasti32x4 -13*16(RNDKEYLAST_PTR), RNDKEY
_vaesenc_4x RNDKEY
vbroadcasti32x4 -12*16(RNDKEYLAST_PTR), RNDKEY
_vaesenc_4x RNDKEY
192:
vbroadcasti32x4 -11*16(RNDKEYLAST_PTR), RNDKEY
_vaesenc_4x RNDKEY
vbroadcasti32x4 -10*16(RNDKEYLAST_PTR), RNDKEY
_vaesenc_4x RNDKEY
128:

// Finish the AES encryption of the counter blocks in V0-V3, interleaved
// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].
.irp i, 9,8,7,6,5,4,3,2,1
_ghash_step_4x (9 - \i)
_vaesenc_4x RNDKEY_M\i
.endr
_ghash_step_4x 9
_aesenclast_and_xor_4x
sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32
sub $-4*VL, DST
add $-4*VL, DATALEN
jge .Lcrypt_loop_4x\@

.if \enc
.Lghash_last_ciphertext_4x\@:
// Update GHASH with the last set of ciphertext blocks.
.irp i, 0,1,2,3,4,5,6,7,8,9
_ghash_step_4x \i
.endr
.endif

.Lcrypt_loop_4x_done\@:

// Undo the extra subtraction by 4*VL and check whether data remains.
sub $-4*VL, DATALEN // shorter than 'add 4*VL' when VL=32
jz .Ldone\@

// The data length isn't a multiple of 4*VL. Process the remaining data
// of length 1 <= DATALEN < 4*VL, up to one vector (VL bytes) at a time.
// Going one vector at a time may seem inefficient compared to having
// separate code paths for each possible number of vectors remaining.
// However, using a loop keeps the code size down, and it performs
// surprisingly well; modern CPUs will start executing the next iteration
// before the previous one finishes and also predict the number of loop
// iterations. For a similar reason, we roll up the AES rounds.
//
// On the last iteration, the remaining length may be less than VL.
// Handle this using masking.
//
// Since there are enough key powers available for all remaining data,
// there is no need to do a GHASH reduction after each iteration.
// Instead, multiply each remaining block by its own key power, and only
// do a GHASH reduction at the very end.
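//
// (For example, with VL=64 and 80 bytes remaining: POWERS_PTR is set to the
// slot holding H^5, so the first iteration multiplies blocks 0-3 by
// [H^5, H^4, H^3, H^2], and the second iteration multiplies the final,
// zero-padded block by [H^1, 0, 0, 0] using the zeroized padding blocks
// that follow the key powers.)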

// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
// is the number of blocks that remain.
.set POWERS_PTR, LE_CTR_PTR // LE_CTR_PTR is free to be reused.
mov DATALEN, %eax
neg %rax
and $~15, %rax // -round_up(DATALEN, 16)
lea OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR

// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
.set LO, GHASHDATA0
.set LO_XMM, GHASHDATA0_XMM
.set MI, GHASHDATA1
.set MI_XMM, GHASHDATA1_XMM
.set HI, GHASHDATA2
.set HI_XMM, GHASHDATA2_XMM
vpxor LO_XMM, LO_XMM, LO_XMM
vpxor MI_XMM, MI_XMM, MI_XMM
vpxor HI_XMM, HI_XMM, HI_XMM

.Lcrypt_loop_1x\@:

// Select the appropriate mask for this iteration: all 1's if
// DATALEN >= VL, otherwise DATALEN 1's. Do this branchlessly using the
// bzhi instruction from BMI2. (This relies on DATALEN <= 255.)
.if VL < 64
mov $-1, %eax
bzhi DATALEN, %eax, %eax
kmovd %eax, %k1
.else
mov $-1, %rax
bzhi DATALEN64, %rax, %rax
kmovq %rax, %k1
.endif

// Encrypt a vector of counter blocks. This does not need to be masked.
vpshufb BSWAP_MASK, LE_CTR, V0
vpaddd LE_CTR_INC, LE_CTR, LE_CTR
vpxord RNDKEY0, V0, V0
lea 16(KEY), %rax
1:
vbroadcasti32x4 (%rax), RNDKEY
vaesenc RNDKEY, V0, V0
add $16, %rax
cmp %rax, RNDKEYLAST_PTR
jne 1b
vaesenclast RNDKEYLAST, V0, V0

// XOR the data with the appropriate number of keystream bytes.
vmovdqu8 (SRC), V1{%k1}{z}
vpxord V1, V0, V0
vmovdqu8 V0, (DST){%k1}

// Update GHASH with the ciphertext block(s), without reducing.
//
// In the case of DATALEN < VL, the ciphertext is zero-padded to VL.
// (If decrypting, it's done by the above masked load. If encrypting,
// it's done by the below masked register-to-register move.) Note that
// if DATALEN <= VL - 16, there will be additional padding beyond the
// padding of the last block specified by GHASH itself; i.e., there may
// be whole block(s) that get processed by the GHASH multiplication and
// reduction instructions but should not actually be included in the
// GHASH. However, any such blocks are all-zeroes, and the values that
// they're multiplied with are also all-zeroes. Therefore they just add
// 0 * 0 = 0 to the final GHASH result, which makes no difference.
vmovdqu8 (POWERS_PTR), H_POW1
.if \enc
vmovdqu8 V0, V1{%k1}{z}
.endif
vpshufb BSWAP_MASK, V1, V0
vpxord GHASH_ACC, V0, V0
_ghash_mul_noreduce H_POW1, V0, LO, MI, HI, GHASHDATA3, V1, V2, V3
vpxor GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM

add $VL, POWERS_PTR
add $VL, SRC
add $VL, DST
sub $VL, DATALEN
jg .Lcrypt_loop_1x\@

// Finally, do the GHASH reduction.
_ghash_reduce LO, MI, HI, GFPOLY, V0
_horizontal_xor HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2

.Ldone\@:
// Store the updated GHASH accumulator back to memory.
vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)

vzeroupper // This is needed after using ymm or zmm registers.
RET
.endm

// void aes_gcm_enc_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//                                   const u32 le_ctr[4], u8 ghash_acc[16],
//                                   u64 total_aadlen, u64 total_datalen);
// bool aes_gcm_dec_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//                                   const u32 le_ctr[4],
//                                   const u8 ghash_acc[16],
//                                   u64 total_aadlen, u64 total_datalen,
//                                   const u8 tag[16], int taglen);
//
// This macro generates one of the above two functions (with \enc selecting
// which one). Both functions finish computing the GCM authentication tag by
// updating GHASH with the lengths block and encrypting the GHASH accumulator.
// |total_aadlen| and |total_datalen| must be the total length of the additional
// authenticated data and the en/decrypted data in bytes, respectively.
//
// The encryption function then stores the full-length (16-byte) computed
// authentication tag to |ghash_acc|. The decryption function instead loads the
// expected authentication tag (the one that was transmitted) from the 16-byte
// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
// computed tag in constant time, and returns true if and only if they match.
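//
// Note that |taglen| is the seventh argument, so per the System V ABI it is
// passed on the stack rather than in a register; it is loaded from 8(%rsp)
// below.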
.macro _aes_gcm_final enc

// Function arguments
.set KEY, %rdi
.set LE_CTR_PTR, %rsi
.set GHASH_ACC_PTR, %rdx
.set TOTAL_AADLEN, %rcx
.set TOTAL_DATALEN, %r8
.set TAG, %r9
.set TAGLEN, %r10d // Originally at 8(%rsp)

// Additional local variables.
// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.
.set AESKEYLEN, %r11d
.set AESKEYLEN64, %r11
.set GFPOLY, %xmm4
.set BSWAP_MASK, %xmm5
.set LE_CTR, %xmm6
.set GHASH_ACC, %xmm7
.set H_POW1, %xmm8

// Load some constants.
vmovdqa .Lgfpoly(%rip), GFPOLY
vmovdqa .Lbswap_mask(%rip), BSWAP_MASK

// Load the AES key length in bytes.
movl OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

// Set up a counter block with 1 in the low 32-bit word. This is the
// counter that produces the ciphertext needed to encrypt the auth tag.
// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
vpblendd $0xe, (LE_CTR_PTR), GFPOLY, LE_CTR

// Build the lengths block and XOR it with the GHASH accumulator.
// Although the lengths block is defined as the AAD length followed by
// the en/decrypted data length, both in big-endian byte order, a byte
// reflection of the full block is needed because of the way we compute
// GHASH (see _ghash_mul_step). By using little-endian values in the
// opposite order, we avoid having to reflect any bytes here.
vmovq TOTAL_DATALEN, %xmm0
vpinsrq $1, TOTAL_AADLEN, %xmm0, %xmm0
vpsllq $3, %xmm0, %xmm0 // Bytes to bits
vpxor (GHASH_ACC_PTR), %xmm0, GHASH_ACC

// Load the first hash key power (H^1), which is stored last.
vmovdqu8 OFFSETOFEND_H_POWERS-16(KEY), H_POW1

.if !\enc
// Prepare a mask of TAGLEN one bits.
movl 8(%rsp), TAGLEN
mov $-1, %eax
bzhi TAGLEN, %eax, %eax
kmovd %eax, %k1
.endif

// Make %rax point to the last AES round key for the chosen AES variant.
lea 6*16(KEY,AESKEYLEN64,4), %rax

// Start the AES encryption of the counter block by swapping the counter
// block to big-endian and XOR-ing it with the zero-th AES round key.
vpshufb BSWAP_MASK, LE_CTR, %xmm0
vpxor (KEY), %xmm0, %xmm0

// Complete the AES encryption and multiply GHASH_ACC by H^1.
// Interleave the AES and GHASH instructions to improve performance.
cmp $24, AESKEYLEN
jl 128f // AES-128?
je 192f // AES-192?
// AES-256
vaesenc -13*16(%rax), %xmm0, %xmm0
vaesenc -12*16(%rax), %xmm0, %xmm0
192:
vaesenc -11*16(%rax), %xmm0, %xmm0
vaesenc -10*16(%rax), %xmm0, %xmm0
128:
.irp i, 0,1,2,3,4,5,6,7,8
_ghash_mul_step \i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
%xmm1, %xmm2, %xmm3
vaesenc (\i-9)*16(%rax), %xmm0, %xmm0
.endr
_ghash_mul_step 9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
%xmm1, %xmm2, %xmm3

// Undo the byte reflection of the GHASH accumulator.
vpshufb BSWAP_MASK, GHASH_ACC, GHASH_ACC

// Do the last AES round and XOR the resulting keystream block with the
// GHASH accumulator to produce the full computed authentication tag.
//
// Reduce latency by taking advantage of the property vaesenclast(key,
// a) ^ b == vaesenclast(key ^ b, a). I.e., XOR GHASH_ACC into the last
// round key, instead of XOR'ing the final AES output with GHASH_ACC.
//
// enc_final then returns the computed auth tag, while dec_final
// compares it with the transmitted one and returns a bool. To compare
// the tags, dec_final XORs them together and uses vptest to check
// whether the result is all-zeroes. This should be constant-time.
// dec_final applies the vaesenclast optimization to this additional
// value XOR'd too, using vpternlogd to XOR the last round key, GHASH
// accumulator, and transmitted auth tag together in one instruction.
.if \enc
vpxor (%rax), GHASH_ACC, %xmm1
vaesenclast %xmm1, %xmm0, GHASH_ACC
vmovdqu GHASH_ACC, (GHASH_ACC_PTR)
.else
vmovdqu (TAG), %xmm1
vpternlogd $0x96, (%rax), GHASH_ACC, %xmm1
vaesenclast %xmm1, %xmm0, %xmm0
xor %eax, %eax
vmovdqu8 %xmm0, %xmm0{%k1}{z} // Truncate to TAGLEN bytes
vptest %xmm0, %xmm0
sete %al
.endif
// No need for vzeroupper here, since only xmm registers were used.
RET
.endm

_set_veclen 32
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_256)
_aes_gcm_precompute
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_256)
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_256)
_aes_gcm_update 1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_256)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_256)
_aes_gcm_update 0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_256)

_set_veclen 64
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_512)
_aes_gcm_precompute
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_512)
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_512)
_aes_gcm_update 1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_512)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_512)
_aes_gcm_update 0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_512)

// void aes_gcm_aad_update_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//                                    u8 ghash_acc[16],
//                                    const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|. |key->ghash_key_powers| must have been
// initialized. On the first call, |ghash_acc| must be all zeroes. |aadlen|
// must be a multiple of 16, except on the last call where it can be any length.
// The caller must do any buffering needed to ensure this.
//
// AES-GCM is almost always used with small amounts of AAD, less than 32 bytes.
// Therefore, for AAD processing we currently only provide this implementation
// which uses 256-bit vectors (ymm registers) and only has a 1x-wide loop. This
// keeps the code size down, and it enables some micro-optimizations, e.g. using
// VEX-coded instructions instead of EVEX-coded to save some instruction bytes.
// To optimize for large amounts of AAD, we could implement a 4x-wide loop and
// provide a version using 512-bit vectors, but that doesn't seem to be useful.
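//
// (In the common case of 1 to 31 bytes of AAD, the 32-byte loop below is
// skipped entirely and the AAD is handled by a single masked load and one
// GHASH multiplication.)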
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx10)

// Function arguments
.set KEY, %rdi
.set GHASH_ACC_PTR, %rsi
.set AAD, %rdx
.set AADLEN, %ecx
.set AADLEN64, %rcx // Zero-extend AADLEN before using!

// Additional local variables.
// %rax, %ymm0-%ymm3, and %k1 are used as temporary registers.
.set BSWAP_MASK, %ymm4
.set GFPOLY, %ymm5
.set GHASH_ACC, %ymm6
.set GHASH_ACC_XMM, %xmm6
.set H_POW1, %ymm7

// Load some constants.
vbroadcasti128 .Lbswap_mask(%rip), BSWAP_MASK
vbroadcasti128 .Lgfpoly(%rip), GFPOLY

// Load the GHASH accumulator.
vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM

// Update GHASH with 32 bytes of AAD at a time.
//
// Pre-subtracting 32 from AADLEN saves an instruction from the loop and
// also ensures that at least one write always occurs to AADLEN,
// zero-extending it and allowing AADLEN64 to be used later.
sub $32, AADLEN
jl .Laad_loop_1x_done
vmovdqu8 OFFSETOFEND_H_POWERS-32(KEY), H_POW1 // [H^2, H^1]
.Laad_loop_1x:
vmovdqu (AAD), %ymm0
vpshufb BSWAP_MASK, %ymm0, %ymm0
vpxor %ymm0, GHASH_ACC, GHASH_ACC
_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
%ymm0, %ymm1, %ymm2
vextracti128 $1, GHASH_ACC, %xmm0
vpxor %xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM
add $32, AAD
sub $32, AADLEN
jge .Laad_loop_1x
.Laad_loop_1x_done:
add $32, AADLEN
jz .Laad_done

// Update GHASH with the remaining 1 <= AADLEN < 32 bytes of AAD.
mov $-1, %eax
bzhi AADLEN, %eax, %eax
kmovd %eax, %k1
vmovdqu8 (AAD), %ymm0{%k1}{z}
neg AADLEN64
and $~15, AADLEN64 // -round_up(AADLEN, 16)
vmovdqu8 OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
vpshufb BSWAP_MASK, %ymm0, %ymm0
vpxor %ymm0, GHASH_ACC, GHASH_ACC
_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
%ymm0, %ymm1, %ymm2
vextracti128 $1, GHASH_ACC, %xmm0
vpxor %xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM

.Laad_done:
// Store the updated GHASH accumulator back to memory.
vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)

vzeroupper // This is needed after using ymm or zmm registers.
RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx10)

SYM_FUNC_START(aes_gcm_enc_final_vaes_avx10)
_aes_gcm_final 1
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx10)
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx10)
_aes_gcm_final 0
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx10)