Path: blob/master/arch/x86/crypto/aes-gcm-avx10-x86_64.S
26451 views
/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */1//2// VAES and VPCLMULQDQ optimized AES-GCM for x86_643//4// Copyright 2024 Google LLC5//6// Author: Eric Biggers <ebiggers@google.com>7//8//------------------------------------------------------------------------------9//10// This file is dual-licensed, meaning that you can use it under your choice of11// either of the following two licenses:12//13// Licensed under the Apache License 2.0 (the "License"). You may obtain a copy14// of the License at15//16// http://www.apache.org/licenses/LICENSE-2.017//18// Unless required by applicable law or agreed to in writing, software19// distributed under the License is distributed on an "AS IS" BASIS,20// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.21// See the License for the specific language governing permissions and22// limitations under the License.23//24// or25//26// Redistribution and use in source and binary forms, with or without27// modification, are permitted provided that the following conditions are met:28//29// 1. Redistributions of source code must retain the above copyright notice,30// this list of conditions and the following disclaimer.31//32// 2. Redistributions in binary form must reproduce the above copyright33// notice, this list of conditions and the following disclaimer in the34// documentation and/or other materials provided with the distribution.35//36// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"37// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE38// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE39// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE40// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR41// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF42// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS43// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN44// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)45// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE46// POSSIBILITY OF SUCH DAMAGE.47//48//------------------------------------------------------------------------------49//50// This file implements AES-GCM (Galois/Counter Mode) for x86_64 CPUs that51// support VAES (vector AES), VPCLMULQDQ (vector carryless multiplication), and52// either AVX512 or AVX10. Some of the functions, notably the encryption and53// decryption update functions which are the most performance-critical, are54// provided in two variants generated from a macro: one using 256-bit vectors55// (suffix: vaes_avx10_256) and one using 512-bit vectors (vaes_avx10_512). The56// other, "shared" functions (vaes_avx10) use at most 256-bit vectors.57//58// The functions that use 512-bit vectors are intended for CPUs that support59// 512-bit vectors *and* where using them doesn't cause significant60// downclocking. They require the following CPU features:61//62// VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/512)63//64// The other functions require the following CPU features:65//66// VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/256)67//68// All functions use the "System V" ABI. The Windows ABI is not supported.69//70// Note that we use "avx10" in the names of the functions as a shorthand to71// really mean "AVX10 or a certain set of AVX512 features". Due to Intel's72// introduction of AVX512 and then its replacement by AVX10, there doesn't seem73// to be a simple way to name things that makes sense on all CPUs.74//75// Note that the macros that support both 256-bit and 512-bit vectors could76// fairly easily be changed to support 128-bit too. However, this would *not*77// be sufficient to allow the code to run on CPUs without AVX512 or AVX10,78// because the code heavily uses several features of these extensions other than79// the vector length: the increase in the number of SIMD registers from 16 to80// 32, masking support, and new instructions such as vpternlogd (which can do a81// three-argument XOR). These features are very useful for AES-GCM.8283#include <linux/linkage.h>8485.section .rodata86.p2align 68788// A shuffle mask that reflects the bytes of 16-byte blocks89.Lbswap_mask:90.octa 0x000102030405060708090a0b0c0d0e0f9192// This is the GHASH reducing polynomial without its constant term, i.e.93// x^128 + x^7 + x^2 + x, represented using the backwards mapping94// between bits and polynomial coefficients.95//96// Alternatively, it can be interpreted as the naturally-ordered97// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the98// "reversed" GHASH reducing polynomial without its x^128 term.99.Lgfpoly:100.octa 0xc2000000000000000000000000000001101102// Same as above, but with the (1 << 64) bit set.103.Lgfpoly_and_internal_carrybit:104.octa 0xc2000000000000010000000000000001105106// The below constants are used for incrementing the counter blocks.107// ctr_pattern points to the four 128-bit values [0, 1, 2, 3].108// inc_2blocks and inc_4blocks point to the single 128-bit values 2 and109// 4. Note that the same '2' is reused in ctr_pattern and inc_2blocks.110.Lctr_pattern:111.octa 0112.octa 1113.Linc_2blocks:114.octa 2115.octa 3116.Linc_4blocks:117.octa 4118119// Number of powers of the hash key stored in the key struct. The powers are120// stored from highest (H^NUM_H_POWERS) to lowest (H^1).121#define NUM_H_POWERS 16122123// Offset to AES key length (in bytes) in the key struct124#define OFFSETOF_AESKEYLEN 480125126// Offset to start of hash key powers array in the key struct127#define OFFSETOF_H_POWERS 512128129// Offset to end of hash key powers array in the key struct.130//131// This is immediately followed by three zeroized padding blocks, which are132// included so that partial vectors can be handled more easily. E.g. if VL=64133// and two blocks remain, we load the 4 values [H^2, H^1, 0, 0]. The most134// padding blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.135#define OFFSETOFEND_H_POWERS (OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))136137.text138139// Set the vector length in bytes. This sets the VL variable and defines140// register aliases V0-V31 that map to the ymm or zmm registers.141.macro _set_veclen vl142.set VL, \vl143.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \14416,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31145.if VL == 32146.set V\i, %ymm\i147.elseif VL == 64148.set V\i, %zmm\i149.else150.error "Unsupported vector length"151.endif152.endr153.endm154155// The _ghash_mul_step macro does one step of GHASH multiplication of the156// 128-bit lanes of \a by the corresponding 128-bit lanes of \b and storing the157// reduced products in \dst. \t0, \t1, and \t2 are temporary registers of the158// same size as \a and \b. To complete all steps, this must invoked with \i=0159// through \i=9. The division into steps allows users of this macro to160// optionally interleave the computation with other instructions. Users of this161// macro must preserve the parameter registers across steps.162//163// The multiplications are done in GHASH's representation of the finite field164// GF(2^128). Elements of GF(2^128) are represented as binary polynomials165// (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial166// G. The GCM specification uses G = x^128 + x^7 + x^2 + x + 1. Addition is167// just XOR, while multiplication is more complex and has two parts: (a) do168// carryless multiplication of two 128-bit input polynomials to get a 256-bit169// intermediate product polynomial, and (b) reduce the intermediate product to170// 128 bits by adding multiples of G that cancel out terms in it. (Adding171// multiples of G doesn't change which field element the polynomial represents.)172//173// Unfortunately, the GCM specification maps bits to/from polynomial174// coefficients backwards from the natural order. In each byte it specifies the175// highest bit to be the lowest order polynomial coefficient, *not* the highest!176// This makes it nontrivial to work with the GHASH polynomials. We could177// reflect the bits, but x86 doesn't have an instruction that does that.178//179// Instead, we operate on the values without bit-reflecting them. This *mostly*180// just works, since XOR and carryless multiplication are symmetric with respect181// to bit order, but it has some consequences. First, due to GHASH's byte182// order, by skipping bit reflection, *byte* reflection becomes necessary to183// give the polynomial terms a consistent order. E.g., considering an N-bit184// value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0185// through N-1 of the byte-reflected value represent the coefficients of x^(N-1)186// through x^0, whereas bits 0 through N-1 of the non-byte-reflected value187// represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8) which can't be worked188// with. Fortunately, x86's vpshufb instruction can do byte reflection.189//190// Second, forgoing the bit reflection causes an extra multiple of x (still191// using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each192// multiplication. This is because an M-bit by N-bit carryless multiplication193// really produces a (M+N-1)-bit product, but in practice it's zero-extended to194// M+N bits. In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits195// to polynomial coefficients backwards, this zero-extension actually changes196// the product by introducing an extra factor of x. Therefore, users of this197// macro must ensure that one of the inputs has an extra factor of x^-1, i.e.198// the multiplicative inverse of x, to cancel out the extra x.199//200// Third, the backwards coefficients convention is just confusing to work with,201// since it makes "low" and "high" in the polynomial math mean the opposite of202// their normal meaning in computer programming. This can be solved by using an203// alternative interpretation: the polynomial coefficients are understood to be204// in the natural order, and the multiplication is actually \a * \b * x^-128 mod205// x^128 + x^127 + x^126 + x^121 + 1. This doesn't change the inputs, outputs,206// or the implementation at all; it just changes the mathematical interpretation207// of what each instruction is doing. Starting from here, we'll use this208// alternative interpretation, as it's easier to understand the code that way.209//210// Moving onto the implementation, the vpclmulqdq instruction does 64 x 64 =>211// 128-bit carryless multiplication, so we break the 128 x 128 multiplication212// into parts as follows (the _L and _H suffixes denote low and high 64 bits):213//214// LO = a_L * b_L215// MI = (a_L * b_H) + (a_H * b_L)216// HI = a_H * b_H217//218// The 256-bit product is x^128*HI + x^64*MI + LO. LO, MI, and HI are 128-bit.219// Note that MI "overlaps" with LO and HI. We don't consolidate MI into LO and220// HI right away, since the way the reduction works makes that unnecessary.221//222// For the reduction, we cancel out the low 128 bits by adding multiples of G =223// x^128 + x^127 + x^126 + x^121 + 1. This is done by two iterations, each of224// which cancels out the next lowest 64 bits. Consider a value x^64*A + B,225// where A and B are 128-bit. Adding B_L*G to that value gives:226//227// x^64*A + B + B_L*G228// = x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)229// = x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L230// = x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L231// = x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))232//233// So: if we sum A, B with its halves swapped, and the low half of B times x^63234// + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the235// original value x^64*A + B. I.e., the low 64 bits got canceled out.236//237// We just need to apply this twice: first to fold LO into MI, and second to238// fold the updated MI into HI.239//240// The needed three-argument XORs are done using the vpternlogd instruction with241// immediate 0x96, since this is faster than two vpxord instructions.242//243// A potential optimization, assuming that b is fixed per-key (if a is fixed244// per-key it would work the other way around), is to use one iteration of the245// reduction described above to precompute a value c such that x^64*c = b mod G,246// and then multiply a_L by c (and implicitly by x^64) instead of by b:247//248// MI = (a_L * c_L) + (a_H * b_L)249// HI = (a_L * c_H) + (a_H * b_H)250//251// This would eliminate the LO part of the intermediate product, which would252// eliminate the need to fold LO into MI. This would save two instructions,253// including a vpclmulqdq. However, we currently don't use this optimization254// because it would require twice as many per-key precomputed values.255//256// Using Karatsuba multiplication instead of "schoolbook" multiplication257// similarly would save a vpclmulqdq but does not seem to be worth it.258.macro _ghash_mul_step i, a, b, dst, gfpoly, t0, t1, t2259.if \i == 0260vpclmulqdq $0x00, \a, \b, \t0 // LO = a_L * b_L261vpclmulqdq $0x01, \a, \b, \t1 // MI_0 = a_L * b_H262.elseif \i == 1263vpclmulqdq $0x10, \a, \b, \t2 // MI_1 = a_H * b_L264.elseif \i == 2265vpxord \t2, \t1, \t1 // MI = MI_0 + MI_1266.elseif \i == 3267vpclmulqdq $0x01, \t0, \gfpoly, \t2 // LO_L*(x^63 + x^62 + x^57)268.elseif \i == 4269vpshufd $0x4e, \t0, \t0 // Swap halves of LO270.elseif \i == 5271vpternlogd $0x96, \t2, \t0, \t1 // Fold LO into MI272.elseif \i == 6273vpclmulqdq $0x11, \a, \b, \dst // HI = a_H * b_H274.elseif \i == 7275vpclmulqdq $0x01, \t1, \gfpoly, \t0 // MI_L*(x^63 + x^62 + x^57)276.elseif \i == 8277vpshufd $0x4e, \t1, \t1 // Swap halves of MI278.elseif \i == 9279vpternlogd $0x96, \t0, \t1, \dst // Fold MI into HI280.endif281.endm282283// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store284// the reduced products in \dst. See _ghash_mul_step for full explanation.285.macro _ghash_mul a, b, dst, gfpoly, t0, t1, t2286.irp i, 0,1,2,3,4,5,6,7,8,9287_ghash_mul_step \i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2288.endr289.endm290291// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the292// *unreduced* products to \lo, \mi, and \hi.293.macro _ghash_mul_noreduce a, b, lo, mi, hi, t0, t1, t2, t3294vpclmulqdq $0x00, \a, \b, \t0 // a_L * b_L295vpclmulqdq $0x01, \a, \b, \t1 // a_L * b_H296vpclmulqdq $0x10, \a, \b, \t2 // a_H * b_L297vpclmulqdq $0x11, \a, \b, \t3 // a_H * b_H298vpxord \t0, \lo, \lo299vpternlogd $0x96, \t2, \t1, \mi300vpxord \t3, \hi, \hi301.endm302303// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit304// reduced products in \hi. See _ghash_mul_step for explanation of reduction.305.macro _ghash_reduce lo, mi, hi, gfpoly, t0306vpclmulqdq $0x01, \lo, \gfpoly, \t0307vpshufd $0x4e, \lo, \lo308vpternlogd $0x96, \t0, \lo, \mi309vpclmulqdq $0x01, \mi, \gfpoly, \t0310vpshufd $0x4e, \mi, \mi311vpternlogd $0x96, \t0, \mi, \hi312.endm313314// void aes_gcm_precompute_##suffix(struct aes_gcm_key_avx10 *key);315//316// Given the expanded AES key |key->aes_key|, this function derives the GHASH317// subkey and initializes |key->ghash_key_powers| with powers of it.318//319// The number of key powers initialized is NUM_H_POWERS, and they are stored in320// the order H^NUM_H_POWERS to H^1. The zeroized padding blocks after the key321// powers themselves are also initialized.322//323// This macro supports both VL=32 and VL=64. _set_veclen must have been invoked324// with the desired length. In the VL=32 case, the function computes twice as325// many key powers than are actually used by the VL=32 GCM update functions.326// This is done to keep the key format the same regardless of vector length.327.macro _aes_gcm_precompute328329// Function arguments330.set KEY, %rdi331332// Additional local variables. V0-V2 and %rax are used as temporaries.333.set POWERS_PTR, %rsi334.set RNDKEYLAST_PTR, %rdx335.set H_CUR, V3336.set H_CUR_YMM, %ymm3337.set H_CUR_XMM, %xmm3338.set H_INC, V4339.set H_INC_YMM, %ymm4340.set H_INC_XMM, %xmm4341.set GFPOLY, V5342.set GFPOLY_YMM, %ymm5343.set GFPOLY_XMM, %xmm5344345// Get pointer to lowest set of key powers (located at end of array).346lea OFFSETOFEND_H_POWERS-VL(KEY), POWERS_PTR347348// Encrypt an all-zeroes block to get the raw hash subkey.349movl OFFSETOF_AESKEYLEN(KEY), %eax350lea 6*16(KEY,%rax,4), RNDKEYLAST_PTR351vmovdqu (KEY), %xmm0 // Zero-th round key XOR all-zeroes block352add $16, KEY3531:354vaesenc (KEY), %xmm0, %xmm0355add $16, KEY356cmp KEY, RNDKEYLAST_PTR357jne 1b358vaesenclast (RNDKEYLAST_PTR), %xmm0, %xmm0359360// Reflect the bytes of the raw hash subkey.361vpshufb .Lbswap_mask(%rip), %xmm0, H_CUR_XMM362363// Zeroize the padding blocks.364vpxor %xmm0, %xmm0, %xmm0365vmovdqu %ymm0, VL(POWERS_PTR)366vmovdqu %xmm0, VL+2*16(POWERS_PTR)367368// Finish preprocessing the first key power, H^1. Since this GHASH369// implementation operates directly on values with the backwards bit370// order specified by the GCM standard, it's necessary to preprocess the371// raw key as follows. First, reflect its bytes. Second, multiply it372// by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards373// interpretation of polynomial coefficients), which can also be374// interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121375// + 1 using the alternative, natural interpretation of polynomial376// coefficients. For details, see the comment above _ghash_mul_step.377//378// Either way, for the multiplication the concrete operation performed379// is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2380// << 120) | 1 if a 1 bit was carried out. However, there's no 128-bit381// wide shift instruction, so instead double each of the two 64-bit382// halves and incorporate the internal carry bit into the value XOR'd.383vpshufd $0xd3, H_CUR_XMM, %xmm0384vpsrad $31, %xmm0, %xmm0385vpaddq H_CUR_XMM, H_CUR_XMM, H_CUR_XMM386// H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit387vpternlogd $0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM388389// Load the gfpoly constant.390vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY391392// Square H^1 to get H^2.393//394// Note that as with H^1, all higher key powers also need an extra395// factor of x^-1 (or x using the natural interpretation). Nothing396// special needs to be done to make this happen, though: H^1 * H^1 would397// end up with two factors of x^-1, but the multiplication consumes one.398// So the product H^2 ends up with the desired one factor of x^-1.399_ghash_mul H_CUR_XMM, H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, \400%xmm0, %xmm1, %xmm2401402// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].403vinserti128 $1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM404vinserti128 $1, H_INC_XMM, H_INC_YMM, H_INC_YMM405406.if VL == 64407// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].408_ghash_mul H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \409%ymm0, %ymm1, %ymm2410vinserti64x4 $1, H_CUR_YMM, H_INC, H_CUR411vshufi64x2 $0, H_INC, H_INC, H_INC412.endif413414// Store the lowest set of key powers.415vmovdqu8 H_CUR, (POWERS_PTR)416417// Compute and store the remaining key powers. With VL=32, repeatedly418// multiply [H^(i+1), H^i] by [H^2, H^2] to get [H^(i+3), H^(i+2)].419// With VL=64, repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by420// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].421mov $(NUM_H_POWERS*16/VL) - 1, %eax422.Lprecompute_next\@:423sub $VL, POWERS_PTR424_ghash_mul H_INC, H_CUR, H_CUR, GFPOLY, V0, V1, V2425vmovdqu8 H_CUR, (POWERS_PTR)426dec %eax427jnz .Lprecompute_next\@428429vzeroupper // This is needed after using ymm or zmm registers.430RET431.endm432433// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store434// the result in \dst_xmm. This implicitly zeroizes the other lanes of dst.435.macro _horizontal_xor src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm436vextracti32x4 $1, \src, \t0_xmm437.if VL == 32438vpxord \t0_xmm, \src_xmm, \dst_xmm439.elseif VL == 64440vextracti32x4 $2, \src, \t1_xmm441vextracti32x4 $3, \src, \t2_xmm442vpxord \t0_xmm, \src_xmm, \dst_xmm443vpternlogd $0x96, \t1_xmm, \t2_xmm, \dst_xmm444.else445.error "Unsupported vector length"446.endif447.endm448449// Do one step of the GHASH update of the data blocks given in the vector450// registers GHASHDATA[0-3]. \i specifies the step to do, 0 through 9. The451// division into steps allows users of this macro to optionally interleave the452// computation with other instructions. This macro uses the vector register453// GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;454// H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and455// GHASHTMP[0-2] as temporaries. This macro handles the byte-reflection of the456// data blocks. The parameter registers must be preserved across steps.457//458// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +459// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the460// operations are vectorized operations on vectors of 16-byte blocks. E.g.,461// with VL=32 there are 2 blocks per vector and the vectorized terms correspond462// to the following non-vectorized terms:463//464// H_POW4*(GHASHDATA0 + GHASH_ACC) => H^8*(blk0 + GHASH_ACC_XMM) and H^7*(blk1 + 0)465// H_POW3*GHASHDATA1 => H^6*blk2 and H^5*blk3466// H_POW2*GHASHDATA2 => H^4*blk4 and H^3*blk5467// H_POW1*GHASHDATA3 => H^2*blk6 and H^1*blk7468//469// With VL=64, we use 4 blocks/vector, H^16 through H^1, and blk0 through blk15.470//471// More concretely, this code does:472// - Do vectorized "schoolbook" multiplications to compute the intermediate473// 256-bit product of each block and its corresponding hash key power.474// There are 4*VL/16 of these intermediate products.475// - Sum (XOR) the intermediate 256-bit products across vectors. This leaves476// VL/16 256-bit intermediate values.477// - Do a vectorized reduction of these 256-bit intermediate values to478// 128-bits each. This leaves VL/16 128-bit intermediate values.479// - Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.480//481// See _ghash_mul_step for the full explanation of the operations performed for482// each individual finite field multiplication and reduction.483.macro _ghash_step_4x i484.if \i == 0485vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0486vpxord GHASH_ACC, GHASHDATA0, GHASHDATA0487vpshufb BSWAP_MASK, GHASHDATA1, GHASHDATA1488vpshufb BSWAP_MASK, GHASHDATA2, GHASHDATA2489.elseif \i == 1490vpshufb BSWAP_MASK, GHASHDATA3, GHASHDATA3491vpclmulqdq $0x00, H_POW4, GHASHDATA0, GHASH_ACC // LO_0492vpclmulqdq $0x00, H_POW3, GHASHDATA1, GHASHTMP0 // LO_1493vpclmulqdq $0x00, H_POW2, GHASHDATA2, GHASHTMP1 // LO_2494.elseif \i == 2495vpxord GHASHTMP0, GHASH_ACC, GHASH_ACC // sum(LO_{1,0})496vpclmulqdq $0x00, H_POW1, GHASHDATA3, GHASHTMP2 // LO_3497vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC // LO = sum(LO_{3,2,1,0})498vpclmulqdq $0x01, H_POW4, GHASHDATA0, GHASHTMP0 // MI_0499.elseif \i == 3500vpclmulqdq $0x01, H_POW3, GHASHDATA1, GHASHTMP1 // MI_1501vpclmulqdq $0x01, H_POW2, GHASHDATA2, GHASHTMP2 // MI_2502vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{2,1,0})503vpclmulqdq $0x01, H_POW1, GHASHDATA3, GHASHTMP1 // MI_3504.elseif \i == 4505vpclmulqdq $0x10, H_POW4, GHASHDATA0, GHASHTMP2 // MI_4506vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{4,3,2,1,0})507vpclmulqdq $0x10, H_POW3, GHASHDATA1, GHASHTMP1 // MI_5508vpclmulqdq $0x10, H_POW2, GHASHDATA2, GHASHTMP2 // MI_6509.elseif \i == 5510vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{6,5,4,3,2,1,0})511vpclmulqdq $0x01, GHASH_ACC, GFPOLY, GHASHTMP2 // LO_L*(x^63 + x^62 + x^57)512vpclmulqdq $0x10, H_POW1, GHASHDATA3, GHASHTMP1 // MI_7513vpxord GHASHTMP1, GHASHTMP0, GHASHTMP0 // MI = sum(MI_{7,6,5,4,3,2,1,0})514.elseif \i == 6515vpshufd $0x4e, GHASH_ACC, GHASH_ACC // Swap halves of LO516vpclmulqdq $0x11, H_POW4, GHASHDATA0, GHASHDATA0 // HI_0517vpclmulqdq $0x11, H_POW3, GHASHDATA1, GHASHDATA1 // HI_1518vpclmulqdq $0x11, H_POW2, GHASHDATA2, GHASHDATA2 // HI_2519.elseif \i == 7520vpternlogd $0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0 // Fold LO into MI521vpclmulqdq $0x11, H_POW1, GHASHDATA3, GHASHDATA3 // HI_3522vpternlogd $0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})523vpclmulqdq $0x01, GHASHTMP0, GFPOLY, GHASHTMP1 // MI_L*(x^63 + x^62 + x^57)524.elseif \i == 8525vpxord GHASHDATA3, GHASHDATA0, GHASH_ACC // HI = sum(HI_{3,2,1,0})526vpshufd $0x4e, GHASHTMP0, GHASHTMP0 // Swap halves of MI527vpternlogd $0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC // Fold MI into HI528.elseif \i == 9529_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \530GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM531.endif532.endm533534// Do one non-last round of AES encryption on the counter blocks in V0-V3 using535// the round key that has been broadcast to all 128-bit lanes of \round_key.536.macro _vaesenc_4x round_key537vaesenc \round_key, V0, V0538vaesenc \round_key, V1, V1539vaesenc \round_key, V2, V2540vaesenc \round_key, V3, V3541.endm542543// Start the AES encryption of four vectors of counter blocks.544.macro _ctr_begin_4x545546// Increment LE_CTR four times to generate four vectors of little-endian547// counter blocks, swap each to big-endian, and store them in V0-V3.548vpshufb BSWAP_MASK, LE_CTR, V0549vpaddd LE_CTR_INC, LE_CTR, LE_CTR550vpshufb BSWAP_MASK, LE_CTR, V1551vpaddd LE_CTR_INC, LE_CTR, LE_CTR552vpshufb BSWAP_MASK, LE_CTR, V2553vpaddd LE_CTR_INC, LE_CTR, LE_CTR554vpshufb BSWAP_MASK, LE_CTR, V3555vpaddd LE_CTR_INC, LE_CTR, LE_CTR556557// AES "round zero": XOR in the zero-th round key.558vpxord RNDKEY0, V0, V0559vpxord RNDKEY0, V1, V1560vpxord RNDKEY0, V2, V2561vpxord RNDKEY0, V3, V3562.endm563564// Do the last AES round for four vectors of counter blocks V0-V3, XOR source565// data with the resulting keystream, and write the result to DST and566// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)567.macro _aesenclast_and_xor_4x568// XOR the source data with the last round key, saving the result in569// GHASHDATA[0-3]. This reduces latency by taking advantage of the570// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).571vpxord 0*VL(SRC), RNDKEYLAST, GHASHDATA0572vpxord 1*VL(SRC), RNDKEYLAST, GHASHDATA1573vpxord 2*VL(SRC), RNDKEYLAST, GHASHDATA2574vpxord 3*VL(SRC), RNDKEYLAST, GHASHDATA3575576// Do the last AES round. This handles the XOR with the source data577// too, as per the optimization described above.578vaesenclast GHASHDATA0, V0, GHASHDATA0579vaesenclast GHASHDATA1, V1, GHASHDATA1580vaesenclast GHASHDATA2, V2, GHASHDATA2581vaesenclast GHASHDATA3, V3, GHASHDATA3582583// Store the en/decrypted data to DST.584vmovdqu8 GHASHDATA0, 0*VL(DST)585vmovdqu8 GHASHDATA1, 1*VL(DST)586vmovdqu8 GHASHDATA2, 2*VL(DST)587vmovdqu8 GHASHDATA3, 3*VL(DST)588.endm589590// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,591// const u32 le_ctr[4], u8 ghash_acc[16],592// const u8 *src, u8 *dst, int datalen);593//594// This macro generates a GCM encryption or decryption update function with the595// above prototype (with \enc selecting which one). This macro supports both596// VL=32 and VL=64. _set_veclen must have been invoked with the desired length.597//598// This function computes the next portion of the CTR keystream, XOR's it with599// |datalen| bytes from |src|, and writes the resulting encrypted or decrypted600// data to |dst|. It also updates the GHASH accumulator |ghash_acc| using the601// next |datalen| ciphertext bytes.602//603// |datalen| must be a multiple of 16, except on the last call where it can be604// any length. The caller must do any buffering needed to ensure this. Both605// in-place and out-of-place en/decryption are supported.606//607// |le_ctr| must give the current counter in little-endian format. For a new608// message, the low word of the counter must be 2. This function loads the609// counter from |le_ctr| and increments the loaded counter as needed, but it610// does *not* store the updated counter back to |le_ctr|. The caller must611// update |le_ctr| if any more data segments follow. Internally, only the low612// 32-bit word of the counter is incremented, following the GCM standard.613.macro _aes_gcm_update enc614615// Function arguments616.set KEY, %rdi617.set LE_CTR_PTR, %rsi618.set GHASH_ACC_PTR, %rdx619.set SRC, %rcx620.set DST, %r8621.set DATALEN, %r9d622.set DATALEN64, %r9 // Zero-extend DATALEN before using!623624// Additional local variables625626// %rax and %k1 are used as temporary registers. LE_CTR_PTR is also627// available as a temporary register after the counter is loaded.628629// AES key length in bytes630.set AESKEYLEN, %r10d631.set AESKEYLEN64, %r10632633// Pointer to the last AES round key for the chosen AES variant634.set RNDKEYLAST_PTR, %r11635636// In the main loop, V0-V3 are used as AES input and output. Elsewhere637// they are used as temporary registers.638639// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.640.set GHASHDATA0, V4641.set GHASHDATA0_XMM, %xmm4642.set GHASHDATA1, V5643.set GHASHDATA1_XMM, %xmm5644.set GHASHDATA2, V6645.set GHASHDATA2_XMM, %xmm6646.set GHASHDATA3, V7647648// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values649// using vpshufb, copied to all 128-bit lanes.650.set BSWAP_MASK, V8651652// RNDKEY temporarily holds the next AES round key.653.set RNDKEY, V9654655// GHASH_ACC is the accumulator variable for GHASH. When fully reduced,656// only the lowest 128-bit lane can be nonzero. When not fully reduced,657// more than one lane may be used, and they need to be XOR'd together.658.set GHASH_ACC, V10659.set GHASH_ACC_XMM, %xmm10660661// LE_CTR_INC is the vector of 32-bit words that need to be added to a662// vector of little-endian counter blocks to advance it forwards.663.set LE_CTR_INC, V11664665// LE_CTR contains the next set of little-endian counter blocks.666.set LE_CTR, V12667668// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,669// copied to all 128-bit lanes. RNDKEY0 is the zero-th round key,670// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.671.set RNDKEY0, V13672.set RNDKEYLAST, V14673.set RNDKEY_M9, V15674.set RNDKEY_M8, V16675.set RNDKEY_M7, V17676.set RNDKEY_M6, V18677.set RNDKEY_M5, V19678.set RNDKEY_M4, V20679.set RNDKEY_M3, V21680.set RNDKEY_M2, V22681.set RNDKEY_M1, V23682683// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x. These684// cannot coincide with anything used for AES encryption, since for685// performance reasons GHASH and AES encryption are interleaved.686.set GHASHTMP0, V24687.set GHASHTMP1, V25688.set GHASHTMP2, V26689690// H_POW[4-1] contain the powers of the hash key H^(4*VL/16)...H^1. The691// descending numbering reflects the order of the key powers.692.set H_POW4, V27693.set H_POW3, V28694.set H_POW2, V29695.set H_POW1, V30696697// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.698.set GFPOLY, V31699700// Load some constants.701vbroadcasti32x4 .Lbswap_mask(%rip), BSWAP_MASK702vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY703704// Load the GHASH accumulator and the starting counter.705vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM706vbroadcasti32x4 (LE_CTR_PTR), LE_CTR707708// Load the AES key length in bytes.709movl OFFSETOF_AESKEYLEN(KEY), AESKEYLEN710711// Make RNDKEYLAST_PTR point to the last AES round key. This is the712// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256713// respectively. Then load the zero-th and last round keys.714lea 6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR715vbroadcasti32x4 (KEY), RNDKEY0716vbroadcasti32x4 (RNDKEYLAST_PTR), RNDKEYLAST717718// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.719vpaddd .Lctr_pattern(%rip), LE_CTR, LE_CTR720721// Initialize LE_CTR_INC to contain VL/16 in all 128-bit lanes.722.if VL == 32723vbroadcasti32x4 .Linc_2blocks(%rip), LE_CTR_INC724.elseif VL == 64725vbroadcasti32x4 .Linc_4blocks(%rip), LE_CTR_INC726.else727.error "Unsupported vector length"728.endif729730// If there are at least 4*VL bytes of data, then continue into the loop731// that processes 4*VL bytes of data at a time. Otherwise skip it.732//733// Pre-subtracting 4*VL from DATALEN saves an instruction from the main734// loop and also ensures that at least one write always occurs to735// DATALEN, zero-extending it and allowing DATALEN64 to be used later.736add $-4*VL, DATALEN // shorter than 'sub 4*VL' when VL=32737jl .Lcrypt_loop_4x_done\@738739// Load powers of the hash key.740vmovdqu8 OFFSETOFEND_H_POWERS-4*VL(KEY), H_POW4741vmovdqu8 OFFSETOFEND_H_POWERS-3*VL(KEY), H_POW3742vmovdqu8 OFFSETOFEND_H_POWERS-2*VL(KEY), H_POW2743vmovdqu8 OFFSETOFEND_H_POWERS-1*VL(KEY), H_POW1744745// Main loop: en/decrypt and hash 4 vectors at a time.746//747// When possible, interleave the AES encryption of the counter blocks748// with the GHASH update of the ciphertext blocks. This improves749// performance on many CPUs because the execution ports used by the VAES750// instructions often differ from those used by vpclmulqdq and other751// instructions used in GHASH. For example, many Intel CPUs dispatch752// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.753//754// The interleaving is easiest to do during decryption, since during755// decryption the ciphertext blocks are immediately available. For756// encryption, instead encrypt the first set of blocks, then hash those757// blocks while encrypting the next set of blocks, repeat that as758// needed, and finally hash the last set of blocks.759760.if \enc761// Encrypt the first 4 vectors of plaintext blocks. Leave the resulting762// ciphertext in GHASHDATA[0-3] for GHASH.763_ctr_begin_4x764lea 16(KEY), %rax7651:766vbroadcasti32x4 (%rax), RNDKEY767_vaesenc_4x RNDKEY768add $16, %rax769cmp %rax, RNDKEYLAST_PTR770jne 1b771_aesenclast_and_xor_4x772sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32773sub $-4*VL, DST774add $-4*VL, DATALEN775jl .Lghash_last_ciphertext_4x\@776.endif777778// Cache as many additional AES round keys as possible.779.irp i, 9,8,7,6,5,4,3,2,1780vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY_M\i781.endr782783.Lcrypt_loop_4x\@:784785// If decrypting, load more ciphertext blocks into GHASHDATA[0-3]. If786// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.787.if !\enc788vmovdqu8 0*VL(SRC), GHASHDATA0789vmovdqu8 1*VL(SRC), GHASHDATA1790vmovdqu8 2*VL(SRC), GHASHDATA2791vmovdqu8 3*VL(SRC), GHASHDATA3792.endif793794// Start the AES encryption of the counter blocks.795_ctr_begin_4x796cmp $24, AESKEYLEN797jl 128f // AES-128?798je 192f // AES-192?799// AES-256800vbroadcasti32x4 -13*16(RNDKEYLAST_PTR), RNDKEY801_vaesenc_4x RNDKEY802vbroadcasti32x4 -12*16(RNDKEYLAST_PTR), RNDKEY803_vaesenc_4x RNDKEY804192:805vbroadcasti32x4 -11*16(RNDKEYLAST_PTR), RNDKEY806_vaesenc_4x RNDKEY807vbroadcasti32x4 -10*16(RNDKEYLAST_PTR), RNDKEY808_vaesenc_4x RNDKEY809128:810811// Finish the AES encryption of the counter blocks in V0-V3, interleaved812// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].813.irp i, 9,8,7,6,5,4,3,2,1814_ghash_step_4x (9 - \i)815_vaesenc_4x RNDKEY_M\i816.endr817_ghash_step_4x 9818_aesenclast_and_xor_4x819sub $-4*VL, SRC // shorter than 'add 4*VL' when VL=32820sub $-4*VL, DST821add $-4*VL, DATALEN822jge .Lcrypt_loop_4x\@823824.if \enc825.Lghash_last_ciphertext_4x\@:826// Update GHASH with the last set of ciphertext blocks.827.irp i, 0,1,2,3,4,5,6,7,8,9828_ghash_step_4x \i829.endr830.endif831832.Lcrypt_loop_4x_done\@:833834// Undo the extra subtraction by 4*VL and check whether data remains.835sub $-4*VL, DATALEN // shorter than 'add 4*VL' when VL=32836jz .Ldone\@837838// The data length isn't a multiple of 4*VL. Process the remaining data839// of length 1 <= DATALEN < 4*VL, up to one vector (VL bytes) at a time.840// Going one vector at a time may seem inefficient compared to having841// separate code paths for each possible number of vectors remaining.842// However, using a loop keeps the code size down, and it performs843// surprising well; modern CPUs will start executing the next iteration844// before the previous one finishes and also predict the number of loop845// iterations. For a similar reason, we roll up the AES rounds.846//847// On the last iteration, the remaining length may be less than VL.848// Handle this using masking.849//850// Since there are enough key powers available for all remaining data,851// there is no need to do a GHASH reduction after each iteration.852// Instead, multiply each remaining block by its own key power, and only853// do a GHASH reduction at the very end.854855// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N856// is the number of blocks that remain.857.set POWERS_PTR, LE_CTR_PTR // LE_CTR_PTR is free to be reused.858mov DATALEN, %eax859neg %rax860and $~15, %rax // -round_up(DATALEN, 16)861lea OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR862863// Start collecting the unreduced GHASH intermediate value LO, MI, HI.864.set LO, GHASHDATA0865.set LO_XMM, GHASHDATA0_XMM866.set MI, GHASHDATA1867.set MI_XMM, GHASHDATA1_XMM868.set HI, GHASHDATA2869.set HI_XMM, GHASHDATA2_XMM870vpxor LO_XMM, LO_XMM, LO_XMM871vpxor MI_XMM, MI_XMM, MI_XMM872vpxor HI_XMM, HI_XMM, HI_XMM873874.Lcrypt_loop_1x\@:875876// Select the appropriate mask for this iteration: all 1's if877// DATALEN >= VL, otherwise DATALEN 1's. Do this branchlessly using the878// bzhi instruction from BMI2. (This relies on DATALEN <= 255.)879.if VL < 64880mov $-1, %eax881bzhi DATALEN, %eax, %eax882kmovd %eax, %k1883.else884mov $-1, %rax885bzhi DATALEN64, %rax, %rax886kmovq %rax, %k1887.endif888889// Encrypt a vector of counter blocks. This does not need to be masked.890vpshufb BSWAP_MASK, LE_CTR, V0891vpaddd LE_CTR_INC, LE_CTR, LE_CTR892vpxord RNDKEY0, V0, V0893lea 16(KEY), %rax8941:895vbroadcasti32x4 (%rax), RNDKEY896vaesenc RNDKEY, V0, V0897add $16, %rax898cmp %rax, RNDKEYLAST_PTR899jne 1b900vaesenclast RNDKEYLAST, V0, V0901902// XOR the data with the appropriate number of keystream bytes.903vmovdqu8 (SRC), V1{%k1}{z}904vpxord V1, V0, V0905vmovdqu8 V0, (DST){%k1}906907// Update GHASH with the ciphertext block(s), without reducing.908//909// In the case of DATALEN < VL, the ciphertext is zero-padded to VL.910// (If decrypting, it's done by the above masked load. If encrypting,911// it's done by the below masked register-to-register move.) Note that912// if DATALEN <= VL - 16, there will be additional padding beyond the913// padding of the last block specified by GHASH itself; i.e., there may914// be whole block(s) that get processed by the GHASH multiplication and915// reduction instructions but should not actually be included in the916// GHASH. However, any such blocks are all-zeroes, and the values that917// they're multiplied with are also all-zeroes. Therefore they just add918// 0 * 0 = 0 to the final GHASH result, which makes no difference.919vmovdqu8 (POWERS_PTR), H_POW1920.if \enc921vmovdqu8 V0, V1{%k1}{z}922.endif923vpshufb BSWAP_MASK, V1, V0924vpxord GHASH_ACC, V0, V0925_ghash_mul_noreduce H_POW1, V0, LO, MI, HI, GHASHDATA3, V1, V2, V3926vpxor GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM927928add $VL, POWERS_PTR929add $VL, SRC930add $VL, DST931sub $VL, DATALEN932jg .Lcrypt_loop_1x\@933934// Finally, do the GHASH reduction.935_ghash_reduce LO, MI, HI, GFPOLY, V0936_horizontal_xor HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2937938.Ldone\@:939// Store the updated GHASH accumulator back to memory.940vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)941942vzeroupper // This is needed after using ymm or zmm registers.943RET944.endm945946// void aes_gcm_enc_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,947// const u32 le_ctr[4], u8 ghash_acc[16],948// u64 total_aadlen, u64 total_datalen);949// bool aes_gcm_dec_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,950// const u32 le_ctr[4],951// const u8 ghash_acc[16],952// u64 total_aadlen, u64 total_datalen,953// const u8 tag[16], int taglen);954//955// This macro generates one of the above two functions (with \enc selecting956// which one). Both functions finish computing the GCM authentication tag by957// updating GHASH with the lengths block and encrypting the GHASH accumulator.958// |total_aadlen| and |total_datalen| must be the total length of the additional959// authenticated data and the en/decrypted data in bytes, respectively.960//961// The encryption function then stores the full-length (16-byte) computed962// authentication tag to |ghash_acc|. The decryption function instead loads the963// expected authentication tag (the one that was transmitted) from the 16-byte964// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the965// computed tag in constant time, and returns true if and only if they match.966.macro _aes_gcm_final enc967968// Function arguments969.set KEY, %rdi970.set LE_CTR_PTR, %rsi971.set GHASH_ACC_PTR, %rdx972.set TOTAL_AADLEN, %rcx973.set TOTAL_DATALEN, %r8974.set TAG, %r9975.set TAGLEN, %r10d // Originally at 8(%rsp)976977// Additional local variables.978// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.979.set AESKEYLEN, %r11d980.set AESKEYLEN64, %r11981.set GFPOLY, %xmm4982.set BSWAP_MASK, %xmm5983.set LE_CTR, %xmm6984.set GHASH_ACC, %xmm7985.set H_POW1, %xmm8986987// Load some constants.988vmovdqa .Lgfpoly(%rip), GFPOLY989vmovdqa .Lbswap_mask(%rip), BSWAP_MASK990991// Load the AES key length in bytes.992movl OFFSETOF_AESKEYLEN(KEY), AESKEYLEN993994// Set up a counter block with 1 in the low 32-bit word. This is the995// counter that produces the ciphertext needed to encrypt the auth tag.996// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.997vpblendd $0xe, (LE_CTR_PTR), GFPOLY, LE_CTR998999// Build the lengths block and XOR it with the GHASH accumulator.1000// Although the lengths block is defined as the AAD length followed by1001// the en/decrypted data length, both in big-endian byte order, a byte1002// reflection of the full block is needed because of the way we compute1003// GHASH (see _ghash_mul_step). By using little-endian values in the1004// opposite order, we avoid having to reflect any bytes here.1005vmovq TOTAL_DATALEN, %xmm01006vpinsrq $1, TOTAL_AADLEN, %xmm0, %xmm01007vpsllq $3, %xmm0, %xmm0 // Bytes to bits1008vpxor (GHASH_ACC_PTR), %xmm0, GHASH_ACC10091010// Load the first hash key power (H^1), which is stored last.1011vmovdqu8 OFFSETOFEND_H_POWERS-16(KEY), H_POW110121013.if !\enc1014// Prepare a mask of TAGLEN one bits.1015movl 8(%rsp), TAGLEN1016mov $-1, %eax1017bzhi TAGLEN, %eax, %eax1018kmovd %eax, %k11019.endif10201021// Make %rax point to the last AES round key for the chosen AES variant.1022lea 6*16(KEY,AESKEYLEN64,4), %rax10231024// Start the AES encryption of the counter block by swapping the counter1025// block to big-endian and XOR-ing it with the zero-th AES round key.1026vpshufb BSWAP_MASK, LE_CTR, %xmm01027vpxor (KEY), %xmm0, %xmm010281029// Complete the AES encryption and multiply GHASH_ACC by H^1.1030// Interleave the AES and GHASH instructions to improve performance.1031cmp $24, AESKEYLEN1032jl 128f // AES-128?1033je 192f // AES-192?1034// AES-2561035vaesenc -13*16(%rax), %xmm0, %xmm01036vaesenc -12*16(%rax), %xmm0, %xmm01037192:1038vaesenc -11*16(%rax), %xmm0, %xmm01039vaesenc -10*16(%rax), %xmm0, %xmm01040128:1041.irp i, 0,1,2,3,4,5,6,7,81042_ghash_mul_step \i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \1043%xmm1, %xmm2, %xmm31044vaesenc (\i-9)*16(%rax), %xmm0, %xmm01045.endr1046_ghash_mul_step 9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \1047%xmm1, %xmm2, %xmm310481049// Undo the byte reflection of the GHASH accumulator.1050vpshufb BSWAP_MASK, GHASH_ACC, GHASH_ACC10511052// Do the last AES round and XOR the resulting keystream block with the1053// GHASH accumulator to produce the full computed authentication tag.1054//1055// Reduce latency by taking advantage of the property vaesenclast(key,1056// a) ^ b == vaesenclast(key ^ b, a). I.e., XOR GHASH_ACC into the last1057// round key, instead of XOR'ing the final AES output with GHASH_ACC.1058//1059// enc_final then returns the computed auth tag, while dec_final1060// compares it with the transmitted one and returns a bool. To compare1061// the tags, dec_final XORs them together and uses vptest to check1062// whether the result is all-zeroes. This should be constant-time.1063// dec_final applies the vaesenclast optimization to this additional1064// value XOR'd too, using vpternlogd to XOR the last round key, GHASH1065// accumulator, and transmitted auth tag together in one instruction.1066.if \enc1067vpxor (%rax), GHASH_ACC, %xmm11068vaesenclast %xmm1, %xmm0, GHASH_ACC1069vmovdqu GHASH_ACC, (GHASH_ACC_PTR)1070.else1071vmovdqu (TAG), %xmm11072vpternlogd $0x96, (%rax), GHASH_ACC, %xmm11073vaesenclast %xmm1, %xmm0, %xmm01074xor %eax, %eax1075vmovdqu8 %xmm0, %xmm0{%k1}{z} // Truncate to TAGLEN bytes1076vptest %xmm0, %xmm01077sete %al1078.endif1079// No need for vzeroupper here, since only used xmm registers were used.1080RET1081.endm10821083_set_veclen 321084SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_256)1085_aes_gcm_precompute1086SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_256)1087SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_256)1088_aes_gcm_update 11089SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_256)1090SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_256)1091_aes_gcm_update 01092SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_256)10931094_set_veclen 641095SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_512)1096_aes_gcm_precompute1097SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_512)1098SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_512)1099_aes_gcm_update 11100SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_512)1101SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_512)1102_aes_gcm_update 01103SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_512)11041105// void aes_gcm_aad_update_vaes_avx10(const struct aes_gcm_key_avx10 *key,1106// u8 ghash_acc[16],1107// const u8 *aad, int aadlen);1108//1109// This function processes the AAD (Additional Authenticated Data) in GCM.1110// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the1111// data given by |aad| and |aadlen|. |key->ghash_key_powers| must have been1112// initialized. On the first call, |ghash_acc| must be all zeroes. |aadlen|1113// must be a multiple of 16, except on the last call where it can be any length.1114// The caller must do any buffering needed to ensure this.1115//1116// AES-GCM is almost always used with small amounts of AAD, less than 32 bytes.1117// Therefore, for AAD processing we currently only provide this implementation1118// which uses 256-bit vectors (ymm registers) and only has a 1x-wide loop. This1119// keeps the code size down, and it enables some micro-optimizations, e.g. using1120// VEX-coded instructions instead of EVEX-coded to save some instruction bytes.1121// To optimize for large amounts of AAD, we could implement a 4x-wide loop and1122// provide a version using 512-bit vectors, but that doesn't seem to be useful.1123SYM_FUNC_START(aes_gcm_aad_update_vaes_avx10)11241125// Function arguments1126.set KEY, %rdi1127.set GHASH_ACC_PTR, %rsi1128.set AAD, %rdx1129.set AADLEN, %ecx1130.set AADLEN64, %rcx // Zero-extend AADLEN before using!11311132// Additional local variables.1133// %rax, %ymm0-%ymm3, and %k1 are used as temporary registers.1134.set BSWAP_MASK, %ymm41135.set GFPOLY, %ymm51136.set GHASH_ACC, %ymm61137.set GHASH_ACC_XMM, %xmm61138.set H_POW1, %ymm711391140// Load some constants.1141vbroadcasti128 .Lbswap_mask(%rip), BSWAP_MASK1142vbroadcasti128 .Lgfpoly(%rip), GFPOLY11431144// Load the GHASH accumulator.1145vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM11461147// Update GHASH with 32 bytes of AAD at a time.1148//1149// Pre-subtracting 32 from AADLEN saves an instruction from the loop and1150// also ensures that at least one write always occurs to AADLEN,1151// zero-extending it and allowing AADLEN64 to be used later.1152sub $32, AADLEN1153jl .Laad_loop_1x_done1154vmovdqu8 OFFSETOFEND_H_POWERS-32(KEY), H_POW1 // [H^2, H^1]1155.Laad_loop_1x:1156vmovdqu (AAD), %ymm01157vpshufb BSWAP_MASK, %ymm0, %ymm01158vpxor %ymm0, GHASH_ACC, GHASH_ACC1159_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \1160%ymm0, %ymm1, %ymm21161vextracti128 $1, GHASH_ACC, %xmm01162vpxor %xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM1163add $32, AAD1164sub $32, AADLEN1165jge .Laad_loop_1x1166.Laad_loop_1x_done:1167add $32, AADLEN1168jz .Laad_done11691170// Update GHASH with the remaining 1 <= AADLEN < 32 bytes of AAD.1171mov $-1, %eax1172bzhi AADLEN, %eax, %eax1173kmovd %eax, %k11174vmovdqu8 (AAD), %ymm0{%k1}{z}1175neg AADLEN641176and $~15, AADLEN64 // -round_up(AADLEN, 16)1177vmovdqu8 OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW11178vpshufb BSWAP_MASK, %ymm0, %ymm01179vpxor %ymm0, GHASH_ACC, GHASH_ACC1180_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \1181%ymm0, %ymm1, %ymm21182vextracti128 $1, GHASH_ACC, %xmm01183vpxor %xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM11841185.Laad_done:1186// Store the updated GHASH accumulator back to memory.1187vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)11881189vzeroupper // This is needed after using ymm or zmm registers.1190RET1191SYM_FUNC_END(aes_gcm_aad_update_vaes_avx10)11921193SYM_FUNC_START(aes_gcm_enc_final_vaes_avx10)1194_aes_gcm_final 11195SYM_FUNC_END(aes_gcm_enc_final_vaes_avx10)1196SYM_FUNC_START(aes_gcm_dec_final_vaes_avx10)1197_aes_gcm_final 01198SYM_FUNC_END(aes_gcm_dec_final_vaes_avx10)119912001201