Path: blob/master/arch/x86/crypto/aes-gcm-vaes-avx512.S
/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// AES-GCM implementation for x86_64 CPUs that support the following CPU
// features: VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2
//
// Copyright 2024 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License"). You may obtain a copy
// of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include <linux/linkage.h>

.section .rodata
.p2align 6

// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
	.octa 0x000102030405060708090a0b0c0d0e0f

// This is the GHASH reducing polynomial without its constant term, i.e.
// x^128 + x^7 + x^2 + x, represented using the backwards mapping
// between bits and polynomial coefficients.
//
// Alternatively, it can be interpreted as the naturally-ordered
// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
// "reversed" GHASH reducing polynomial without its x^128 term.
.Lgfpoly:
	.octa 0xc2000000000000000000000000000001

// Same as above, but with the (1 << 64) bit set.
.Lgfpoly_and_internal_carrybit:
	.octa 0xc2000000000000010000000000000001

// Values needed to prepare the initial vector of counter blocks.
.Lctr_pattern:
	.octa 0
	.octa 1
	.octa 2
	.octa 3

// The number of AES blocks per vector, as a 128-bit value.
.Linc_4blocks:
	.octa 4

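// (These constants are consumed by _aes_gcm_update below: .Lctr_pattern is
// added once to the broadcast 128-bit counter to form four consecutive
// counter blocks, and .Linc_4blocks then advances all four 128-bit lanes by
// 4 blocks per vector of keystream.)
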
// Number of powers of the hash key stored in the key struct. The powers are
// stored from highest (H^NUM_H_POWERS) to lowest (H^1).
#define NUM_H_POWERS 16

// Offset to AES key length (in bytes) in the key struct
#define OFFSETOF_AESKEYLEN 480

// Offset to start of hash key powers array in the key struct
#define OFFSETOF_H_POWERS 512

// Offset to end of hash key powers array in the key struct.
//
// This is immediately followed by three zeroized padding blocks, which are
// included so that partial vectors can be handled more easily. E.g. if two
// blocks remain, we load the 4 values [H^2, H^1, 0, 0]. The most padding
// blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
#define OFFSETOFEND_H_POWERS (OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
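// (For reference: with NUM_H_POWERS == 16, OFFSETOFEND_H_POWERS evaluates to
// 512 + 16*16 = 768, so the three zeroized padding blocks mentioned above
// occupy bytes 768..815 of the key struct.)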

.text

// The _ghash_mul_step macro does one step of GHASH multiplication of the
// 128-bit lanes of \a by the corresponding 128-bit lanes of \b, storing the
// reduced products in \dst. \t0, \t1, and \t2 are temporary registers of the
// same size as \a and \b. To complete all steps, this must be invoked with \i=0
// through \i=9. The division into steps allows users of this macro to
// optionally interleave the computation with other instructions. Users of this
// macro must preserve the parameter registers across steps.
//
// The multiplications are done in GHASH's representation of the finite field
// GF(2^128). Elements of GF(2^128) are represented as binary polynomials
// (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial
// G. The GCM specification uses G = x^128 + x^7 + x^2 + x + 1. Addition is
// just XOR, while multiplication is more complex and has two parts: (a) do
// carryless multiplication of two 128-bit input polynomials to get a 256-bit
// intermediate product polynomial, and (b) reduce the intermediate product to
// 128 bits by adding multiples of G that cancel out terms in it. (Adding
// multiples of G doesn't change which field element the polynomial represents.)
//
// Unfortunately, the GCM specification maps bits to/from polynomial
// coefficients backwards from the natural order. In each byte it specifies the
// highest bit to be the lowest order polynomial coefficient, *not* the highest!
// This makes it nontrivial to work with the GHASH polynomials. We could
// reflect the bits, but x86 doesn't have an instruction that does that.
//
// Instead, we operate on the values without bit-reflecting them. This *mostly*
// just works, since XOR and carryless multiplication are symmetric with respect
// to bit order, but it has some consequences. First, due to GHASH's byte
// order, by skipping bit reflection, *byte* reflection becomes necessary to
// give the polynomial terms a consistent order. E.g., considering an N-bit
// value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0
// through N-1 of the byte-reflected value represent the coefficients of x^(N-1)
// through x^0, whereas bits 0 through N-1 of the non-byte-reflected value
// represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8) which can't be worked
// with. Fortunately, x86's vpshufb instruction can do byte reflection.
//
// Second, forgoing the bit reflection causes an extra multiple of x (still
// using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each
// multiplication. This is because an M-bit by N-bit carryless multiplication
// really produces a (M+N-1)-bit product, but in practice it's zero-extended to
// M+N bits. In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits
// to polynomial coefficients backwards, this zero-extension actually changes
// the product by introducing an extra factor of x. Therefore, users of this
// macro must ensure that one of the inputs has an extra factor of x^-1, i.e.
// the multiplicative inverse of x, to cancel out the extra x.
//
// Third, the backwards coefficients convention is just confusing to work with,
// since it makes "low" and "high" in the polynomial math mean the opposite of
// their normal meaning in computer programming. This can be solved by using an
// alternative interpretation: the polynomial coefficients are understood to be
// in the natural order, and the multiplication is actually \a * \b * x^-128 mod
// x^128 + x^127 + x^126 + x^121 + 1. This doesn't change the inputs, outputs,
// or the implementation at all; it just changes the mathematical interpretation
// of what each instruction is doing. Starting from here, we'll use this
// alternative interpretation, as it's easier to understand the code that way.
//
// Moving on to the implementation, the vpclmulqdq instruction does 64 x 64 =>
// 128-bit carryless multiplication, so we break the 128 x 128 multiplication
// into parts as follows (the _L and _H suffixes denote low and high 64 bits):
//
//	LO = a_L * b_L
//	MI = (a_L * b_H) + (a_H * b_L)
//	HI = a_H * b_H
//
// The 256-bit product is x^128*HI + x^64*MI + LO. LO, MI, and HI are 128-bit.
// Note that MI "overlaps" with LO and HI. We don't consolidate MI into LO and
// HI right away, since the way the reduction works makes that unnecessary.
//
// For the reduction, we cancel out the low 128 bits by adding multiples of G =
// x^128 + x^127 + x^126 + x^121 + 1. This is done by two iterations, each of
// which cancels out the next lowest 64 bits. Consider a value x^64*A + B,
// where A and B are 128-bit. Adding B_L*G to that value gives:
//
//	x^64*A + B + B_L*G
//	= x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)
//	= x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L
//	= x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L
//	= x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))
//
// So: if we sum A, B with its halves swapped, and the low half of B times x^63
// + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the
// original value x^64*A + B. I.e., the low 64 bits got canceled out.
//
// We just need to apply this twice: first to fold LO into MI, and second to
// fold the updated MI into HI.
//
// The needed three-argument XORs are done using the vpternlogd instruction with
// immediate 0x96, since this is faster than two vpxord instructions.
//
// A potential optimization, assuming that b is fixed per-key (if a is fixed
// per-key it would work the other way around), is to use one iteration of the
// reduction described above to precompute a value c such that x^64*c = b mod G,
// and then multiply a_L by c (and implicitly by x^64) instead of by b:
//
//	MI = (a_L * c_L) + (a_H * b_L)
//	HI = (a_L * c_H) + (a_H * b_H)
//
// This would eliminate the LO part of the intermediate product, which would
// eliminate the need to fold LO into MI. This would save two instructions,
// including a vpclmulqdq. However, we currently don't use this optimization
// because it would require twice as many per-key precomputed values.
//
// Using Karatsuba multiplication instead of "schoolbook" multiplication
// similarly would save a vpclmulqdq but does not seem to be worth it.
.macro _ghash_mul_step i, a, b, dst, gfpoly, t0, t1, t2
.if \i == 0
	vpclmulqdq $0x00, \a, \b, \t0 // LO = a_L * b_L
	vpclmulqdq $0x01, \a, \b, \t1 // MI_0 = a_L * b_H
.elseif \i == 1
	vpclmulqdq $0x10, \a, \b, \t2 // MI_1 = a_H * b_L
.elseif \i == 2
	vpxord \t2, \t1, \t1 // MI = MI_0 + MI_1
.elseif \i == 3
	vpclmulqdq $0x01, \t0, \gfpoly, \t2 // LO_L*(x^63 + x^62 + x^57)
.elseif \i == 4
	vpshufd $0x4e, \t0, \t0 // Swap halves of LO
.elseif \i == 5
	vpternlogd $0x96, \t2, \t0, \t1 // Fold LO into MI
.elseif \i == 6
	vpclmulqdq $0x11, \a, \b, \dst // HI = a_H * b_H
.elseif \i == 7
	vpclmulqdq $0x01, \t1, \gfpoly, \t0 // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpshufd $0x4e, \t1, \t1 // Swap halves of MI
.elseif \i == 9
	vpternlogd $0x96, \t0, \t1, \dst // Fold MI into HI
.endif
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
// the reduced products in \dst. See _ghash_mul_step for full explanation.
.macro _ghash_mul a, b, dst, gfpoly, t0, t1, t2
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_mul_step \i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
.endr
.endm

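// The following scalar pseudocode (given here as an illustration; it is not
// assembled) models what one _ghash_mul invocation computes per 128-bit lane,
// assuming a hypothetical clmul64(x, y) helper that returns the 128-bit
// carryless product of two 64-bit values and swap_halves() that exchanges the
// two 64-bit halves of a 128-bit value:
//
//	lo = clmul64(a_L, b_L)
//	mi = clmul64(a_L, b_H) ^ clmul64(a_H, b_L)
//	hi = clmul64(a_H, b_H)
//	mi ^= swap_halves(lo) ^ clmul64(lo_L, 0xc200000000000000)  // fold LO
//	hi ^= swap_halves(mi) ^ clmul64(mi_L, 0xc200000000000000)  // fold MI
//	dst = hi
//
// 0xc200000000000000 is the high half of .Lgfpoly, i.e. x^63 + x^62 + x^57 in
// the natural-order interpretation used by the comments above.
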
// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
// *unreduced* products to \lo, \mi, and \hi.
.macro _ghash_mul_noreduce a, b, lo, mi, hi, t0, t1, t2, t3
	vpclmulqdq $0x00, \a, \b, \t0 // a_L * b_L
	vpclmulqdq $0x01, \a, \b, \t1 // a_L * b_H
	vpclmulqdq $0x10, \a, \b, \t2 // a_H * b_L
	vpclmulqdq $0x11, \a, \b, \t3 // a_H * b_H
	vpxord \t0, \lo, \lo
	vpternlogd $0x96, \t2, \t1, \mi
	vpxord \t3, \hi, \hi
.endm

// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
// reduced products in \hi. See _ghash_mul_step for explanation of reduction.
.macro _ghash_reduce lo, mi, hi, gfpoly, t0
	vpclmulqdq $0x01, \lo, \gfpoly, \t0
	vpshufd $0x4e, \lo, \lo
	vpternlogd $0x96, \t0, \lo, \mi
	vpclmulqdq $0x01, \mi, \gfpoly, \t0
	vpshufd $0x4e, \mi, \mi
	vpternlogd $0x96, \t0, \mi, \hi
.endm

// This is a specialized version of _ghash_mul that computes \a * \a, i.e. it
// squares \a. It skips computing MI = (a_L * a_H) + (a_H * a_L) = 0.
.macro _ghash_square a, dst, gfpoly, t0, t1
	vpclmulqdq $0x00, \a, \a, \t0 // LO = a_L * a_L
	vpclmulqdq $0x11, \a, \a, \dst // HI = a_H * a_H
	vpclmulqdq $0x01, \t0, \gfpoly, \t1 // LO_L*(x^63 + x^62 + x^57)
	vpshufd $0x4e, \t0, \t0 // Swap halves of LO
	vpxord \t0, \t1, \t1 // Fold LO into MI
	vpclmulqdq $0x01, \t1, \gfpoly, \t0 // MI_L*(x^63 + x^62 + x^57)
	vpshufd $0x4e, \t1, \t1 // Swap halves of MI
	vpternlogd $0x96, \t0, \t1, \dst // Fold MI into HI
.endm

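// (Aside: the reason the MI partial product can be skipped when squaring is
// that in GF(2)[x] the two cross products are identical and cancel under XOR:
//
//	(x^64*a_H + a_L)^2 = x^128*a_H^2 + x^64*(a_H*a_L + a_L*a_H) + a_L^2
//	                   = x^128*a_H^2 + a_L^2
//
// so only the reduction's fold terms contribute to the middle value.)
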
// void aes_gcm_precompute_vaes_avx512(struct aes_gcm_key_vaes_avx512 *key);
//
// Given the expanded AES key |key->base.aes_key|, derive the GHASH subkey and
// initialize |key->h_powers| and |key->padding|.
SYM_FUNC_START(aes_gcm_precompute_vaes_avx512)

	// Function arguments
	.set KEY, %rdi

	// Additional local variables.
	// %zmm[0-2] and %rax are used as temporaries.
	.set POWERS_PTR, %rsi
	.set RNDKEYLAST_PTR, %rdx
	.set H_CUR, %zmm3
	.set H_CUR_YMM, %ymm3
	.set H_CUR_XMM, %xmm3
	.set H_INC, %zmm4
	.set H_INC_YMM, %ymm4
	.set H_INC_XMM, %xmm4
	.set GFPOLY, %zmm5
	.set GFPOLY_YMM, %ymm5
	.set GFPOLY_XMM, %xmm5

	// Get pointer to lowest set of key powers (located at end of array).
	lea OFFSETOFEND_H_POWERS-64(KEY), POWERS_PTR

	// Encrypt an all-zeroes block to get the raw hash subkey.
	movl OFFSETOF_AESKEYLEN(KEY), %eax
	lea 6*16(KEY,%rax,4), RNDKEYLAST_PTR
	vmovdqu (KEY), %xmm0 // Zero-th round key XOR all-zeroes block
	add $16, KEY
1:
	vaesenc (KEY), %xmm0, %xmm0
	add $16, KEY
	cmp KEY, RNDKEYLAST_PTR
	jne 1b
	vaesenclast (RNDKEYLAST_PTR), %xmm0, %xmm0

	// Reflect the bytes of the raw hash subkey.
	vpshufb .Lbswap_mask(%rip), %xmm0, H_CUR_XMM

	// Zeroize the padding blocks.
	vpxor %xmm0, %xmm0, %xmm0
	vmovdqu %ymm0, 64(POWERS_PTR)
	vmovdqu %xmm0, 64+2*16(POWERS_PTR)

	// Finish preprocessing the first key power, H^1. Since this GHASH
	// implementation operates directly on values with the backwards bit
	// order specified by the GCM standard, it's necessary to preprocess the
	// raw key as follows. First, reflect its bytes. Second, multiply it
	// by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards
	// interpretation of polynomial coefficients), which can also be
	// interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121
	// + 1 using the alternative, natural interpretation of polynomial
	// coefficients. For details, see the comment above _ghash_mul_step.
	//
	// Either way, for the multiplication the concrete operation performed
	// is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2
	// << 120) | 1 if a 1 bit was carried out. However, there's no 128-bit
	// wide shift instruction, so instead double each of the two 64-bit
	// halves and incorporate the internal carry bit into the value XOR'd.
	vpshufd $0xd3, H_CUR_XMM, %xmm0
	vpsrad $31, %xmm0, %xmm0
	vpaddq H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
	// H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
	vpternlogd $0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM

	// Load the gfpoly constant.
	vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY

	// Square H^1 to get H^2.
	//
	// Note that as with H^1, all higher key powers also need an extra
	// factor of x^-1 (or x using the natural interpretation). Nothing
	// special needs to be done to make this happen, though: H^1 * H^1 would
	// end up with two factors of x^-1, but the multiplication consumes one.
	// So the product H^2 ends up with the desired one factor of x^-1.
	_ghash_square H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, %xmm0, %xmm1

	// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
	vinserti128 $1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
	vinserti128 $1, H_INC_XMM, H_INC_YMM, H_INC_YMM

	// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
	_ghash_mul H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
		%ymm0, %ymm1, %ymm2
	vinserti64x4 $1, H_CUR_YMM, H_INC, H_CUR
	vshufi64x2 $0, H_INC, H_INC, H_INC

	// Store the lowest set of key powers.
	vmovdqu8 H_CUR, (POWERS_PTR)

	// Compute and store the remaining key powers.
	// Repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
	// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
	mov $3, %eax
.Lprecompute_next:
	sub $64, POWERS_PTR
	_ghash_mul H_INC, H_CUR, H_CUR, GFPOLY, %zmm0, %zmm1, %zmm2
	vmovdqu8 H_CUR, (POWERS_PTR)
	dec %eax
	jnz .Lprecompute_next

	vzeroupper // This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_precompute_vaes_avx512)

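// In rough pseudocode (an illustrative summary only, not additional code to
// assemble; byte reflection and the factor-of-x preprocessing are omitted),
// the function above does:
//
//	H = AES-encrypt(key, 0^128)        // raw hash subkey
//	zeroize the three padding blocks that follow h_powers
//	cur = [H^4, H^3, H^2, H^1]; inc = [H^4, H^4, H^4, H^4]
//	store cur as the last 64 bytes of h_powers
//	repeat 3 times:
//		cur = ghash_mul(inc, cur)  // next four higher powers
//		store cur 64 bytes earlier
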
// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
// the result in \dst_xmm. This implicitly zeroizes the other lanes of dst.
.macro _horizontal_xor src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
	vextracti32x4 $1, \src, \t0_xmm
	vextracti32x4 $2, \src, \t1_xmm
	vextracti32x4 $3, \src, \t2_xmm
	vpxord \t0_xmm, \src_xmm, \dst_xmm
	vpternlogd $0x96, \t1_xmm, \t2_xmm, \dst_xmm
.endm

// Do one step of the GHASH update of the data blocks given in the vector
// registers GHASHDATA[0-3]. \i specifies the step to do, 0 through 9. The
// division into steps allows users of this macro to optionally interleave the
// computation with other instructions. This macro uses the vector register
// GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;
// H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and
// GHASHTMP[0-2] as temporaries. This macro handles the byte-reflection of the
// data blocks. The parameter registers must be preserved across steps.
//
// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
// operations are vectorized operations on 512-bit vectors of 128-bit blocks.
// The vectorized terms correspond to the following non-vectorized terms:
//
//	H_POW4*(GHASHDATA0 + GHASH_ACC) => H^16*(blk0 + GHASH_ACC_XMM),
//	    H^15*(blk1 + 0), H^14*(blk2 + 0), and H^13*(blk3 + 0)
//	H_POW3*GHASHDATA1 => H^12*blk4, H^11*blk5, H^10*blk6, and H^9*blk7
//	H_POW2*GHASHDATA2 => H^8*blk8, H^7*blk9, H^6*blk10, and H^5*blk11
//	H_POW1*GHASHDATA3 => H^4*blk12, H^3*blk13, H^2*blk14, and H^1*blk15
//
// More concretely, this code does:
//	- Do vectorized "schoolbook" multiplications to compute the intermediate
//	  256-bit product of each block and its corresponding hash key power.
//	- Sum (XOR) the intermediate 256-bit products across vectors.
//	- Do a vectorized reduction of these 256-bit intermediate values to
//	  128-bits each.
//	- Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
//
// See _ghash_mul_step for the full explanation of the operations performed for
// each individual finite field multiplication and reduction.
.macro _ghash_step_4x i
.if \i == 0
	vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord GHASH_ACC, GHASHDATA0, GHASHDATA0
	vpshufb BSWAP_MASK, GHASHDATA1, GHASHDATA1
	vpshufb BSWAP_MASK, GHASHDATA2, GHASHDATA2
.elseif \i == 1
	vpshufb BSWAP_MASK, GHASHDATA3, GHASHDATA3
	vpclmulqdq $0x00, H_POW4, GHASHDATA0, GHASH_ACC // LO_0
	vpclmulqdq $0x00, H_POW3, GHASHDATA1, GHASHTMP0 // LO_1
	vpclmulqdq $0x00, H_POW2, GHASHDATA2, GHASHTMP1 // LO_2
.elseif \i == 2
	vpxord GHASHTMP0, GHASH_ACC, GHASH_ACC // sum(LO_{1,0})
	vpclmulqdq $0x00, H_POW1, GHASHDATA3, GHASHTMP2 // LO_3
	vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC // LO = sum(LO_{3,2,1,0})
	vpclmulqdq $0x01, H_POW4, GHASHDATA0, GHASHTMP0 // MI_0
.elseif \i == 3
	vpclmulqdq $0x01, H_POW3, GHASHDATA1, GHASHTMP1 // MI_1
	vpclmulqdq $0x01, H_POW2, GHASHDATA2, GHASHTMP2 // MI_2
	vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{2,1,0})
	vpclmulqdq $0x01, H_POW1, GHASHDATA3, GHASHTMP1 // MI_3
.elseif \i == 4
	vpclmulqdq $0x10, H_POW4, GHASHDATA0, GHASHTMP2 // MI_4
	vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{4,3,2,1,0})
	vpclmulqdq $0x10, H_POW3, GHASHDATA1, GHASHTMP1 // MI_5
	vpclmulqdq $0x10, H_POW2, GHASHDATA2, GHASHTMP2 // MI_6
.elseif \i == 5
	vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{6,5,4,3,2,1,0})
	vpclmulqdq $0x01, GHASH_ACC, GFPOLY, GHASHTMP2 // LO_L*(x^63 + x^62 + x^57)
	vpclmulqdq $0x10, H_POW1, GHASHDATA3, GHASHTMP1 // MI_7
	vpxord GHASHTMP1, GHASHTMP0, GHASHTMP0 // MI = sum(MI_{7,6,5,4,3,2,1,0})
.elseif \i == 6
	vpshufd $0x4e, GHASH_ACC, GHASH_ACC // Swap halves of LO
	vpclmulqdq $0x11, H_POW4, GHASHDATA0, GHASHDATA0 // HI_0
	vpclmulqdq $0x11, H_POW3, GHASHDATA1, GHASHDATA1 // HI_1
	vpclmulqdq $0x11, H_POW2, GHASHDATA2, GHASHDATA2 // HI_2
.elseif \i == 7
	vpternlogd $0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0 // Fold LO into MI
	vpclmulqdq $0x11, H_POW1, GHASHDATA3, GHASHDATA3 // HI_3
	vpternlogd $0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})
	vpclmulqdq $0x01, GHASHTMP0, GFPOLY, GHASHTMP1 // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpxord GHASHDATA3, GHASHDATA0, GHASH_ACC // HI = sum(HI_{3,2,1,0})
	vpshufd $0x4e, GHASHTMP0, GHASHTMP0 // Swap halves of MI
	vpternlogd $0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC // Fold MI into HI
.elseif \i == 9
	_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
		GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
.endif
.endm

// Update GHASH with four vectors of data blocks. See _ghash_step_4x for full
// explanation.
.macro _ghash_4x
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_step_4x \i
.endr
.endm

// void aes_gcm_aad_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//                                     u8 ghash_acc[16],
//                                     const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|. On the first call, |ghash_acc| must be all
// zeroes. |aadlen| must be a multiple of 16, except on the last call where it
// can be any length. The caller must do any buffering needed to ensure this.
//
// This handles large amounts of AAD efficiently, while also keeping overhead
// low for small amounts, which is the common case. TLS and IPsec use less than
// one block of AAD, but (uncommonly) other use cases may use much more.
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx512)

	// Function arguments
	.set KEY, %rdi
	.set GHASH_ACC_PTR, %rsi
	.set AAD, %rdx
	.set AADLEN, %ecx
	.set AADLEN64, %rcx // Zero-extend AADLEN before using!

	// Additional local variables.
	// %rax and %k1 are used as temporary registers.
	.set GHASHDATA0, %zmm0
	.set GHASHDATA0_XMM, %xmm0
	.set GHASHDATA1, %zmm1
	.set GHASHDATA1_XMM, %xmm1
	.set GHASHDATA2, %zmm2
	.set GHASHDATA2_XMM, %xmm2
	.set GHASHDATA3, %zmm3
	.set BSWAP_MASK, %zmm4
	.set BSWAP_MASK_XMM, %xmm4
	.set GHASH_ACC, %zmm5
	.set GHASH_ACC_XMM, %xmm5
	.set H_POW4, %zmm6
	.set H_POW3, %zmm7
	.set H_POW2, %zmm8
	.set H_POW1, %zmm9
	.set H_POW1_XMM, %xmm9
	.set GFPOLY, %zmm10
	.set GFPOLY_XMM, %xmm10
	.set GHASHTMP0, %zmm11
	.set GHASHTMP1, %zmm12
	.set GHASHTMP2, %zmm13

	// Load the GHASH accumulator.
	vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM

	// Check for the common case of AADLEN <= 16, as well as AADLEN == 0.
	cmp $16, AADLEN
	jg .Laad_more_than_16bytes
	test AADLEN, AADLEN
	jz .Laad_done

	// Fast path: update GHASH with 1 <= AADLEN <= 16 bytes of AAD.
	vmovdqu .Lbswap_mask(%rip), BSWAP_MASK_XMM
	vmovdqu .Lgfpoly(%rip), GFPOLY_XMM
	mov $-1, %eax
	bzhi AADLEN, %eax, %eax
	kmovd %eax, %k1
	vmovdqu8 (AAD), GHASHDATA0_XMM{%k1}{z}
	vmovdqu OFFSETOFEND_H_POWERS-16(KEY), H_POW1_XMM
	vpshufb BSWAP_MASK_XMM, GHASHDATA0_XMM, GHASHDATA0_XMM
	vpxor GHASHDATA0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	_ghash_mul H_POW1_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM, GFPOLY_XMM, \
		GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	jmp .Laad_done

.Laad_more_than_16bytes:
	vbroadcasti32x4 .Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY

	// If AADLEN >= 256, update GHASH with 256 bytes of AAD at a time.
	sub $256, AADLEN
	jl .Laad_loop_4x_done
	vmovdqu8 OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
	vmovdqu8 OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
	vmovdqu8 OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
	vmovdqu8 OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
.Laad_loop_4x:
	vmovdqu8 0*64(AAD), GHASHDATA0
	vmovdqu8 1*64(AAD), GHASHDATA1
	vmovdqu8 2*64(AAD), GHASHDATA2
	vmovdqu8 3*64(AAD), GHASHDATA3
	_ghash_4x
	add $256, AAD
	sub $256, AADLEN
	jge .Laad_loop_4x
.Laad_loop_4x_done:

	// If AADLEN >= 64, update GHASH with 64 bytes of AAD at a time.
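	// (AADLEN is currently biased by -256 from the subtraction above, so
	// adding 192 re-biases it by -64: it goes negative exactly when fewer
	// than 64 bytes of AAD remain.)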
	add $192, AADLEN
	jl .Laad_loop_1x_done
	vmovdqu8 OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
.Laad_loop_1x:
	vmovdqu8 (AAD), GHASHDATA0
	vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
		GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
		GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	add $64, AAD
	sub $64, AADLEN
	jge .Laad_loop_1x
.Laad_loop_1x_done:

	// Update GHASH with the remaining 0 <= AADLEN < 64 bytes of AAD.
	add $64, AADLEN
	jz .Laad_done
	mov $-1, %rax
	bzhi AADLEN64, %rax, %rax
	kmovq %rax, %k1
	vmovdqu8 (AAD), GHASHDATA0{%k1}{z}
	neg AADLEN64
	and $~15, AADLEN64 // -round_up(AADLEN, 16)
	vmovdqu8 OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
	vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
		GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
		GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM

.Laad_done:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper // This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)

// Do one non-last round of AES encryption on the blocks in %zmm[0-3] using the
// round key that has been broadcast to all 128-bit lanes of \round_key.
.macro _vaesenc_4x round_key
	vaesenc \round_key, %zmm0, %zmm0
	vaesenc \round_key, %zmm1, %zmm1
	vaesenc \round_key, %zmm2, %zmm2
	vaesenc \round_key, %zmm3, %zmm3
.endm

// Start the AES encryption of four vectors of counter blocks.
.macro _ctr_begin_4x

	// Increment LE_CTR four times to generate four vectors of little-endian
	// counter blocks, swap each to big-endian, and store them in %zmm[0-3].
	vpshufb BSWAP_MASK, LE_CTR, %zmm0
	vpaddd LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb BSWAP_MASK, LE_CTR, %zmm1
	vpaddd LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb BSWAP_MASK, LE_CTR, %zmm2
	vpaddd LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb BSWAP_MASK, LE_CTR, %zmm3
	vpaddd LE_CTR_INC, LE_CTR, LE_CTR

	// AES "round zero": XOR in the zero-th round key.
	vpxord RNDKEY0, %zmm0, %zmm0
	vpxord RNDKEY0, %zmm1, %zmm1
	vpxord RNDKEY0, %zmm2, %zmm2
	vpxord RNDKEY0, %zmm3, %zmm3
.endm

// Do the last AES round for four vectors of counter blocks %zmm[0-3], XOR
// source data with the resulting keystream, and write the result to DST and
// GHASHDATA[0-3]. (Implementation differs slightly, but has the same effect.)
.macro _aesenclast_and_xor_4x
	// XOR the source data with the last round key, saving the result in
	// GHASHDATA[0-3]. This reduces latency by taking advantage of the
	// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
	vpxord 0*64(SRC), RNDKEYLAST, GHASHDATA0
	vpxord 1*64(SRC), RNDKEYLAST, GHASHDATA1
	vpxord 2*64(SRC), RNDKEYLAST, GHASHDATA2
	vpxord 3*64(SRC), RNDKEYLAST, GHASHDATA3

	// Do the last AES round. This handles the XOR with the source data
	// too, as per the optimization described above.
	vaesenclast GHASHDATA0, %zmm0, GHASHDATA0
	vaesenclast GHASHDATA1, %zmm1, GHASHDATA1
	vaesenclast GHASHDATA2, %zmm2, GHASHDATA2
	vaesenclast GHASHDATA3, %zmm3, GHASHDATA3

	// Store the en/decrypted data to DST.
	vmovdqu8 GHASHDATA0, 0*64(DST)
	vmovdqu8 GHASHDATA1, 1*64(DST)
	vmovdqu8 GHASHDATA2, 2*64(DST)
	vmovdqu8 GHASHDATA3, 3*64(DST)
.endm

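// (Why the identity used above holds: the final AES round ends with a plain
// AddRoundKey XOR, so XOR'ing extra data into the last round key is
// equivalent to XOR'ing it into vaesenclast's output afterwards.)
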
// void aes_gcm_{enc,dec}_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//                                           const u32 le_ctr[4], u8 ghash_acc[16],
//                                           const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one). The function computes the
// next portion of the CTR keystream, XOR's it with |datalen| bytes from |src|,
// and writes the resulting encrypted or decrypted data to |dst|. It also
// updates the GHASH accumulator |ghash_acc| using the next |datalen| ciphertext
// bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length. The caller must do any buffering needed to ensure this. Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format. This
// function loads the counter from |le_ctr| and increments the loaded counter as
// needed, but it does *not* store the updated counter back to |le_ctr|. The
// caller must update |le_ctr| if any more data segments follow. Internally,
// only the low 32-bit word of the counter is incremented, following the GCM
// standard.
.macro _aes_gcm_update enc

	// Function arguments
	.set KEY, %rdi
	.set LE_CTR_PTR, %rsi
	.set GHASH_ACC_PTR, %rdx
	.set SRC, %rcx
	.set DST, %r8
	.set DATALEN, %r9d
	.set DATALEN64, %r9 // Zero-extend DATALEN before using!

	// Additional local variables

	// %rax and %k1 are used as temporary registers. LE_CTR_PTR is also
	// available as a temporary register after the counter is loaded.

	// AES key length in bytes
	.set AESKEYLEN, %r10d
	.set AESKEYLEN64, %r10

	// Pointer to the last AES round key for the chosen AES variant
	.set RNDKEYLAST_PTR, %r11

	// In the main loop, %zmm[0-3] are used as AES input and output.
	// Elsewhere they are used as temporary registers.

	// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
	.set GHASHDATA0, %zmm4
	.set GHASHDATA0_XMM, %xmm4
	.set GHASHDATA1, %zmm5
	.set GHASHDATA1_XMM, %xmm5
	.set GHASHDATA2, %zmm6
	.set GHASHDATA2_XMM, %xmm6
	.set GHASHDATA3, %zmm7

	// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
	// using vpshufb, copied to all 128-bit lanes.
	.set BSWAP_MASK, %zmm8

	// RNDKEY temporarily holds the next AES round key.
	.set RNDKEY, %zmm9

	// GHASH_ACC is the accumulator variable for GHASH. When fully reduced,
	// only the lowest 128-bit lane can be nonzero. When not fully reduced,
	// more than one lane may be used, and they need to be XOR'd together.
	.set GHASH_ACC, %zmm10
	.set GHASH_ACC_XMM, %xmm10

	// LE_CTR_INC is the vector of 32-bit words that need to be added to a
	// vector of little-endian counter blocks to advance it forwards.
	.set LE_CTR_INC, %zmm11

	// LE_CTR contains the next set of little-endian counter blocks.
	.set LE_CTR, %zmm12

	// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
	// copied to all 128-bit lanes. RNDKEY0 is the zero-th round key,
	// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
	.set RNDKEY0, %zmm13
	.set RNDKEYLAST, %zmm14
	.set RNDKEY_M9, %zmm15
	.set RNDKEY_M8, %zmm16
	.set RNDKEY_M7, %zmm17
	.set RNDKEY_M6, %zmm18
	.set RNDKEY_M5, %zmm19
	.set RNDKEY_M4, %zmm20
	.set RNDKEY_M3, %zmm21
	.set RNDKEY_M2, %zmm22
	.set RNDKEY_M1, %zmm23

	// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x. These
	// cannot coincide with anything used for AES encryption, since for
	// performance reasons GHASH and AES encryption are interleaved.
	.set GHASHTMP0, %zmm24
	.set GHASHTMP1, %zmm25
	.set GHASHTMP2, %zmm26

	// H_POW[4-1] contain the powers of the hash key H^16...H^1. The
	// descending numbering reflects the order of the key powers.
	.set H_POW4, %zmm27
	.set H_POW3, %zmm28
	.set H_POW2, %zmm29
	.set H_POW1, %zmm30

	// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
	.set GFPOLY, %zmm31

	// Load some constants.
	vbroadcasti32x4 .Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator and the starting counter.
	vmovdqu (GHASH_ACC_PTR), GHASH_ACC_XMM
	vbroadcasti32x4 (LE_CTR_PTR), LE_CTR

	// Load the AES key length in bytes.
	movl OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Make RNDKEYLAST_PTR point to the last AES round key. This is the
	// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
	// respectively. Then load the zero-th and last round keys.
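	// (The lea below computes KEY + 6*16 + 4*AESKEYLEN. Since AES uses
	// AESKEYLEN/4 + 6 rounds, this is exactly the byte offset of the last
	// round key, 16*(AESKEYLEN/4 + 6), from the start of the round keys.)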
	lea 6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
	vbroadcasti32x4 (KEY), RNDKEY0
	vbroadcasti32x4 (RNDKEYLAST_PTR), RNDKEYLAST

	// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
	vpaddd .Lctr_pattern(%rip), LE_CTR, LE_CTR

	// Load 4 into all 128-bit lanes of LE_CTR_INC.
	vbroadcasti32x4 .Linc_4blocks(%rip), LE_CTR_INC

	// If there are at least 256 bytes of data, then continue into the loop
	// that processes 256 bytes of data at a time. Otherwise skip it.
	//
	// Pre-subtracting 256 from DATALEN saves an instruction from the main
	// loop and also ensures that at least one write always occurs to
	// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
	sub $256, DATALEN
	jl .Lcrypt_loop_4x_done\@

	// Load powers of the hash key.
	vmovdqu8 OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
	vmovdqu8 OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
	vmovdqu8 OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
	vmovdqu8 OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1

	// Main loop: en/decrypt and hash 4 vectors at a time.
	//
	// When possible, interleave the AES encryption of the counter blocks
	// with the GHASH update of the ciphertext blocks. This improves
	// performance on many CPUs because the execution ports used by the VAES
	// instructions often differ from those used by vpclmulqdq and other
	// instructions used in GHASH. For example, many Intel CPUs dispatch
	// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.
	//
	// The interleaving is easiest to do during decryption, since during
	// decryption the ciphertext blocks are immediately available. For
	// encryption, instead encrypt the first set of blocks, then hash those
	// blocks while encrypting the next set of blocks, repeat that as
	// needed, and finally hash the last set of blocks.

.if \enc
	// Encrypt the first 4 vectors of plaintext blocks. Leave the resulting
	// ciphertext in GHASHDATA[0-3] for GHASH.
	_ctr_begin_4x
	lea 16(KEY), %rax
1:
	vbroadcasti32x4 (%rax), RNDKEY
	_vaesenc_4x RNDKEY
	add $16, %rax
	cmp %rax, RNDKEYLAST_PTR
	jne 1b
	_aesenclast_and_xor_4x
	add $256, SRC
	add $256, DST
	sub $256, DATALEN
	jl .Lghash_last_ciphertext_4x\@
.endif

	// Cache as many additional AES round keys as possible.
.irp i, 9,8,7,6,5,4,3,2,1
	vbroadcasti32x4 -\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr

.Lcrypt_loop_4x\@:

	// If decrypting, load more ciphertext blocks into GHASHDATA[0-3]. If
	// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
.if !\enc
	vmovdqu8 0*64(SRC), GHASHDATA0
	vmovdqu8 1*64(SRC), GHASHDATA1
	vmovdqu8 2*64(SRC), GHASHDATA2
	vmovdqu8 3*64(SRC), GHASHDATA3
.endif

	// Start the AES encryption of the counter blocks.
	_ctr_begin_4x
	cmp $24, AESKEYLEN
	jl 128f // AES-128?
	je 192f // AES-192?
	// AES-256
	vbroadcasti32x4 -13*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x RNDKEY
	vbroadcasti32x4 -12*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x RNDKEY
192:
	vbroadcasti32x4 -11*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x RNDKEY
	vbroadcasti32x4 -10*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x RNDKEY
128:

	// Finish the AES encryption of the counter blocks in %zmm[0-3],
	// interleaved with the GHASH update of the ciphertext blocks in
	// GHASHDATA[0-3].
.irp i, 9,8,7,6,5,4,3,2,1
	_ghash_step_4x (9 - \i)
	_vaesenc_4x RNDKEY_M\i
.endr
	_ghash_step_4x 9
	_aesenclast_and_xor_4x
	add $256, SRC
	add $256, DST
	sub $256, DATALEN
	jge .Lcrypt_loop_4x\@

.if \enc
.Lghash_last_ciphertext_4x\@:
	// Update GHASH with the last set of ciphertext blocks.
	_ghash_4x
.endif

.Lcrypt_loop_4x_done\@:

	// Undo the extra subtraction by 256 and check whether data remains.
	add $256, DATALEN
	jz .Ldone\@

	// The data length isn't a multiple of 256 bytes. Process the remaining
	// data of length 1 <= DATALEN < 256, up to one 64-byte vector at a
	// time. Going one vector at a time may seem inefficient compared to
	// having separate code paths for each possible number of vectors
	// remaining. However, using a loop keeps the code size down, and it
	// performs surprisingly well; modern CPUs will start executing the next
	// iteration before the previous one finishes and also predict the
	// number of loop iterations. For a similar reason, we roll up the AES
	// rounds.
	//
	// On the last iteration, the remaining length may be less than 64
	// bytes. Handle this using masking.
	//
	// Since there are enough key powers available for all remaining data,
	// there is no need to do a GHASH reduction after each iteration.
	// Instead, multiply each remaining block by its own key power, and only
	// do a GHASH reduction at the very end.

	// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
	// is the number of blocks that remain.
	.set POWERS_PTR, LE_CTR_PTR // LE_CTR_PTR is free to be reused.
	mov DATALEN, %eax
	neg %rax
	and $~15, %rax // -round_up(DATALEN, 16)
	lea OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR

	// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
	.set LO, GHASHDATA0
	.set LO_XMM, GHASHDATA0_XMM
	.set MI, GHASHDATA1
	.set MI_XMM, GHASHDATA1_XMM
	.set HI, GHASHDATA2
	.set HI_XMM, GHASHDATA2_XMM
	vpxor LO_XMM, LO_XMM, LO_XMM
	vpxor MI_XMM, MI_XMM, MI_XMM
	vpxor HI_XMM, HI_XMM, HI_XMM

.Lcrypt_loop_1x\@:

	// Select the appropriate mask for this iteration: all 1's if
	// DATALEN >= 64, otherwise DATALEN 1's. Do this branchlessly using the
	// bzhi instruction from BMI2. (This relies on DATALEN <= 255.)
	mov $-1, %rax
	bzhi DATALEN64, %rax, %rax
	kmovq %rax, %k1

	// Encrypt a vector of counter blocks. This does not need to be masked.
	vpshufb BSWAP_MASK, LE_CTR, %zmm0
	vpaddd LE_CTR_INC, LE_CTR, LE_CTR
	vpxord RNDKEY0, %zmm0, %zmm0
	lea 16(KEY), %rax
1:
	vbroadcasti32x4 (%rax), RNDKEY
	vaesenc RNDKEY, %zmm0, %zmm0
	add $16, %rax
	cmp %rax, RNDKEYLAST_PTR
	jne 1b
	vaesenclast RNDKEYLAST, %zmm0, %zmm0

	// XOR the data with the appropriate number of keystream bytes.
	vmovdqu8 (SRC), %zmm1{%k1}{z}
	vpxord %zmm1, %zmm0, %zmm0
	vmovdqu8 %zmm0, (DST){%k1}

	// Update GHASH with the ciphertext block(s), without reducing.
	//
	// In the case of DATALEN < 64, the ciphertext is zero-padded to 64
	// bytes. (If decrypting, it's done by the above masked load. If
	// encrypting, it's done by the below masked register-to-register move.)
	// Note that if DATALEN <= 48, there will be additional padding beyond
	// the padding of the last block specified by GHASH itself; i.e., there
	// may be whole block(s) that get processed by the GHASH multiplication
	// and reduction instructions but should not actually be included in the
	// GHASH. However, any such blocks are all-zeroes, and the values that
	// they're multiplied with are also all-zeroes. Therefore they just add
	// 0 * 0 = 0 to the final GHASH result, which makes no difference.
	vmovdqu8 (POWERS_PTR), H_POW1
.if \enc
	vmovdqu8 %zmm0, %zmm1{%k1}{z}
.endif
	vpshufb BSWAP_MASK, %zmm1, %zmm0
	vpxord GHASH_ACC, %zmm0, %zmm0
	_ghash_mul_noreduce H_POW1, %zmm0, LO, MI, HI, \
		GHASHDATA3, %zmm1, %zmm2, %zmm3
	vpxor GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM

	add $64, POWERS_PTR
	add $64, SRC
	add $64, DST
	sub $64, DATALEN
	jg .Lcrypt_loop_1x\@

	// Finally, do the GHASH reduction.
	_ghash_reduce LO, MI, HI, GFPOLY, %zmm0
	_horizontal_xor HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2

.Ldone\@:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper // This is needed after using ymm or zmm registers.
	RET
.endm

// void aes_gcm_enc_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//                                    const u32 le_ctr[4], u8 ghash_acc[16],
//                                    u64 total_aadlen, u64 total_datalen);
// bool aes_gcm_dec_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//                                    const u32 le_ctr[4],
//                                    const u8 ghash_acc[16],
//                                    u64 total_aadlen, u64 total_datalen,
//                                    const u8 tag[16], int taglen);
//
// This macro generates one of the above two functions (with \enc selecting
// which one). Both functions finish computing the GCM authentication tag by
// updating GHASH with the lengths block and encrypting the GHASH accumulator.
// |total_aadlen| and |total_datalen| must be the total length of the additional
// authenticated data and the en/decrypted data in bytes, respectively.
//
// The encryption function then stores the full-length (16-byte) computed
// authentication tag to |ghash_acc|. The decryption function instead loads the
// expected authentication tag (the one that was transmitted) from the 16-byte
// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
// computed tag in constant time, and returns true if and only if they match.
.macro _aes_gcm_final enc

	// Function arguments
	.set KEY, %rdi
	.set LE_CTR_PTR, %rsi
	.set GHASH_ACC_PTR, %rdx
	.set TOTAL_AADLEN, %rcx
	.set TOTAL_DATALEN, %r8
	.set TAG, %r9
	.set TAGLEN, %r10d // Originally at 8(%rsp)

	// Additional local variables.
	// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.
	.set AESKEYLEN, %r11d
	.set AESKEYLEN64, %r11
	.set GFPOLY, %xmm4
	.set BSWAP_MASK, %xmm5
	.set LE_CTR, %xmm6
	.set GHASH_ACC, %xmm7
	.set H_POW1, %xmm8

	// Load some constants.
	vmovdqa .Lgfpoly(%rip), GFPOLY
	vmovdqa .Lbswap_mask(%rip), BSWAP_MASK

	// Load the AES key length in bytes.
	movl OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Set up a counter block with 1 in the low 32-bit word. This is the
	// counter that produces the ciphertext needed to encrypt the auth tag.
	// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
	vpblendd $0xe, (LE_CTR_PTR), GFPOLY, LE_CTR

	// Build the lengths block and XOR it with the GHASH accumulator.
	// Although the lengths block is defined as the AAD length followed by
	// the en/decrypted data length, both in big-endian byte order, a byte
	// reflection of the full block is needed because of the way we compute
	// GHASH (see _ghash_mul_step). By using little-endian values in the
	// opposite order, we avoid having to reflect any bytes here.
	vmovq TOTAL_DATALEN, %xmm0
	vpinsrq $1, TOTAL_AADLEN, %xmm0, %xmm0
	vpsllq $3, %xmm0, %xmm0 // Bytes to bits
	vpxor (GHASH_ACC_PTR), %xmm0, GHASH_ACC

	// Load the first hash key power (H^1), which is stored last.
	vmovdqu8 OFFSETOFEND_H_POWERS-16(KEY), H_POW1

.if !\enc
	// Prepare a mask of TAGLEN one bits.
	movl 8(%rsp), TAGLEN
	mov $-1, %eax
	bzhi TAGLEN, %eax, %eax
	kmovd %eax, %k1
.endif

	// Make %rax point to the last AES round key for the chosen AES variant.
	lea 6*16(KEY,AESKEYLEN64,4), %rax

	// Start the AES encryption of the counter block by swapping the counter
	// block to big-endian and XOR-ing it with the zero-th AES round key.
	vpshufb BSWAP_MASK, LE_CTR, %xmm0
	vpxor (KEY), %xmm0, %xmm0

	// Complete the AES encryption and multiply GHASH_ACC by H^1.
	// Interleave the AES and GHASH instructions to improve performance.
	cmp $24, AESKEYLEN
	jl 128f // AES-128?
	je 192f // AES-192?
	// AES-256
	vaesenc -13*16(%rax), %xmm0, %xmm0
	vaesenc -12*16(%rax), %xmm0, %xmm0
192:
	vaesenc -11*16(%rax), %xmm0, %xmm0
	vaesenc -10*16(%rax), %xmm0, %xmm0
128:
.irp i, 0,1,2,3,4,5,6,7,8
	_ghash_mul_step \i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
		%xmm1, %xmm2, %xmm3
	vaesenc (\i-9)*16(%rax), %xmm0, %xmm0
.endr
	_ghash_mul_step 9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
		%xmm1, %xmm2, %xmm3

	// Undo the byte reflection of the GHASH accumulator.
	vpshufb BSWAP_MASK, GHASH_ACC, GHASH_ACC

	// Do the last AES round and XOR the resulting keystream block with the
	// GHASH accumulator to produce the full computed authentication tag.
	//
	// Reduce latency by taking advantage of the property vaesenclast(key,
	// a) ^ b == vaesenclast(key ^ b, a). I.e., XOR GHASH_ACC into the last
	// round key, instead of XOR'ing the final AES output with GHASH_ACC.
	//
	// enc_final then returns the computed auth tag, while dec_final
	// compares it with the transmitted one and returns a bool. To compare
	// the tags, dec_final XORs them together and uses vptest to check
	// whether the result is all-zeroes. This should be constant-time.
	// dec_final applies the vaesenclast optimization to this additional
	// value XOR'd too, using vpternlogd to XOR the last round key, GHASH
	// accumulator, and transmitted auth tag together in one instruction.
.if \enc
	vpxor (%rax), GHASH_ACC, %xmm1
	vaesenclast %xmm1, %xmm0, GHASH_ACC
	vmovdqu GHASH_ACC, (GHASH_ACC_PTR)
.else
	vmovdqu (TAG), %xmm1
	vpternlogd $0x96, (%rax), GHASH_ACC, %xmm1
	vaesenclast %xmm1, %xmm0, %xmm0
	xor %eax, %eax
	vmovdqu8 %xmm0, %xmm0{%k1}{z} // Truncate to TAGLEN bytes
	vptest %xmm0, %xmm0
	sete %al
.endif
	// No need for vzeroupper here, since only xmm registers were used.
	RET
.endm

SYM_FUNC_START(aes_gcm_enc_update_vaes_avx512)
	_aes_gcm_update 1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx512)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx512)
	_aes_gcm_update 0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx512)

SYM_FUNC_START(aes_gcm_enc_final_vaes_avx512)
	_aes_gcm_final 1
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx512)
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx512)
	_aes_gcm_final 0
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx512)
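
// For orientation, a hypothetical caller (C-like pseudocode based only on the
// prototypes declared above; the kernel's actual glue code lives elsewhere)
// would drive these functions roughly as follows:
//
//	aes_gcm_precompute_vaes_avx512(key);
//	memset(ghash_acc, 0, 16);
//	aes_gcm_aad_update_vaes_avx512(key, ghash_acc, aad, aadlen);
//	aes_gcm_enc_update_vaes_avx512(key, le_ctr, ghash_acc, src, dst, datalen);
//	aes_gcm_enc_final_vaes_avx512(key, le_ctr, ghash_acc, total_aadlen,
//				      total_datalen);
//
// with the caller responsible for setting up |le_ctr| and, for multi-segment
// data, advancing it between _update calls as described above.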