Path: arch/x86/crypto/sm4-aesni-avx-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM4 Cipher Algorithm, AES-NI/AVX optimized.
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <[email protected]>
 * Copyright (C) 2020 Jussi Kivilinna <[email protected]>
 * Copyright (c) 2021 Tianjia Zhang <[email protected]>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/frame.h>

#define rRIP		(%rip)

#define RX0		%xmm0
#define RX1		%xmm1
#define MASK_4BIT	%xmm2
#define RTMP0		%xmm3
#define RTMP1		%xmm4
#define RTMP2		%xmm5
#define RTMP3		%xmm6
#define RTMP4		%xmm7

#define RA0		%xmm8
#define RA1		%xmm9
#define RA2		%xmm10
#define RA3		%xmm11

#define RB0		%xmm12
#define RB1		%xmm13
#define RB2		%xmm14
#define RB3		%xmm15

#define RNOT		%xmm0
#define RBSWAP		%xmm1


/* Transpose four 32-bit words between 128-bit vectors. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x1, x0, x0; \
	\
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x2; \
	\
	vpunpckhqdq t1, x0, x1; \
	vpunpcklqdq t1, x0, x0; \
	\
	vpunpckhqdq x2, t2, x3; \
	vpunpcklqdq x2, t2, x2;

/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0; \
	vpandn x, mask4bit, x; \
	vpsrld $4, x, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;

/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
 * the 'vaesenclast' instruction.
 */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
	vpandn mask4bit, x, tmp0; \
	vpsrld $4, x, x; \
	vpand x, mask4bit, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;


.section	.rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * The following four affine transform look-up tables are from work by
 * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * These allow exposing the SM4 S-Box from AES SubBytes.
 */
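
/*
 * Table usage sketch: transform_pre/transform_post above evaluate a
 * byte-wise affine map y = A*x ^ c over GF(2) with two 4-bit vpshufb
 * look-ups.  Splitting x into disjoint nibbles, x = xh ^ xl, linearity
 * gives
 *
 *	A*x ^ c = A*xl ^ (A*xh ^ c)
 *
 * so each 'lo' table holds the map of the 16 low-nibble values, each
 * 'hi' table the map of the 16 high-nibble values (with the constant c
 * folded into one of the two), and a single vpxor combines the halves.
 */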

/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
	.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
	.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
	.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
	.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
	.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
	.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
	.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
	.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
	.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
	.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

/* 12 bytes, only for padding */
.Lpadding_deadbeef:
	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef


.text

/*
 * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
 *                           const u8 *src, int nblocks)
 */
SYM_FUNC_START(sm4_aesni_avx_crypt4)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (1..4 blocks)
	 *	%rdx: src (1..4 blocks)
	 *	%rcx: num blocks (1..4)
	 */
	FRAME_BEGIN

	vmovdqu 0*16(%rdx), RA0;
	vmovdqa RA0, RA1;
	vmovdqa RA0, RA2;
	vmovdqa RA0, RA3;
	cmpq $2, %rcx;
	jb .Lblk4_load_input_done;
	vmovdqu 1*16(%rdx), RA1;
	je .Lblk4_load_input_done;
	vmovdqu 2*16(%rdx), RA2;
	cmpq $3, %rcx;
	je .Lblk4_load_input_done;
	vmovdqu 3*16(%rdx), RA3;

.Lblk4_load_input_done:

	vmovdqa .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;

	vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
	vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
	vmovdqa .Lpre_tf_hi_s rRIP, RB0;
	vmovdqa .Lpost_tf_lo_s rRIP, RB1;
	vmovdqa .Lpost_tf_hi_s rRIP, RB2;
	vmovdqa .Linv_shift_row rRIP, RB3;
	vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2;
	vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);

#define ROUND(round, s0, s1, s2, s3) \
	vbroadcastss (4*(round))(%rdi), RX0; \
	vpxor s1, RX0, RX0; \
	vpxor s2, RX0, RX0; \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
	\
	/* sbox, non-linear part */ \
	transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \
	vaesenclast MASK_4BIT, RX0, RX0; \
	transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \
	\
	/* linear part */ \
	vpshufb RB3, RX0, RTMP0; \
	vpxor RTMP0, s0, s0; /* s0 ^ x */ \
	vpshufb RTMP2, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
	vpshufb RTMP3, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1; \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
	vpslld $2, RTMP0, RTMP1; \
	vpsrld $30, RTMP0, RTMP0; \
	vpxor RTMP0, s0, s0; \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP1, s0, s0;
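
/*
 * SM4 has 32 rounds; each pass through .Lroundloop_blk4 below consumes
 * four 32-bit round keys (16 bytes) while rotating the roles of
 * RA0..RA3, so the loop body executes eight times.  %rax is preset to
 * the end of the 32-entry round key array and serves as the loop bound.
 */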
	leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk4:
	ROUND(0, RA0, RA1, RA2, RA3);
	ROUND(1, RA1, RA2, RA3, RA0);
	ROUND(2, RA2, RA3, RA0, RA1);
	ROUND(3, RA3, RA0, RA1, RA2);
	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk4;

#undef ROUND

	vmovdqa .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;

	vmovdqu RA0, 0*16(%rsi);
	cmpq $2, %rcx;
	jb .Lblk4_store_output_done;
	vmovdqu RA1, 1*16(%rsi);
	je .Lblk4_store_output_done;
	vmovdqu RA2, 2*16(%rsi);
	cmpq $3, %rcx;
	je .Lblk4_store_output_done;
	vmovdqu RA3, 3*16(%rsi);

.Lblk4_store_output_done:
	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_crypt4)

SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
	 *						plaintext blocks
	 * output:
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
	 *						ciphertext blocks
	 */
	FRAME_BEGIN

	vmovdqa .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);

#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
	vbroadcastss (4*(round))(%rdi), RX0; \
	vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; \
	vmovdqa .Lpre_tf_hi_s rRIP, RTMP1; \
	vmovdqa RX0, RX1; \
	vpxor s1, RX0, RX0; \
	vpxor s2, RX0, RX0; \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
	vmovdqa .Lpost_tf_lo_s rRIP, RTMP2; \
	vmovdqa .Lpost_tf_hi_s rRIP, RTMP3; \
	vpxor r1, RX1, RX1; \
	vpxor r2, RX1, RX1; \
	vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
	\
	/* sbox, non-linear part */ \
	transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	vmovdqa .Linv_shift_row rRIP, RTMP4; \
	vaesenclast MASK_4BIT, RX0, RX0; \
	vaesenclast MASK_4BIT, RX1, RX1; \
	transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
	transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
	\
	/* linear part */ \
	vpshufb RTMP4, RX0, RTMP0; \
	vpxor RTMP0, s0, s0; /* s0 ^ x */ \
	vpshufb RTMP4, RX1, RTMP2; \
	vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4; \
	vpxor RTMP2, r0, r0; /* r0 ^ x */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
	vpshufb RTMP4, RX1, RTMP3; \
	vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX1, RTMP3; \
	vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpslld $2, RTMP0, RTMP1; \
	vpsrld $30, RTMP0, RTMP0; \
	vpxor RTMP0, s0, s0; \
	vpxor RTMP1, s0, s0; \
	vpshufb RTMP4, RX1, RTMP3; \
	vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
	/* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpslld $2, RTMP2, RTMP3; \
	vpsrld $30, RTMP2, RTMP2; \
	vpxor RTMP2, r0, r0; \
	vpxor RTMP3, r0, r0;
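
/*
 * Same 32-round walk as .Lroundloop_blk4 above, but with two
 * independent four-block lanes (RA0..RA3 and RB0..RB3) in flight;
 * RX0 and RX1 carry the per-lane round input, so the two S-box
 * computations can interleave.
 */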
	leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk8:
	ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
	ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
	ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
	ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk8;

#undef ROUND

	vmovdqa .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	FRAME_END
	RET;
SYM_FUNC_END(__sm4_crypt_blk8)

/*
 * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
 *                           const u8 *src, int nblocks)
 */
SYM_FUNC_START(sm4_aesni_avx_crypt8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (1..8 blocks)
	 *	%rdx: src (1..8 blocks)
	 *	%rcx: num blocks (1..8)
	 */
	cmpq $5, %rcx;
	jb sm4_aesni_avx_crypt4;

	FRAME_BEGIN

	vmovdqu (0 * 16)(%rdx), RA0;
	vmovdqu (1 * 16)(%rdx), RA1;
	vmovdqu (2 * 16)(%rdx), RA2;
	vmovdqu (3 * 16)(%rdx), RA3;
	vmovdqu (4 * 16)(%rdx), RB0;
	vmovdqa RB0, RB1;
	vmovdqa RB0, RB2;
	vmovdqa RB0, RB3;
	je .Lblk8_load_input_done;
	vmovdqu (5 * 16)(%rdx), RB1;
	cmpq $7, %rcx;
	jb .Lblk8_load_input_done;
	vmovdqu (6 * 16)(%rdx), RB2;
	je .Lblk8_load_input_done;
	vmovdqu (7 * 16)(%rdx), RB3;

.Lblk8_load_input_done:
	call __sm4_crypt_blk8;

	cmpq $6, %rcx;
	vmovdqu RA0, (0 * 16)(%rsi);
	vmovdqu RA1, (1 * 16)(%rsi);
	vmovdqu RA2, (2 * 16)(%rsi);
	vmovdqu RA3, (3 * 16)(%rsi);
	vmovdqu RB0, (4 * 16)(%rsi);
	jb .Lblk8_store_output_done;
	vmovdqu RB1, (5 * 16)(%rsi);
	je .Lblk8_store_output_done;
	vmovdqu RB2, (6 * 16)(%rsi);
	cmpq $7, %rcx;
	je .Lblk8_store_output_done;
	vmovdqu RB3, (7 * 16)(%rsi);

.Lblk8_store_output_done:
	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_crypt8)

/*
 * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (8 blocks)
	 *	%rdx: src (8 blocks)
	 *	%rcx: iv (big endian, 128bit)
	 */
	FRAME_BEGIN

	/* load IV and byteswap */
	vmovdqu (%rcx), RA0;

	vmovdqa .Lbswap128_mask rRIP, RBSWAP;
	vpshufb RBSWAP, RA0, RTMP0; /* be => le */

	vpcmpeqd RNOT, RNOT, RNOT;
	vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */

#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;

	/* construct IVs */
	inc_le128(RTMP0, RNOT, RTMP2); /* +1 */
	vpshufb RBSWAP, RTMP0, RA1;
	inc_le128(RTMP0, RNOT, RTMP2); /* +2 */
	vpshufb RBSWAP, RTMP0, RA2;
	inc_le128(RTMP0, RNOT, RTMP2); /* +3 */
	vpshufb RBSWAP, RTMP0, RA3;
	inc_le128(RTMP0, RNOT, RTMP2); /* +4 */
	vpshufb RBSWAP, RTMP0, RB0;
	inc_le128(RTMP0, RNOT, RTMP2); /* +5 */
	vpshufb RBSWAP, RTMP0, RB1;
	inc_le128(RTMP0, RNOT, RTMP2); /* +6 */
	vpshufb RBSWAP, RTMP0, RB2;
	inc_le128(RTMP0, RNOT, RTMP2); /* +7 */
	vpshufb RBSWAP, RTMP0, RB3;
	inc_le128(RTMP0, RNOT, RTMP2); /* +8 */
	vpshufb RBSWAP, RTMP0, RTMP1;

	/* store new IV */
	vmovdqu RTMP1, (%rcx);

	call __sm4_crypt_blk8;
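
	/*
	 * The encrypted counter blocks in RA0..RB3 form the CTR
	 * keystream: XOR with the source to produce the ciphertext.
	 */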
	vpxor (0 * 16)(%rdx), RA0, RA0;
	vpxor (1 * 16)(%rdx), RA1, RA1;
	vpxor (2 * 16)(%rdx), RA2, RA2;
	vpxor (3 * 16)(%rdx), RA3, RA3;
	vpxor (4 * 16)(%rdx), RB0, RB0;
	vpxor (5 * 16)(%rdx), RB1, RB1;
	vpxor (6 * 16)(%rdx), RB2, RB2;
	vpxor (7 * 16)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 16)(%rsi);
	vmovdqu RA1, (1 * 16)(%rsi);
	vmovdqu RA2, (2 * 16)(%rsi);
	vmovdqu RA3, (3 * 16)(%rsi);
	vmovdqu RB0, (4 * 16)(%rsi);
	vmovdqu RB1, (5 * 16)(%rsi);
	vmovdqu RB2, (6 * 16)(%rsi);
	vmovdqu RB3, (7 * 16)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)

/*
 * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (8 blocks)
	 *	%rdx: src (8 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vmovdqu (0 * 16)(%rdx), RA0;
	vmovdqu (1 * 16)(%rdx), RA1;
	vmovdqu (2 * 16)(%rdx), RA2;
	vmovdqu (3 * 16)(%rdx), RA3;
	vmovdqu (4 * 16)(%rdx), RB0;
	vmovdqu (5 * 16)(%rdx), RB1;
	vmovdqu (6 * 16)(%rdx), RB2;
	vmovdqu (7 * 16)(%rdx), RB3;

	call __sm4_crypt_blk8;

	vmovdqu (7 * 16)(%rdx), RNOT;
	vpxor (%rcx), RA0, RA0;
	vpxor (0 * 16)(%rdx), RA1, RA1;
	vpxor (1 * 16)(%rdx), RA2, RA2;
	vpxor (2 * 16)(%rdx), RA3, RA3;
	vpxor (3 * 16)(%rdx), RB0, RB0;
	vpxor (4 * 16)(%rdx), RB1, RB1;
	vpxor (5 * 16)(%rdx), RB2, RB2;
	vpxor (6 * 16)(%rdx), RB3, RB3;
	vmovdqu RNOT, (%rcx); /* store new IV */

	vmovdqu RA0, (0 * 16)(%rsi);
	vmovdqu RA1, (1 * 16)(%rsi);
	vmovdqu RA2, (2 * 16)(%rsi);
	vmovdqu RA3, (3 * 16)(%rsi);
	vmovdqu RB0, (4 * 16)(%rsi);
	vmovdqu RB1, (5 * 16)(%rsi);
	vmovdqu RB2, (6 * 16)(%rsi);
	vmovdqu RB3, (7 * 16)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)