/*-
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>

#include <arm_neon.h>

#include "sha512.h"
#include "sha512c_impl.h"

void __hidden
SHA512_Transform_arm64_impl(uint64_t * state,
    const unsigned char block[SHA512_BLOCK_LENGTH], const uint64_t K[80])
{
	uint64x2_t W[8];
	uint64x2_t S[4];
	uint64x2_t S_start[4];
	uint64x2_t K_tmp, S_tmp;
	int i;

/*
 * Load one 16-byte chunk of the input block and byte-swap each 64-bit
 * lane, converting the big-endian message words to host order.
 */
#define	A64_LOAD_W(x)							\
	W[x] = vld1q_u64((const uint64_t *)(&block[(x) * 16]));	\
	W[x] = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(W[x])))

	/* 1. Prepare the first part of the message schedule W. */
	A64_LOAD_W(0);
	A64_LOAD_W(1);
	A64_LOAD_W(2);
	A64_LOAD_W(3);
	A64_LOAD_W(4);
	A64_LOAD_W(5);
	A64_LOAD_W(6);
	A64_LOAD_W(7);

	/* 2. Initialize working variables. */
	S[0] = vld1q_u64(&state[0]);
	S[1] = vld1q_u64(&state[2]);
	S[2] = vld1q_u64(&state[4]);
	S[3] = vld1q_u64(&state[6]);

	S_start[0] = S[0];
	S_start[1] = S[1];
	S_start[2] = S[2];
	S_start[3] = S[3];

	/* 3. Mix. */
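	/*
	 * Each vector of S holds two of the eight working variables: at
	 * the top of every 16-round group S[0] = {a,b}, S[1] = {c,d},
	 * S[2] = {e,f} and S[3] = {g,h}.  The SHA-512 instructions process
	 * two rounds per invocation, and rather than moving data between
	 * registers the roles of the vectors rotate by one position each
	 * time, which is why the round macro below indexes S modulo 4.
	 */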
	for (i = 0; i < 80; i += 16) {
		/*
		 * The schedule array has 4 vectors:
		 *  ab = S[( 8 - i) % 4]
		 *  cd = S[( 9 - i) % 4]
		 *  ef = S[(10 - i) % 4]
		 *  gh = S[(11 - i) % 4]
		 *
		 * The following macro:
		 *  - Loads the round constants
		 *  - Adds them to the schedule words
		 *  - Rotates the total to switch the order of the two
		 *    halves so they are in the correct order for gh
		 *  - Fixes the alignment
		 *  - Extracts fg from ef and gh
		 *  - Extracts de from cd and ef
		 *  - Passes these into the first part of the sha512
		 *    calculation to calculate the Sigma 1 and Ch steps
		 *  - Calculates the Sigma 0 and Maj steps and stores the
		 *    result to gh
		 *  - Adds the first part to the cd vector
		 */
#define	A64_RNDr(S, W, i, ii)						\
	K_tmp = vld1q_u64(K + (i * 2) + ii);				\
	K_tmp = vaddq_u64(W[i], K_tmp);					\
	K_tmp = vextq_u64(K_tmp, K_tmp, 1);				\
	K_tmp = vaddq_u64(K_tmp, S[(11 - i) % 4]);			\
	S_tmp = vsha512hq_u64(K_tmp,					\
	    vextq_u64(S[(10 - i) % 4], S[(11 - i) % 4], 1),		\
	    vextq_u64(S[(9 - i) % 4], S[(10 - i) % 4], 1));		\
	S[(11 - i) % 4] = vsha512h2q_u64(S_tmp, S[(9 - i) % 4],		\
	    S[(8 - i) % 4]);						\
	S[(9 - i) % 4] = vaddq_u64(S[(9 - i) % 4], S_tmp)

		A64_RNDr(S, W, 0, i);
		A64_RNDr(S, W, 1, i);
		A64_RNDr(S, W, 2, i);
		A64_RNDr(S, W, 3, i);
		A64_RNDr(S, W, 4, i);
		A64_RNDr(S, W, 5, i);
		A64_RNDr(S, W, 6, i);
		A64_RNDr(S, W, 7, i);

		if (i == 64)
			break;

		/*
		 * Perform the message schedule computation:
		 *  - vsha512su0q_u64 performs the sigma 0 half and adds it
		 *    to the old value
		 *  - vextq_u64 fixes the alignment of the vectors
		 *  - vsha512su1q_u64 performs the sigma 1 half and adds it
		 *    together with both of the above
		 */
#define	A64_MSCH(x)							\
	W[x] = vsha512su1q_u64(						\
	    vsha512su0q_u64(W[x], W[(x + 1) % 8]),			\
	    W[(x + 7) % 8],						\
	    vextq_u64(W[(x + 4) % 8], W[(x + 5) % 8], 1))

		A64_MSCH(0);
		A64_MSCH(1);
		A64_MSCH(2);
		A64_MSCH(3);
		A64_MSCH(4);
		A64_MSCH(5);
		A64_MSCH(6);
		A64_MSCH(7);
	}

	/* 4. Mix local working variables into global state. */
	S[0] = vaddq_u64(S[0], S_start[0]);
	S[1] = vaddq_u64(S[1], S_start[1]);
	S[2] = vaddq_u64(S[2], S_start[2]);
	S[3] = vaddq_u64(S[3], S_start[3]);

	vst1q_u64(&state[0], S[0]);
	vst1q_u64(&state[2], S[1]);
	vst1q_u64(&state[4], S[2]);
	vst1q_u64(&state[6], S[3]);
}
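
/*
 * Illustrative usage sketch (comment only; not compiled with this file).
 * The caller is expected to supply the 80 standard FIPS 180-4 round
 * constants in K.  Apart from SHA512_Transform_arm64_impl and
 * SHA512_BLOCK_LENGTH, the names below are hypothetical.
 *
 *	static const uint64_t sha512_K[80] = {
 *		0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
 *		// ...the remaining 78 constants from FIPS 180-4...
 *	};
 *
 *	uint64_t state[8];	// H0..H7, seeded with the SHA-512 IV
 *	unsigned char block[SHA512_BLOCK_LENGTH];	// one 128-byte block
 *
 *	SHA512_Transform_arm64_impl(state, block, sha512_K);
 */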