Path: drivers/crypto/inside-secure/eip93/eip93-common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <[email protected]>
 * Christian Marangi <[email protected]>
 */

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-common.h"
#include "eip93-main.h"
#include "eip93-regs.h"

int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err)
{
	u32 ext_err;

	if (!err)
		return 0;

	switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
	case EIP93_PE_CTRL_PE_AUTH_ERR:
	case EIP93_PE_CTRL_PE_PAD_ERR:
		return -EBADMSG;
	/* let software handle anti-replay errors */
	case EIP93_PE_CTRL_PE_SEQNUM_ERR:
		return 0;
	case EIP93_PE_CTRL_PE_EXT_ERR:
		break;
	default:
		dev_err(eip93->dev, "Unhandled error 0x%08x\n", err);
		return -EINVAL;
	}

	/* Parse additional ext errors */
	ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
	switch (ext_err) {
	case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
	case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
		return -EIO;
	case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
		return -EACCES;
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
	case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
		return -EINVAL;
	case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
		return -EBADMSG;
	default:
		dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err);
		return -EINVAL;
	}
}

static void *eip93_ring_next_wptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->write;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}

static void *eip93_ring_next_rptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}

int eip93_put_descriptor(struct eip93_device *eip93,
			 struct eip93_descriptor *desc)
{
	struct eip93_descriptor *cdesc;
	struct eip93_descriptor *rdesc;

	rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(rdesc))
		return -ENOENT;

	cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return -ENOENT;

	memset(rdesc, 0, sizeof(struct eip93_descriptor));

	memcpy(cdesc, desc, sizeof(struct eip93_descriptor));

	return 0;
}

void *eip93_get_descriptor(struct eip93_device *eip93)
{
	struct eip93_descriptor *cdesc;
	void *ptr;

	cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return ERR_PTR(-ENOENT);

	memset(cdesc, 0, sizeof(struct eip93_descriptor));

	ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(ptr))
		return ERR_PTR(-ENOENT);

	return ptr;
}

static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
{
	if (!*sg || !len)
		return;

	free_pages((unsigned long)sg_virt(*sg), get_order(len));
	kfree(*sg);
	*sg = NULL;
}
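
/*
 * Bounce-buffer fallback: when a scatterlist does not satisfy the
 * constraints checked in eip93_is_sg_aligned() below (4-byte aligned
 * segment offsets and block-size-multiple segment lengths, presumably
 * an EIP93 DMA engine requirement), the data is linearized into a
 * single contiguous GFP_DMA allocation sized via get_order(len).
 */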
static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
			      const u32 len, const bool copy)
{
	void *pages;

	*dst = kmalloc(sizeof(**dst), GFP_KERNEL);
	if (!*dst)
		return -ENOMEM;

	pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
					 get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	/* copy only as requested */
	if (copy)
		sg_copy_to_buffer(src, sg_nents(src), pages, len);

	return 0;
}

static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
				const int blksize)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, 4))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, blksize))
				return false;

			return true;
		}

		if (!IS_ALIGNED(sg->length, blksize))
			return false;

		len -= sg->length;
	}
	return false;
}

int check_valid_request(struct eip93_cipher_reqctx *rctx)
{
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	u32 textsize = rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 blksize = rctx->blksize;
	u32 totlen_src = rctx->assoclen + rctx->textsize;
	u32 totlen_dst = rctx->assoclen + rctx->textsize;
	u32 copy_len;
	bool src_align, dst_align;
	int src_nents, dst_nents;
	int err = -EINVAL;

	if (!IS_CTR(rctx->flags)) {
		if (!IS_ALIGNED(textsize, blksize))
			return err;
	}

	if (authsize) {
		if (IS_ENCRYPT(rctx->flags))
			totlen_dst += authsize;
		else
			totlen_src += authsize;
	}

	src_nents = sg_nents_for_len(src, totlen_src);
	if (src_nents < 0)
		return src_nents;

	dst_nents = sg_nents_for_len(dst, totlen_dst);
	if (dst_nents < 0)
		return dst_nents;

	if (src == dst) {
		src_nents = max(src_nents, dst_nents);
		dst_nents = src_nents;
		if (unlikely((totlen_src || totlen_dst) && !src_nents))
			return err;

	} else {
		if (unlikely(totlen_src && !src_nents))
			return err;

		if (unlikely(totlen_dst && !dst_nents))
			return err;
	}

	if (authsize) {
		if (dst_nents == 1 && src_nents == 1) {
			src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
			if (src == dst)
				dst_align = src_align;
			else
				dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
		} else {
			src_align = false;
			dst_align = false;
		}
	} else {
		src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
		if (src == dst)
			dst_align = src_align;
		else
			dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
	}

	copy_len = max(totlen_src, totlen_dst);
	if (!src_align) {
		err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
		if (err)
			return err;
	}

	if (!dst_align) {
		err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
		if (err)
			return err;
	}

	src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
	if (src_nents < 0)
		return src_nents;

	dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);
	if (dst_nents < 0)
		return dst_nents;

	rctx->src_nents = src_nents;
	rctx->dst_nents = dst_nents;

	return 0;
}
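
/*
 * The SA record command words below select cipher, hash and mode from
 * the flags. For AES, EIP93_SA_CMD_AES_KEY_LENGTH is derived from the
 * key length in bytes via keylen >> 3, i.e. presumably a count of
 * 64-bit words: 2 for AES-128 (16 bytes), 3 for AES-192 (24 bytes)
 * and 4 for AES-256 (32 bytes).
 */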
/*
 * Set sa_record function:
 * Even if sa_record is zeroed, keep the explicit " = 0" for readability.
 */
void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
			 const u32 flags)
{
	/* Reset cmd words */
	sa_record->sa_cmd0_word = 0;
	sa_record->sa_cmd1_word = 0;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
	if (!IS_ECB(flags))
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;

	switch ((flags & EIP93_ALG_MASK)) {
	case EIP93_ALG_AES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
		sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
						      keylen >> 3);
		break;
	case EIP93_ALG_3DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
		break;
	case EIP93_ALG_DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
	}

	switch ((flags & EIP93_HASH_MASK)) {
	case EIP93_HASH_SHA256:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
		break;
	case EIP93_HASH_SHA224:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
		break;
	case EIP93_HASH_SHA1:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
		break;
	case EIP93_HASH_MD5:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;

	switch ((flags & EIP93_MODE_MASK)) {
	case EIP93_MODE_CBC:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
		break;
	case EIP93_MODE_CTR:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
		break;
	case EIP93_MODE_ECB:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
		break;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
	if (IS_HASH(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
	}

	if (IS_HMAC(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
	}

	sa_record->sa_spi = 0x0;
	sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
	sa_record->sa_seqmum_mask[1] = 0x0;
}
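
/*
 * Illustrative walk of the combine loop below (hypothetical segment
 * sizes, not from the original authors): for src segments of 96 + 32
 * bytes and one 128-byte dst segment with split == datalen == 128,
 * each descriptor covers min(remainin, remainout, n) bytes, producing
 * (src0 + 0, dst + 0, 96) and then (src1 + 0, dst + 96, 32), the last
 * one tagged EIP93_DESC_LAST once n reaches zero.
 */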
/*
 * Poor man's scatter/gather function:
 * Create a descriptor for every segment to avoid copying buffers.
 * For performance it is better to let the hardware perform multiple
 * DMA operations.
 */
static int eip93_scatter_combine(struct eip93_device *eip93,
				 struct eip93_cipher_reqctx *rctx,
				 u32 datalen, u32 split, int offsetin)
{
	struct eip93_descriptor *cdesc = rctx->cdesc;
	struct scatterlist *sgsrc = rctx->sg_src;
	struct scatterlist *sgdst = rctx->sg_dst;
	unsigned int remainin = sg_dma_len(sgsrc);
	unsigned int remainout = sg_dma_len(sgdst);
	dma_addr_t saddr = sg_dma_address(sgsrc);
	dma_addr_t daddr = sg_dma_address(sgdst);
	dma_addr_t state_addr;
	u32 src_addr, dst_addr, len, n;
	bool nextin = false;
	bool nextout = false;
	int offsetout = 0;
	int err;

	if (IS_ECB(rctx->flags))
		rctx->sa_state_base = 0;

	if (split < datalen) {
		state_addr = rctx->sa_state_ctr_base;
		n = split;
	} else {
		state_addr = rctx->sa_state_base;
		n = datalen;
	}

	do {
		if (nextin) {
			sgsrc = sg_next(sgsrc);
			remainin = sg_dma_len(sgsrc);
			if (remainin == 0)
				continue;

			saddr = sg_dma_address(sgsrc);
			offsetin = 0;
			nextin = false;
		}

		if (nextout) {
			sgdst = sg_next(sgdst);
			remainout = sg_dma_len(sgdst);
			if (remainout == 0)
				continue;

			daddr = sg_dma_address(sgdst);
			offsetout = 0;
			nextout = false;
		}
		src_addr = saddr + offsetin;
		dst_addr = daddr + offsetout;

		if (remainin == remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				nextin = true;
				nextout = true;
			}
		} else if (remainin < remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetout += len;
				remainout -= len;
				nextin = true;
			}
		} else {
			len = remainout;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetin += len;
				remainin -= len;
				nextout = true;
			}
		}
		n -= len;

		cdesc->src_addr = src_addr;
		cdesc->dst_addr = dst_addr;
		cdesc->state_addr = state_addr;
		cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
						   EIP93_PE_LENGTH_HOST_READY);
		cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);

		if (n == 0) {
			n = datalen - split;
			split = datalen;
			state_addr = rctx->sa_state_base;
		}

		if (n == 0)
			cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
						     EIP93_DESC_LAST);

		/*
		 * Loop with a delay - no need to roll back.
		 * Maybe refine by slowing down at EIP93_RING_BUSY.
		 */
again:
		scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
			err = eip93_put_descriptor(eip93, cdesc);
		if (err) {
			usleep_range(EIP93_RING_BUSY_DELAY,
				     EIP93_RING_BUSY_DELAY * 2);
			goto again;
		}
		/* Writing a new descriptor count starts the DMA action */
		writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
	} while (n);

	return -EINPROGRESS;
}
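
/*
 * CTR counter-overflow handling in eip93_send_req() below, worked
 * example with hypothetical values: for iv[3] == 0xfffffffe and
 * textsize == 64 (blocks == 4), end wraps below start, so
 * split = AES_BLOCK_SIZE * -start = 16 * 2 = 32. The first 32 bytes
 * are processed against sa_state_ctr (the original IV); the remaining
 * bytes use sa_state, whose counter has been advanced past the 32-bit
 * wrap with crypto_inc() carrying into iv[2].
 */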
int eip93_send_req(struct crypto_async_request *async,
		   const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
{
	struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
	struct eip93_device *eip93 = ctx->eip93;
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	struct sa_state *sa_state;
	struct eip93_descriptor cdesc;
	u32 flags = rctx->flags;
	int offsetin = 0, err;
	u32 datalen = rctx->assoclen + rctx->textsize;
	u32 split = datalen;
	u32 start, end, ctr, blocks;
	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	int crypto_async_idr;

	rctx->sa_state_ctr = NULL;
	rctx->sa_state = NULL;

	if (IS_ECB(flags))
		goto skip_iv;

	memcpy(iv, reqiv, rctx->ivsize);

	rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
	if (!rctx->sa_state)
		return -ENOMEM;

	sa_state = rctx->sa_state;

	memcpy(sa_state->state_iv, iv, rctx->ivsize);
	if (IS_RFC3686(flags)) {
		sa_state->state_iv[0] = ctx->sa_nonce;
		sa_state->state_iv[1] = iv[0];
		sa_state->state_iv[2] = iv[1];
		sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1);
	} else if (!IS_HMAC(flags) && IS_CTR(flags)) {
		/* Compute data length. */
		blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
		ctr = be32_to_cpu((__be32 __force)iv[3]);
		/* Check for 32-bit counter overflow. */
		start = ctr;
		end = start + blocks - 1;
		if (end < start) {
			split = AES_BLOCK_SIZE * -start;
			/*
			 * Increment the counter manually to cope with
			 * the hardware counter overflow.
			 */
			iv[3] = 0xffffffff;
			crypto_inc((u8 *)iv, AES_BLOCK_SIZE);

			rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
						     GFP_KERNEL);
			if (!rctx->sa_state_ctr) {
				err = -ENOMEM;
				goto free_sa_state;
			}

			memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
			memcpy(sa_state->state_iv, iv, rctx->ivsize);

			rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr,
								 sizeof(*rctx->sa_state_ctr),
								 DMA_TO_DEVICE);
			err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base);
			if (err)
				goto free_sa_state_ctr;
		}
	}

	rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state,
					     sizeof(*rctx->sa_state), DMA_TO_DEVICE);
	err = dma_mapping_error(eip93->dev, rctx->sa_state_base);
	if (err)
		goto free_sa_state_ctr_dma;

skip_iv:

	cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
					     EIP93_PE_CTRL_HOST_READY);
	cdesc.sa_addr = rctx->sa_record_base;
	cdesc.arc4_addr = 0;

	scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
		crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
					     EIP93_RING_NUM - 1, GFP_ATOMIC);

	cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
			FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);

	rctx->cdesc = &cdesc;

	/* Map dst DMA_BIDIRECTIONAL to invalidate the cache on the
	 * destination; implies __dma_cache_wback_inv.
	 */
	if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
		err = -ENOMEM;
		goto free_sa_state_ctr_dma;
	}

	if (src != dst &&
	    !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
		err = -ENOMEM;
		goto free_sg_dma;
	}

	return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin);

free_sg_dma:
	dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
free_sa_state_ctr_dma:
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_TO_DEVICE);
free_sa_state_ctr:
	kfree(rctx->sa_state_ctr);
	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_TO_DEVICE);
free_sa_state:
	kfree(rctx->sa_state);

	return err;
}

void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
		     struct scatterlist *reqsrc, struct scatterlist *reqdst)
{
	u32 len = rctx->assoclen + rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 flags = rctx->flags;
	u32 *otag;
	int i;

	if (rctx->sg_src == rctx->sg_dst) {
		dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
			     DMA_BIDIRECTIONAL);
		goto process_tag;
	}

	dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents,
		     DMA_TO_DEVICE);

	if (rctx->sg_src != reqsrc)
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);

	dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
		     DMA_BIDIRECTIONAL);

	/* SHA tags need conversion from net-to-host byte order */
process_tag:
	if (IS_DECRYPT(flags))
		authsize = 0;

	if (authsize) {
		if (!IS_HASH_MD5(flags)) {
			otag = sg_virt(rctx->sg_dst) + len;
			for (i = 0; i < (authsize / 4); i++)
				otag[i] = be32_to_cpu((__be32 __force)otag[i]);
		}
	}

	if (rctx->sg_dst != reqdst) {
		sg_copy_from_buffer(reqdst, sg_nents(reqdst),
				    sg_virt(rctx->sg_dst), len + authsize);
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
	}
}

void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
			 u8 *reqiv)
{
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_FROM_DEVICE);

	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_FROM_DEVICE);

	if (!IS_ECB(rctx->flags))
		memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);

	kfree(rctx->sa_state_ctr);
	kfree(rctx->sa_state);
}
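
/*
 * HMAC precomputation per RFC 2104:
 * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)), where K' is the
 * key zero-padded (or first digested, if longer) to the block size.
 * The helper below computes the intermediate ipad/opad digests with
 * HASH_FINALIZE disabled so the hardware can later resume from them.
 * Using SHA256_BLOCK_SIZE throughout is valid for every algorithm
 * supported here, since MD5, SHA-1, SHA-224 and SHA-256 all use
 * 64-byte blocks.
 */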
int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
		      unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad,
		      bool skip_ipad)
{
	u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
	struct crypto_ahash *ahash_tfm;
	struct eip93_hash_reqctx *rctx;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg[1];
	const char *alg_name;
	int i, ret;

	switch (ctx_flags & EIP93_HASH_MASK) {
	case EIP93_HASH_SHA256:
		alg_name = "sha256-eip93";
		break;
	case EIP93_HASH_SHA224:
		alg_name = "sha224-eip93";
		break;
	case EIP93_HASH_SHA1:
		alg_name = "sha1-eip93";
		break;
	case EIP93_HASH_MD5:
		alg_name = "md5-eip93";
		break;
	default: /* Impossible */
		return -EINVAL;
	}

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC);
	if (!req) {
		ret = -ENOMEM;
		goto err_ahash;
	}

	rctx = ahash_request_ctx_dma(req);
	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Hash the key if it is longer than SHA256_BLOCK_SIZE */
	if (keylen > SHA256_BLOCK_SIZE) {
		sg_init_one(&sg[0], key, keylen);

		ahash_request_set_crypt(req, sg, ipad, keylen);
		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
		if (ret)
			goto err_req;

		keylen = hashlen;
	} else {
		memcpy(ipad, key, keylen);
	}

	/* Zero-pad the key and copy it to opad */
	memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
	memcpy(opad, ipad, SHA256_BLOCK_SIZE);

	/* Pad with HMAC constants */
	for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	if (skip_ipad) {
		memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE);
	} else {
		/* Hash ipad */
		sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
		ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE);
		ret = crypto_ahash_init(req);
		if (ret)
			goto err_req;

		/* Disable HASH_FINALIZE for the ipad hash */
		rctx->partial_hash = true;

		ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
		if (ret)
			goto err_req;
	}

	/* Hash opad */
	sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
	ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE);
	ret = crypto_ahash_init(req);
	if (ret)
		goto err_req;

	/* Disable HASH_FINALIZE for the opad hash */
	rctx->partial_hash = true;

	ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
	if (ret)
		goto err_req;

	if (!IS_HASH_MD5(ctx_flags)) {
		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
			u32 *ipad_hash = (u32 *)dest_ipad;
			u32 *opad_hash = (u32 *)dest_opad;

			if (!skip_ipad)
				ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]);
			opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]);
		}
	}

err_req:
	ahash_request_free(req);
err_ahash:
	crypto_free_ahash(ahash_tfm);

	return ret;
}