Path: drivers/crypto/hisilicon/sec/sec_algs.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 HiSilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT			SZ_32M

struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to ensure safe operation of reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}

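/*
 * The hardware consumes chains of sec_hw_sgl blocks drawn from a DMA
 * pool.  Each block carries up to SEC_MAX_SGE_NUM address/length pairs
 * and links to its successor via the next_sgl bus address, with next
 * holding the matching CPU pointer.  The two helpers below free and
 * build such chains from an already DMA-mapped scatterlist.
 */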
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info,
				     gfp_t gfp)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   gfp, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}

static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					      &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

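/*
 * The per-mode setkey entry points below differ only in how they map
 * the supplied key length onto a hardware algorithm index: AES accepts
 * 128/192/256-bit keys, XTS takes a double-length key first checked by
 * xts_verify_key(), and the DES/3DES variants run the standard key
 * checks before accepting their fixed-size keys.
 */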
static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under the following circumstances
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue which
		 * is then emptied as requests complete
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

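/*
 * Per-element completion path.  For each BD returned by the hardware
 * this callback writes the next IV back for chained modes (copying the
 * final block of the processed element for CBC, crypto_inc() on the
 * counter for CTR), opportunistically drains the software queue or the
 * backlog into the hardware queue, and frees the element.  Only once
 * the last element of a request has completed are the IV and the
 * scatterlists unmapped and the skcipher request completed.
 */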
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester
		 * so it should be able to handle it appropriately.
		 */
	}

	spin_lock_bh(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
					  backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			crypto_request_complete(backlog_req->req_base,
						-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	spin_unlock_bh(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skcipher_request_complete(skreq, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

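/*
 * Requests are split into elements of at most SEC_REQ_LIMIT (32 MiB)
 * each, one BD per element.  For example, a 70 MiB request gives
 * steps = roundup(70M, 32M) / 32M = 3 with split sizes of
 * { 32 MiB, 32 MiB, 6 MiB }.
 */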
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps, gfp_t gfp)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev, gfp_t gfp)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), gfp);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, gfp);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

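/*
 * Build one BD element from the transform's request template.  The
 * granule size is scattered across the descriptor: bits 15:0 go in w2
 * and bits 19:16 and 21:20 in w0, hence the el_size shifts by 16 and
 * 20 below.  The DE bit in w0 selects whether a distinct destination
 * address is programmed (out-of-place) or the input buffer is reused.
 */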
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info, gfp_t gfp)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), gfp);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info, gfp);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info, gfp);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

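/*
 * Main request path: work out the split, map and split the source (and
 * the destination when out-of-place), build every element up front,
 * then enqueue under the queue lock so that the whole request either
 * queues atomically, goes onto the backlog (if MAY_BACKLOG is set), or
 * fails cleanly with everything unwound.
 */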
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully cleanup after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	spin_lock_bh(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			spin_unlock_bh(&queue->queuelock);
			goto out;
		}

		spin_unlock_bh(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	spin_unlock_bh(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);
	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_BIDIRECTIONAL);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	spin_lock_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

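/*
 * Modes with IV chaining between elements (CBC, CTR) register with the
 * *_with_queue variants below, which add a 512-entry software kfifo so
 * dependent elements can be held back and issued in order from the
 * completion path.
 */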
static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

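/*
 * All algorithms register at priority 4001 so the hardware versions
 * are preferred over the generic software implementations.  ECB and
 * XTS have no inter-element IV dependency and use the plain init/exit;
 * CBC and CTR use the softqueue variants.
 */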
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;
	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}