Path: blob/master/drivers/crypto/hisilicon/sec2/sec_crypto.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit field offset and mask macros */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3	9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_CTR_CNT_OFFSET	25
#define SEC_CTR_CNT_ROLLOVER	2
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E

#define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_MAX_CCM_AAD_LEN	65279
#define SEC_TOTAL_MAC_SZ(depth)	(SEC_MAX_MAC_LEN * (depth))

#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
					 SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
					 SEC_PBUF_LEFT_SZ(depth))

#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF
#define WORD_MASK		0x3
#define BYTE_BITS		0x8
#define BYTES_TO_WORDS(bcount)	((bcount) >> 2)
#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_CL_MIN		2
#define IV_CL_MID		4
#define IV_CL_MAX		8
#define IV_FLAGS_OFFSET		0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8
#define SEC_GCM_MIN_AUTH_SZ	0x8
#define SEC_RETRY_MAX_CNT	5U

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;

struct sec_skcipher {
	u64 alg_msk;
	struct skcipher_alg alg;
};

struct sec_aead {
	u64 alg_msk;
	struct aead_alg alg;
};
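/*
 * Software fallback entry points, used when a request cannot be handled
 * in hardware (unsupported key size, oversized input, or a failed
 * hardware submission).
 */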
static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt);
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt);

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->id_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->id_lock);
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	spin_lock_bh(&qp_ctx->id_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->id_lock);
}

static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;
}

static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}

static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}

static int qp_send_message(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1)
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) {
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}

	if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
		req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
		qp_ctx->req_list[qp_ctx->send_head] = req;
	}

	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ret) {
		spin_unlock_bh(&qp_ctx->req_lock);
		return ret;
	}
	if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2)
		qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth;

	spin_unlock_bh(&qp_ctx->req_lock);

	atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
	return -EINPROGRESS;
}
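/*
 * Drain the backlog after a fatal hardware send error: release each
 * request's resources and complete it through the software fallback.
 */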
static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *req, *tmp;
	int ret;

	list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
		list_del(&req->list);
		ctx->req_op->buf_unmap(ctx, req);
		if (req->req_id >= 0)
			sec_free_req_id(req);

		if (ctx->alg_type == SEC_AEAD)
			ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
						   req->c_req.encrypt);
		else
			ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
						       req->c_req.encrypt);

		/* Wake up the busy thread first, then return the errno. */
		crypto_request_complete(req->base, -EINPROGRESS);
		crypto_request_complete(req->base, ret);
	}
}

static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *req, *tmp;
	int ret;

	spin_lock_bh(&qp_ctx->backlog.lock);
	list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
		ret = qp_send_message(req);
		switch (ret) {
		case -EINPROGRESS:
			list_del(&req->list);
			crypto_request_complete(req->base, -EINPROGRESS);
			break;
		case -EBUSY:
			/* Device is busy; stop sending any more requests. */
			goto unlock;
		default:
			/* Release memory resources and send all requests through software. */
			sec_alg_send_backlog_soft(ctx, qp_ctx);
			goto unlock;
		}
	}

unlock:
	spin_unlock_bh(&qp_ctx->backlog.lock);
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;
	u8 type;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
	} else {
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;
	}

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%u]\n", type);
		return;
	}

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_alg_send_message_retry(struct sec_req *req)
{
	int ctr = 0;
	int ret;

	do {
		ret = qp_send_message(req);
	} while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT);

	return ret;
}

static int sec_alg_try_enqueue(struct sec_req *req)
{
	/* Check if any request is already backlogged */
	if (!list_empty(&req->backlog->list))
		return -EBUSY;

	/* Try to enqueue to HW ring */
	return qp_send_message(req);
}

static int sec_alg_send_message_maybacklog(struct sec_req *req)
{
	int ret;

	ret = sec_alg_try_enqueue(req);
	if (ret != -EBUSY)
		return ret;

	spin_lock_bh(&req->backlog->lock);
	ret = sec_alg_try_enqueue(req);
	if (ret == -EBUSY)
		list_add_tail(&req->list, &req->backlog->list);
	spin_unlock_bh(&req->backlog->lock);

	return ret;
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
		return sec_alg_send_message_maybacklog(req);

	return sec_alg_send_message_retry(req);
}
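/*
 * Per-queue DMA resource allocators below: res[0] owns the coherent
 * allocation, res[1..depth-1] point at fixed-size slices of it.
 */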
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->a_ivin, res->a_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, pbuffer is used for
 * small packets (< 512 bytes) when the IOMMU is in use.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int size = SEC_PBUF_PAGE_NUM(q_depth);
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * SEC_PBUF_PKG contains data pbuf, iv and
	 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every PAGE contains six SEC_PBUF_PKG
	 * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG
	 * So we need SEC_PBUF_PAGE_NUM numbers of PAGE
	 * for the SEC_TOTAL_PBUF_SZ
	 */
	for (i = 0; i <= size; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == q_depth)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}
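/* Allocate the IV, MAC and pbuf DMA resources a queue pair needs for this algorithm type. */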
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD) {
		sec_free_mac_resource(dev, qp_ctx->res);
		sec_free_aiv_resource(dev, qp_ctx->res);
	}
}

static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	u16 q_depth = qp_ctx->qp->sq_depth;
	struct device *dev = ctx->dev;
	int ret = -ENOMEM;

	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
	if (!qp_ctx->req_list)
		return ret;

	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
	if (!qp_ctx->res)
		goto err_free_req_list;
	qp_ctx->res->depth = q_depth;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_free_res;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	return 0;

err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_free_res:
	kfree(qp_ctx->res);
err_free_req_list:
	kfree(qp_ctx->req_list);
	return ret;
}

static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_alg_resource_free(ctx, qp_ctx);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
	kfree(qp_ctx->res);
	kfree(qp_ctx->req_list);
}

static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
{
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	qp->req_cb = sec_req_cb;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	spin_lock_init(&qp_ctx->backlog.lock);
	spin_lock_init(&qp_ctx->id_lock);
	INIT_LIST_HEAD(&qp_ctx->backlog.list);
	qp_ctx->send_head = 0;

	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
	if (ret)
		goto err_destroy_idr;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_resource_free;

	return 0;

err_resource_free:
	sec_free_qp_ctx_resource(ctx, qp_ctx);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	sec_free_qp_ctx_resource(ctx, qp_ctx);
	idr_destroy(&qp_ctx->req_idr);
}
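/* Bind the tfm context to a SEC device and bring up all of its queue pair contexts. */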
static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(ctx, i);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
{
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc fallback tfm for %s!\n", alg);
		return PTR_ERR(c_ctx->fbtfm);
	}

	return 0;
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	if (ret)
		goto err_fbtfm_init;

	return 0;

err_fbtfm_init:
	sec_cipher_uninit(ctx);
err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}
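/*
 * setkey helpers: validate the key length for each algorithm/mode pair
 * and record the matching hardware key-length encoding.
 */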
static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		if (c_ctx->c_alg == SEC_CALG_SM4 &&
		    keylen != AES_KEYSIZE_128) {
			pr_err("hisi_sec2: sm4 key error!\n");
			return -EINVAL;
		} else {
			switch (keylen) {
			case AES_KEYSIZE_128:
				c_ctx->c_key_len = SEC_CKEY_128BIT;
				break;
			case AES_KEYSIZE_192:
				c_ctx->c_key_len = SEC_CKEY_192BIT;
				break;
			case AES_KEYSIZE_256:
				c_ctx->c_key_len = SEC_CKEY_256BIT;
				break;
			default:
				pr_err("hisi_sec2: aes key error!\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		dev_err(dev, "sec c_alg err!\n");
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);
	if (c_ctx->fbtfm) {
		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
		if (ret) {
			dev_err(dev, "failed to set fallback skcipher key!\n");
			return ret;
		}
	}
	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
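/*
 * Copy the source scatterlist into the request's pbuf slot; for AEAD
 * decryption the MAC at the tail of the copy is saved to out_mac.
 */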
SEC_AEAD)972copy_size = aead_req->cryptlen + aead_req->assoclen;973else974copy_size = c_req->c_len;975976977pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;978pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size);979if (unlikely(pbuf_length != copy_size)) {980dev_err(dev, "copy src data to pbuf error!\n");981return -EINVAL;982}983if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {984tfm = crypto_aead_reqtfm(aead_req);985authsize = crypto_aead_authsize(tfm);986mac_offset = pbuf + copy_size - authsize;987memcpy(req->aead_req.out_mac, mac_offset, authsize);988}989990if (req->req_id < 0) {991buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);992if (unlikely(dma_mapping_error(dev, buf->in_dma)))993return -ENOMEM;994995buf->out_dma = buf->in_dma;996return 0;997}998999req->in_dma = qp_ctx->res[req_id].pbuf_dma;1000c_req->c_out_dma = req->in_dma;10011002return 0;1003}10041005static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,1006struct scatterlist *dst)1007{1008struct aead_request *aead_req = req->aead_req.aead_req;1009struct sec_cipher_req *c_req = &req->c_req;1010struct sec_qp_ctx *qp_ctx = req->qp_ctx;1011struct sec_request_buf *buf = &req->buf;1012int copy_size, pbuf_length;1013int req_id = req->req_id;10141015if (ctx->alg_type == SEC_AEAD)1016copy_size = c_req->c_len + aead_req->assoclen;1017else1018copy_size = c_req->c_len;10191020if (req->req_id < 0)1021pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size);1022else1023pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf,1024copy_size);1025if (unlikely(pbuf_length != copy_size))1026dev_err(ctx->dev, "copy pbuf data to dst error!\n");10271028if (req->req_id < 0)1029dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);1030}10311032static int sec_aead_mac_init(struct sec_aead_req *req)1033{1034struct aead_request *aead_req = req->aead_req;1035struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);1036size_t authsize = crypto_aead_authsize(tfm);1037struct scatterlist *sgl = aead_req->src;1038u8 *mac_out = req->out_mac;1039size_t copy_size;1040off_t skip_size;10411042/* Copy input mac */1043skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;1044copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);1045if (unlikely(copy_size != authsize))1046return -EINVAL;10471048return 0;1049}10501051static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge)1052{1053hw_sge->buf = sg_dma_address(sgl);1054hw_sge->len = cpu_to_le32(sg_dma_len(sgl));1055hw_sge->page_ctrl = sg_virt(sgl);1056}10571058static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,1059struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,1060int dma_dir)1061{1062struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;1063u32 i, sg_n, sg_n_mapped;1064struct scatterlist *sg;1065u32 sge_var = 0;10661067sg_n = sg_nents(src);1068sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);1069if (unlikely(!sg_n_mapped)) {1070dev_err(dev, "dma mapping for SG error!\n");1071return -EINVAL;1072} else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {1073dev_err(dev, "the number of entries in input scatterlist error!\n");1074dma_unmap_sg(dev, src, sg_n, dma_dir);1075return -EINVAL;1076}10771078for_each_sg(src, sg, sg_n_mapped, i) {1079fill_sg_to_hw_sge(sg, curr_hw_sge);1080curr_hw_sge++;1081sge_var++;1082}10831084src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);1085src_in->entry_sum_in_chain = 
static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,
				struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,
				int dma_dir)
{
	struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;
	u32 i, sg_n, sg_n_mapped;
	struct scatterlist *sg;
	u32 sge_var = 0;

	sg_n = sg_nents(src);
	sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);
	if (unlikely(!sg_n_mapped)) {
		dev_err(dev, "dma mapping for SG error!\n");
		return -EINVAL;
	} else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {
		dev_err(dev, "the number of entries in input scatterlist error!\n");
		dma_unmap_sg(dev, src, sg_n, dma_dir);
		return -EINVAL;
	}

	for_each_sg(src, sg, sg_n_mapped, i) {
		fill_sg_to_hw_sge(sg, curr_hw_sge);
		curr_hw_sge++;
		sge_var++;
	}

	src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);
	src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM);
	src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM);
	*hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
	if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) {
		dma_unmap_sg(dev, src, sg_n, dma_dir);
		return -ENOMEM;
	}

	return 0;
}

static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src,
				  dma_addr_t src_in, int dma_dir)
{
	dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
	dma_unmap_sg(dev, src, sg_nents(src), dma_dir);
}

static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
			      struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
	struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
	int ret;

	if (dst == src) {
		ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
					   DMA_BIDIRECTIONAL);
		req->buf.out_dma = req->buf.in_dma;
		return ret;
	}

	ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
	if (unlikely(ret))
		return ret;

	ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
				   DMA_FROM_DEVICE);
	if (unlikely(ret)) {
		sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
		return ret;
	}

	return 0;
}
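/*
 * Buffer mapping for requests that hold a hardware request id: IV and
 * MAC buffers come from the preallocated per-id slots.
 */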
static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
				struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	enum dma_data_direction src_direction;
	int ret;

	if (req->use_pbuf) {
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->a_ivin = res->a_ivin;
			a_req->a_ivin_dma = res->a_ivin_dma;
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}
		return sec_cipher_pbuf_map(ctx, req, src);
	}

	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	src_direction = dst == src ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						qp_ctx->c_in_pool,
						req->req_id,
						&req->in_dma, src_direction);
	if (IS_ERR(req->in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(req->in);
	}

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
			return ret;
		}
	}

	if (dst == src) {
		c_req->c_out = req->in;
		c_req->c_out_dma = req->in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma,
							     DMA_FROM_DEVICE);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	bool is_aead = (ctx->alg_type == SEC_AEAD);
	struct device *dev = ctx->dev;
	int ret = -ENOMEM;

	if (req->req_id >= 0)
		return sec_cipher_map_inner(ctx, req, src, dst);

	c_req->c_ivin = c_req->c_ivin_buf;
	c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin,
					   SEC_IV_SIZE, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma)))
		return -ENOMEM;

	if (is_aead) {
		a_req->a_ivin = a_req->a_ivin_buf;
		a_req->out_mac = a_req->out_mac_buf;
		a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin,
						   SEC_IV_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma)))
			goto free_c_ivin_dma;

		a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac,
						    SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma)))
			goto free_a_ivin_dma;
	}
	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		if (unlikely(ret))
			goto free_out_mac_dma;

		return 0;
	}

	if (!c_req->encrypt && is_aead) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			goto free_out_mac_dma;
		}
	}

	ret = sec_cipher_map_sgl(dev, req, src, dst);
	if (unlikely(ret)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		goto free_out_mac_dma;
	}

	return 0;

free_out_mac_dma:
	if (is_aead)
		dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
free_a_ivin_dma:
	if (is_aead)
		dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
free_c_ivin_dma:
	dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
	return ret;
}
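/* Undo all DMA mappings taken by sec_cipher_map() or sec_cipher_map_inner(). */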
static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->req_id >= 0) {
		if (req->use_pbuf) {
			sec_cipher_pbuf_unmap(ctx, req, dst);
		} else {
			if (dst != src) {
				hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE);
				hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
			} else {
				hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
			}
		}
		return;
	}

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src) {
			sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
			sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
		} else {
			sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
		}
	}

	dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
	if (ctx->alg_type == SEC_AEAD) {
		dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
		dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		if (keys->authkeylen)
			memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}

static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
}
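/*
 * AEAD setkey: CCM/GCM take the raw cipher key, while authenc
 * algorithms are split into cipher and auth keys with
 * crypto_authenc_extractkeys().
 */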
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		if (ret) {
			dev_err(dev, "set sec aes ccm cipher key err!\n");
			return ret;
		}
		memcpy(c_ctx->c_key, key, keylen);

		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
	}

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret) {
		dev_err(dev, "sec extract aead keys err!\n");
		goto bad_key;
	}

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
	if (ret) {
		dev_err(dev, "set sec fallback key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return ret;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)			\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
{										\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);		\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
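/* Fill a type2 (v2 hardware) cipher BD from the request and context state. */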
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	if (req->req_id < 0) {
		sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
		sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
	} else {
		sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
		sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
	}
	if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	} else {
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	}

	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);

	return 0;
}

static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 bd_param = 0;
	u16 cipher;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	if (req->req_id < 0) {
		sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
		sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
	} else {
		sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
		sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
	}
	if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
						c_ctx->c_mode;
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET_V3);

	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC;
	else
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	/* Set the CTR counter mode to 128-bit rollover */
	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
					     SEC_CTR_CNT_OFFSET);

	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
	} else {
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
	}

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64((unsigned long)req);

	return 0;
}

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}
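/*
 * Propagate the output IV back to the caller: CBC copies the last
 * cipher block, CTR advances the counter by the number of blocks
 * processed.
 */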
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (alg_type == SEC_SKCIPHER) {
		sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
	} else {
		sz = (cryptlen + iv_size - 1) / iv_size;
		ctr_iv_inc(iv, iv_size, sz);
	}
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	if (req->req_id >= 0)
		sec_free_req_id(req);

	/* IV output at encrypt of CBC/CTR mode */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	crypto_request_complete(req->base, err);
	sec_alg_send_backlog(ctx, qp_ctx);
}

static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 data_size = aead_req->cryptlen;
	u8 flage = 0;
	u8 cm, cl;

	/* the specification has been checked in aead_iv_demension_check() */
	cl = c_req->c_ivin[0] + 1;
	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;

	/* the last 3 bits are L' */
	flage |= c_req->c_ivin[0] & IV_CL_MASK;

	/* M' occupies bits 3~5, the Flags field is bit 6 */
	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
	flage |= cm << IV_CM_OFFSET;
	if (aead_req->assoclen)
		flage |= 0x01 << IV_FLAGS_OFFSET;

	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
	a_req->a_ivin[0] = flage;

	/*
	 * the last 32 bits hold the counter's initial value,
	 * but the nonce uses the first 16 bits;
	 * the tail 16 bits are filled with the cipher length
	 */
	if (!c_req->encrypt)
		data_size = aead_req->cryptlen - authsize;

	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
			data_size & IV_LAST_BYTE_MASK;
	data_size >>= IV_BYTE_OFFSET;
	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
			data_size & IV_LAST_BYTE_MASK;
}

static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
		/*
		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
		 * the counter must set to 0x01
		 * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
		 */
		set_aead_auth_iv(ctx, req);
	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
		/* GCM 12Byte Cipher_IV == Auth_IV */
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
	}
}
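/*
 * CCM/GCM carry no separate auth key: the auth key address mirrors the
 * cipher key and the auth type is set to SEC_NO_AUTH.
 */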
static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

	if (dir)
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	else
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	if (dir) {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	} else {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
	}
	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
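/* AEAD BD fill: reuse the skcipher cipher fields, then add the auth half. */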
static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}
	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	size_t sz;

	if (!err && req->c_req.encrypt) {
		if (c->c_ctx.c_mode == SEC_CMODE_CBC)
			sec_update_iv(req, SEC_AEAD);

		sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
					  authsize, a_req->cryptlen + a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	if (req->req_id >= 0)
		sec_free_req_id(req);

	crypto_request_complete(req->base, err);
	sec_alg_send_backlog(c, qp_ctx);
}

static void sec_request_uninit(struct sec_req *req)
{
	if (req->req_id >= 0)
		sec_free_req_id(req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++) {
		qp_ctx = &ctx->qp_ctx[i];
		req->req_id = sec_alloc_req_id(req, qp_ctx);
		if (req->req_id >= 0)
			break;
	}

	req->qp_ctx = qp_ctx;
	req->backlog = &qp_ctx->backlog;

	return 0;
}
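/*
 * Main hardware path: grab a request id, map buffers, fill and send the
 * BD; any failure falls back to the software implementation.
 */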
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Output IV before decrypt of CBC/CTR mode */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the IV from the user request */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);

err_uninit_req:
	sec_request_uninit(req);
	if (ctx->alg_type == SEC_AEAD)
		ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
					   req->c_req.encrypt);
	else
		ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
					       req->c_req.encrypt);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return ret;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}
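/* Teardown mirrors sec_aead_init() in reverse order. */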
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return ret;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("invalid aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(a_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->hash_tfm);
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		crypto_free_shash(ctx->a_ctx.hash_tfm);
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		/* ctx->dev is not valid when sec_aead_init() fails, so use pr_err() */
		pr_err("hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}
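/*
 * Illustrative sketch only (hypothetical helper, not used above): the
 * fallback allocation pattern from sec_aead_ctx_init(). Passing
 * CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC in the mask with type 0
 * selects an implementation with both flags clear, i.e. a synchronous
 * algorithm that does not itself need a fallback, so the fallback can
 * never resolve back to this driver.
 */
static int __maybe_unused sec_example_alloc_fallback(const char *aead_name,
						     struct crypto_aead **fb)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead(aead_name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	*fb = tfm;
	return 0;
}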
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CTR:
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx,
				    struct sec_req *sreq, bool *need_fallback)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
		*need_fallback = true;

	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
	struct device *dev = ctx->dev;
	int ret;

	if (!c_ctx->fbtfm) {
		dev_err_ratelimited(dev, "the software fallback tfm is not available!\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* The software fallback runs synchronously, so no completion callback is set */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx_dma(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	bool need_fallback = false;
	int ret;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->base = &sk_req->base;

	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback || need_fallback))
		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = sec_skcipher_ctx_init,\
	.exit = sec_skcipher_ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
}
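/*
 * Illustrative sketch only (hypothetical function, not part of this
 * driver): how a kernel consumer might drive one of the skciphers
 * registered from the table below. With SEC_PRIORITY (4001) this driver
 * normally wins the "cbc(aes)" resolution when the hardware is present.
 * key is assumed to be AES_KEYSIZE_128 bytes and iv AES_BLOCK_SIZE bytes.
 */
static int __maybe_unused sec_example_cbc_aes_encrypt(struct scatterlist *src,
						      struct scatterlist *dst,
						      unsigned int len,
						      const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out;

	/* Wait synchronously for the asynchronous hardware completion */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}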
static struct sec_skcipher sec_skciphers[] = {
	{
		.alg_msk = BIT(0),
		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(1),
		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(2),
		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(3),
		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(12),
		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(13),
		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(14),
		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(23),
		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(24),
		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
					DES3_EDE_BLOCK_SIZE),
	},
};

static int aead_iv_dimension_check(struct aead_request *aead_req)
{
	u8 cl;

	/* In CCM, iv[0] holds L' = L - 1, so the length-field size is L = iv[0] + 1 */
	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}

static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t sz = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret;

	if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
		return -EINVAL;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
		     req->assoclen > SEC_MAX_AAD_LEN))
		return -EINVAL;

	if (c_mode == SEC_CMODE_CCM) {
		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
			return -EINVAL;

		ret = aead_iv_dimension_check(req);
		if (unlikely(ret))
			return -EINVAL;
	} else if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sz & WORD_MASK))
			return -EINVAL;
		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
			return -EINVAL;
	} else if (c_mode == SEC_CMODE_GCM) {
		if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
			return -EINVAL;
	}

	return 0;
}
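/*
 * Note on the CCM checks above (documentation only): per RFC 3610 the CCM
 * nonce starts with L' = L - 1, where L is the byte width of the message
 * length field and 2 <= L <= 8. aead_iv_dimension_check() rebuilds
 * L = iv[0] + 1 and, when L < 4 (IV_CL_MID), additionally rejects any
 * cryptlen that cannot be encoded in L bytes (cryptlen >> (8 * L) != 0);
 * for L >= 4 a 32-bit cryptlen always fits.
 */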
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
		     sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(dev, "aead cbc mode input data length error!\n");
		return -EINVAL;
	}

	/* Only AES and SM4 are supported */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq))) {
		*need_fallback = true;
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}

static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct aead_request *subreq;
	int ret;

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	if (encrypt)
		ret = crypto_aead_encrypt(subreq);
	else
		ret = crypto_aead_decrypt(subreq);
	aead_request_free(subreq);

	return ret;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx_dma(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	size_t sz = crypto_aead_authsize(tfm);
	bool need_fallback = false;
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->base = &a_req->base;
	/* On decryption, ->cryptlen includes the authentication tag */
	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);

	ret = sec_aead_param_check(ctx, req, &need_fallback);
	if (unlikely(ret)) {
		if (need_fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
	ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}
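/*
 * Illustrative sketch only (hypothetical function, not part of this
 * driver): minimal "gcm(aes)" usage showing the AEAD conventions handled
 * above. src/dst scatterlists carry AAD followed by the text, assoclen is
 * set separately, and for in-place encryption the tag is appended after
 * the ciphertext. key is assumed to be AES_KEYSIZE_256 bytes.
 */
static int __maybe_unused sec_example_gcm_aes(struct scatterlist *sg,
					      unsigned int assoclen,
					      unsigned int textlen,
					      const u8 *key, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, AES_KEYSIZE_256);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, AES_BLOCK_SIZE);
	if (ret)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	/* In-place encryption: the 16-byte tag is written after the ciphertext */
	aead_request_set_crypt(req, sg, sg, textlen, iv);
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return ret;
}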
static struct sec_aead sec_aeads[] = {
	{
		.alg_msk = BIT(6),
		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(7),
		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(17),
		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(18),
		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(43),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(44),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(45),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
	},
};

static void sec_unregister_skcipher(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_skciphers[i].alg_msk & alg_mask)
			crypto_unregister_skcipher(&sec_skciphers[i].alg);
}

static int sec_register_skcipher(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_skciphers);

	for (i = 0; i < count; i++) {
		if (!(sec_skciphers[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_skcipher(alg_mask, i);

	return ret;
}

static void sec_unregister_aead(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_aeads[i].alg_msk & alg_mask)
			crypto_unregister_aead(&sec_aeads[i].alg);
}

static int sec_register_aead(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_aeads);

	for (i = 0; i < count; i++) {
		if (!(sec_aeads[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_aead(&sec_aeads[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_aead(alg_mask, i);

	return ret;
}

int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;
	int ret = 0;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
				      SEC_DRV_ALG_BITMAP_LOW_TB);

	mutex_lock(&sec_algs_lock);
	if (sec_available_devs) {
		sec_available_devs++;
		goto unlock;
	}

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		goto unlock;

	ret = sec_register_aead(alg_mask);
	if (ret)
		goto unreg_skcipher;

	sec_available_devs++;
	mutex_unlock(&sec_algs_lock);

	return 0;

unreg_skcipher:
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
unlock:
	mutex_unlock(&sec_algs_lock);
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
				      SEC_DRV_ALG_BITMAP_LOW_TB);

	mutex_lock(&sec_algs_lock);
	if (--sec_available_devs)
		goto unlock;

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

unlock:
	mutex_unlock(&sec_algs_lock);
}
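/*
 * Note (documentation only): registration is reference-counted through
 * sec_available_devs, so only the first SEC device to probe registers the
 * algorithms and only the last one to leave unregisters them. Each entry
 * in sec_skciphers[] and sec_aeads[] is additionally gated by its alg_msk
 * bit in the capability bitmap read from the device, so algorithms the
 * hardware does not advertise are never exposed to the crypto API.
 */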