Path: blob/master/drivers/crypto/picoxcell_crypto.c
15109 views
/*1* Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles2*3* This program is free software; you can redistribute it and/or modify4* it under the terms of the GNU General Public License as published by5* the Free Software Foundation; either version 2 of the License, or6* (at your option) any later version.7*8* This program is distributed in the hope that it will be useful,9* but WITHOUT ANY WARRANTY; without even the implied warranty of10* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the11* GNU General Public License for more details.12*13* You should have received a copy of the GNU General Public License14* along with this program; if not, write to the Free Software15* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA16*/17#include <crypto/aead.h>18#include <crypto/aes.h>19#include <crypto/algapi.h>20#include <crypto/authenc.h>21#include <crypto/des.h>22#include <crypto/md5.h>23#include <crypto/sha.h>24#include <crypto/internal/skcipher.h>25#include <linux/clk.h>26#include <linux/crypto.h>27#include <linux/delay.h>28#include <linux/dma-mapping.h>29#include <linux/dmapool.h>30#include <linux/err.h>31#include <linux/init.h>32#include <linux/interrupt.h>33#include <linux/io.h>34#include <linux/list.h>35#include <linux/module.h>36#include <linux/platform_device.h>37#include <linux/pm.h>38#include <linux/rtnetlink.h>39#include <linux/scatterlist.h>40#include <linux/sched.h>41#include <linux/slab.h>42#include <linux/timer.h>4344#include "picoxcell_crypto_regs.h"4546/*47* The threshold for the number of entries in the CMD FIFO available before48* the CMD0_CNT interrupt is raised. Increasing this value will reduce the49* number of interrupts raised to the CPU.50*/51#define CMD0_IRQ_THRESHOLD 15253/*54* The timeout period (in jiffies) for a PDU. 
When the the number of PDUs in55* flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled.56* When there are packets in flight but lower than the threshold, we enable57* the timer and at expiry, attempt to remove any processed packets from the58* queue and if there are still packets left, schedule the timer again.59*/60#define PACKET_TIMEOUT 16162/* The priority to register each algorithm with. */63#define SPACC_CRYPTO_ALG_PRIORITY 100006465#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 1666#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 6467#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 6468#define SPACC_CRYPTO_IPSEC_MAX_CTXS 3269#define SPACC_CRYPTO_IPSEC_FIFO_SZ 3270#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 6471#define SPACC_CRYPTO_L2_HASH_PG_SZ 6472#define SPACC_CRYPTO_L2_MAX_CTXS 12873#define SPACC_CRYPTO_L2_FIFO_SZ 1287475#define MAX_DDT_LEN 167677/* DDT format. This must match the hardware DDT format exactly. */78struct spacc_ddt {79dma_addr_t p;80u32 len;81};8283/*84* Asynchronous crypto request structure.85*86* This structure defines a request that is either queued for processing or87* being processed.88*/89struct spacc_req {90struct list_head list;91struct spacc_engine *engine;92struct crypto_async_request *req;93int result;94bool is_encrypt;95unsigned ctx_id;96dma_addr_t src_addr, dst_addr;97struct spacc_ddt *src_ddt, *dst_ddt;98void (*complete)(struct spacc_req *req);99100/* AEAD specific bits. 
*/101u8 *giv;102size_t giv_len;103dma_addr_t giv_pa;104};105106struct spacc_engine {107void __iomem *regs;108struct list_head pending;109int next_ctx;110spinlock_t hw_lock;111int in_flight;112struct list_head completed;113struct list_head in_progress;114struct tasklet_struct complete;115unsigned long fifo_sz;116void __iomem *cipher_ctx_base;117void __iomem *hash_key_base;118struct spacc_alg *algs;119unsigned num_algs;120struct list_head registered_algs;121size_t cipher_pg_sz;122size_t hash_pg_sz;123const char *name;124struct clk *clk;125struct device *dev;126unsigned max_ctxs;127struct timer_list packet_timeout;128unsigned stat_irq_thresh;129struct dma_pool *req_pool;130};131132/* Algorithm type mask. */133#define SPACC_CRYPTO_ALG_MASK 0x7134135/* SPACC definition of a crypto algorithm. */136struct spacc_alg {137unsigned long ctrl_default;138unsigned long type;139struct crypto_alg alg;140struct spacc_engine *engine;141struct list_head entry;142int key_offs;143int iv_offs;144};145146/* Generic context structure for any algorithm type. */147struct spacc_generic_ctx {148struct spacc_engine *engine;149int flags;150int key_offs;151int iv_offs;152};153154/* Block cipher context. */155struct spacc_ablk_ctx {156struct spacc_generic_ctx generic;157u8 key[AES_MAX_KEY_SIZE];158u8 key_len;159/*160* The fallback cipher. If the operation can't be done in hardware,161* fallback to a software version.162*/163struct crypto_ablkcipher *sw_cipher;164};165166/* AEAD cipher context. */167struct spacc_aead_ctx {168struct spacc_generic_ctx generic;169u8 cipher_key[AES_MAX_KEY_SIZE];170u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];171u8 cipher_key_len;172u8 hash_key_len;173struct crypto_aead *sw_cipher;174size_t auth_size;175u8 salt[AES_BLOCK_SIZE];176};177178static int spacc_ablk_submit(struct spacc_req *req);179180static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)181{182return alg ? 
container_of(alg, struct spacc_alg, alg) : NULL;183}184185static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)186{187u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);188189return fifo_stat & SPA_FIFO_CMD_FULL;190}191192/*193* Given a cipher context, and a context number, get the base address of the194* context page.195*196* Returns the address of the context page where the key/context may197* be written.198*/199static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,200unsigned indx,201bool is_cipher_ctx)202{203return is_cipher_ctx ? ctx->engine->cipher_ctx_base +204(indx * ctx->engine->cipher_pg_sz) :205ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);206}207208/* The context pages can only be written with 32-bit accesses. */209static inline void memcpy_toio32(u32 __iomem *dst, const void *src,210unsigned count)211{212const u32 *src32 = (const u32 *) src;213214while (count--)215writel(*src32++, dst++);216}217218static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,219void __iomem *page_addr, const u8 *key,220size_t key_len, const u8 *iv, size_t iv_len)221{222void __iomem *key_ptr = page_addr + ctx->key_offs;223void __iomem *iv_ptr = page_addr + ctx->iv_offs;224225memcpy_toio32(key_ptr, key, key_len / 4);226memcpy_toio32(iv_ptr, iv, iv_len / 4);227}228229/*230* Load a context into the engines context memory.231*232* Returns the index of the context page where the context was loaded.233*/234static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,235const u8 *ciph_key, size_t ciph_len,236const u8 *iv, size_t ivlen, const u8 *hash_key,237size_t hash_len)238{239unsigned indx = ctx->engine->next_ctx++;240void __iomem *ciph_page_addr, *hash_page_addr;241242ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);243hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);244245ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;246spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, 
iv,247ivlen);248writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |249(1 << SPA_KEY_SZ_CIPHER_OFFSET),250ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);251252if (hash_key) {253memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);254writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),255ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);256}257258return indx;259}260261/* Count the number of scatterlist entries in a scatterlist. */262static int sg_count(struct scatterlist *sg_list, int nbytes)263{264struct scatterlist *sg = sg_list;265int sg_nents = 0;266267while (nbytes > 0) {268++sg_nents;269nbytes -= sg->length;270sg = sg_next(sg);271}272273return sg_nents;274}275276static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)277{278ddt->p = phys;279ddt->len = len;280}281282/*283* Take a crypto request and scatterlists for the data and turn them into DDTs284* for passing to the crypto engines. This also DMA maps the data so that the285* crypto engines can DMA to/from them.286*/287static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,288struct scatterlist *payload,289unsigned nbytes,290enum dma_data_direction dir,291dma_addr_t *ddt_phys)292{293unsigned nents, mapped_ents;294struct scatterlist *cur;295struct spacc_ddt *ddt;296int i;297298nents = sg_count(payload, nbytes);299mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);300301if (mapped_ents + 1 > MAX_DDT_LEN)302goto out;303304ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);305if (!ddt)306goto out;307308for_each_sg(payload, cur, mapped_ents, i)309ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));310ddt_set(&ddt[mapped_ents], 0, 0);311312return ddt;313314out:315dma_unmap_sg(engine->dev, payload, nents, dir);316return NULL;317}318319static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)320{321struct aead_request *areq = container_of(req->req, struct aead_request,322base);323struct spacc_engine *engine = req->engine;324struct spacc_ddt *src_ddt, 
*dst_ddt;325unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));326unsigned nents = sg_count(areq->src, areq->cryptlen);327dma_addr_t iv_addr;328struct scatterlist *cur;329int i, dst_ents, src_ents, assoc_ents;330u8 *iv = giv ? giv : areq->iv;331332src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);333if (!src_ddt)334return -ENOMEM;335336dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);337if (!dst_ddt) {338dma_pool_free(engine->req_pool, src_ddt, req->src_addr);339return -ENOMEM;340}341342req->src_ddt = src_ddt;343req->dst_ddt = dst_ddt;344345assoc_ents = dma_map_sg(engine->dev, areq->assoc,346sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);347if (areq->src != areq->dst) {348src_ents = dma_map_sg(engine->dev, areq->src, nents,349DMA_TO_DEVICE);350dst_ents = dma_map_sg(engine->dev, areq->dst, nents,351DMA_FROM_DEVICE);352} else {353src_ents = dma_map_sg(engine->dev, areq->src, nents,354DMA_BIDIRECTIONAL);355dst_ents = 0;356}357358/*359* Map the IV/GIV. For the GIV it needs to be bidirectional as it is360* formed by the crypto block and sent as the ESP IV for IPSEC.361*/362iv_addr = dma_map_single(engine->dev, iv, ivsize,363giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);364req->giv_pa = iv_addr;365366/*367* Map the associated data. 
For decryption we don't copy the368* associated data.369*/370for_each_sg(areq->assoc, cur, assoc_ents, i) {371ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));372if (req->is_encrypt)373ddt_set(dst_ddt++, sg_dma_address(cur),374sg_dma_len(cur));375}376ddt_set(src_ddt++, iv_addr, ivsize);377378if (giv || req->is_encrypt)379ddt_set(dst_ddt++, iv_addr, ivsize);380381/*382* Now map in the payload for the source and destination and terminate383* with the NULL pointers.384*/385for_each_sg(areq->src, cur, src_ents, i) {386ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));387if (areq->src == areq->dst)388ddt_set(dst_ddt++, sg_dma_address(cur),389sg_dma_len(cur));390}391392for_each_sg(areq->dst, cur, dst_ents, i)393ddt_set(dst_ddt++, sg_dma_address(cur),394sg_dma_len(cur));395396ddt_set(src_ddt, 0, 0);397ddt_set(dst_ddt, 0, 0);398399return 0;400}401402static void spacc_aead_free_ddts(struct spacc_req *req)403{404struct aead_request *areq = container_of(req->req, struct aead_request,405base);406struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);407struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);408struct spacc_engine *engine = aead_ctx->generic.engine;409unsigned ivsize = alg->alg.cra_aead.ivsize;410unsigned nents = sg_count(areq->src, areq->cryptlen);411412if (areq->src != areq->dst) {413dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);414dma_unmap_sg(engine->dev, areq->dst,415sg_count(areq->dst, areq->cryptlen),416DMA_FROM_DEVICE);417} else418dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);419420dma_unmap_sg(engine->dev, areq->assoc,421sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);422423dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);424425dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);426dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);427}428429static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,430dma_addr_t ddt_addr, struct 
scatterlist *payload,431unsigned nbytes, enum dma_data_direction dir)432{433unsigned nents = sg_count(payload, nbytes);434435dma_unmap_sg(req->engine->dev, payload, nents, dir);436dma_pool_free(req->engine->req_pool, ddt, ddt_addr);437}438439/*440* Set key for a DES operation in an AEAD cipher. This also performs weak key441* checking if required.442*/443static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,444unsigned int len)445{446struct crypto_tfm *tfm = crypto_aead_tfm(aead);447struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);448u32 tmp[DES_EXPKEY_WORDS];449450if (unlikely(!des_ekey(tmp, key)) &&451(crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {452tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;453return -EINVAL;454}455456memcpy(ctx->cipher_key, key, len);457ctx->cipher_key_len = len;458459return 0;460}461462/* Set the key for the AES block cipher component of the AEAD transform. */463static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,464unsigned int len)465{466struct crypto_tfm *tfm = crypto_aead_tfm(aead);467struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);468469/*470* IPSec engine only supports 128 and 256 bit AES keys. 
If we get a471* request for any other size (192 bits) then we need to do a software472* fallback.473*/474if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {475/*476* Set the fallback transform to use the same request flags as477* the hardware transform.478*/479ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;480ctx->sw_cipher->base.crt_flags |=481tfm->crt_flags & CRYPTO_TFM_REQ_MASK;482return crypto_aead_setkey(ctx->sw_cipher, key, len);483}484485memcpy(ctx->cipher_key, key, len);486ctx->cipher_key_len = len;487488return 0;489}490491static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,492unsigned int keylen)493{494struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);495struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);496struct rtattr *rta = (void *)key;497struct crypto_authenc_key_param *param;498unsigned int authkeylen, enckeylen;499int err = -EINVAL;500501if (!RTA_OK(rta, keylen))502goto badkey;503504if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)505goto badkey;506507if (RTA_PAYLOAD(rta) < sizeof(*param))508goto badkey;509510param = RTA_DATA(rta);511enckeylen = be32_to_cpu(param->enckeylen);512513key += RTA_ALIGN(rta->rta_len);514keylen -= RTA_ALIGN(rta->rta_len);515516if (keylen < enckeylen)517goto badkey;518519authkeylen = keylen - enckeylen;520521if (enckeylen > AES_MAX_KEY_SIZE)522goto badkey;523524if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==525SPA_CTRL_CIPH_ALG_AES)526err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);527else528err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);529530if (err)531goto badkey;532533memcpy(ctx->hash_ctx, key, authkeylen);534ctx->hash_key_len = authkeylen;535536return 0;537538badkey:539crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);540return -EINVAL;541}542543static int spacc_aead_setauthsize(struct crypto_aead *tfm,544unsigned int authsize)545{546struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));547548ctx->auth_size = authsize;549550return 
0;551}552553/*554* Check if an AEAD request requires a fallback operation. Some requests can't555* be completed in hardware because the hardware may not support certain key556* sizes. In these cases we need to complete the request in software.557*/558static int spacc_aead_need_fallback(struct spacc_req *req)559{560struct aead_request *aead_req;561struct crypto_tfm *tfm = req->req->tfm;562struct crypto_alg *alg = req->req->tfm->__crt_alg;563struct spacc_alg *spacc_alg = to_spacc_alg(alg);564struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);565566aead_req = container_of(req->req, struct aead_request, base);567/*568* If we have a non-supported key-length, then we need to do a569* software fallback.570*/571if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==572SPA_CTRL_CIPH_ALG_AES &&573ctx->cipher_key_len != AES_KEYSIZE_128 &&574ctx->cipher_key_len != AES_KEYSIZE_256)575return 1;576577return 0;578}579580static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,581bool is_encrypt)582{583struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));584struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);585int err;586587if (ctx->sw_cipher) {588/*589* Change the request to use the software fallback transform,590* and once the ciphering has completed, put the old transform591* back into the request.592*/593aead_request_set_tfm(req, ctx->sw_cipher);594err = is_encrypt ? 
crypto_aead_encrypt(req) :595crypto_aead_decrypt(req);596aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));597} else598err = -EINVAL;599600return err;601}602603static void spacc_aead_complete(struct spacc_req *req)604{605spacc_aead_free_ddts(req);606req->req->complete(req->req, req->result);607}608609static int spacc_aead_submit(struct spacc_req *req)610{611struct crypto_tfm *tfm = req->req->tfm;612struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);613struct crypto_alg *alg = req->req->tfm->__crt_alg;614struct spacc_alg *spacc_alg = to_spacc_alg(alg);615struct spacc_engine *engine = ctx->generic.engine;616u32 ctrl, proc_len, assoc_len;617struct aead_request *aead_req =618container_of(req->req, struct aead_request, base);619620req->result = -EINPROGRESS;621req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,622ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,623ctx->hash_ctx, ctx->hash_key_len);624625/* Set the source and destination DDT pointers. */626writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);627writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);628writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);629630assoc_len = aead_req->assoclen;631proc_len = aead_req->cryptlen + assoc_len;632633/*634* If we aren't generating an IV, then we need to include the IV in the635* associated data so that it is included in the hash.636*/637if (!req->giv) {638assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));639proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));640} else641proc_len += req->giv_len;642643/*644* If we are decrypting, we need to take the length of the ICV out of645* the processing length.646*/647if (!req->is_encrypt)648proc_len -= ctx->auth_size;649650writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);651writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);652writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);653writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);654writel(0, 
engine->regs + SPA_AUX_INFO_REG_OFFSET);655656ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |657(1 << SPA_CTRL_ICV_APPEND);658if (req->is_encrypt)659ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);660else661ctrl |= (1 << SPA_CTRL_KEY_EXP);662663mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);664665writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);666667return -EINPROGRESS;668}669670static int spacc_req_submit(struct spacc_req *req);671672static void spacc_push(struct spacc_engine *engine)673{674struct spacc_req *req;675676while (!list_empty(&engine->pending) &&677engine->in_flight + 1 <= engine->fifo_sz) {678679++engine->in_flight;680req = list_first_entry(&engine->pending, struct spacc_req,681list);682list_move_tail(&req->list, &engine->in_progress);683684req->result = spacc_req_submit(req);685}686}687688/*689* Setup an AEAD request for processing. This will configure the engine, load690* the context and then start the packet processing.691*692* @giv Pointer to destination address for a generated IV. 
If the693* request does not need to generate an IV then this should be set to NULL.694*/695static int spacc_aead_setup(struct aead_request *req, u8 *giv,696unsigned alg_type, bool is_encrypt)697{698struct crypto_alg *alg = req->base.tfm->__crt_alg;699struct spacc_engine *engine = to_spacc_alg(alg)->engine;700struct spacc_req *dev_req = aead_request_ctx(req);701int err = -EINPROGRESS;702unsigned long flags;703unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));704705dev_req->giv = giv;706dev_req->giv_len = ivsize;707dev_req->req = &req->base;708dev_req->is_encrypt = is_encrypt;709dev_req->result = -EBUSY;710dev_req->engine = engine;711dev_req->complete = spacc_aead_complete;712713if (unlikely(spacc_aead_need_fallback(dev_req)))714return spacc_aead_do_fallback(req, alg_type, is_encrypt);715716spacc_aead_make_ddts(dev_req, dev_req->giv);717718err = -EINPROGRESS;719spin_lock_irqsave(&engine->hw_lock, flags);720if (unlikely(spacc_fifo_cmd_full(engine)) ||721engine->in_flight + 1 > engine->fifo_sz) {722if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {723err = -EBUSY;724spin_unlock_irqrestore(&engine->hw_lock, flags);725goto out_free_ddts;726}727list_add_tail(&dev_req->list, &engine->pending);728} else {729list_add_tail(&dev_req->list, &engine->pending);730spacc_push(engine);731}732spin_unlock_irqrestore(&engine->hw_lock, flags);733734goto out;735736out_free_ddts:737spacc_aead_free_ddts(dev_req);738out:739return err;740}741742static int spacc_aead_encrypt(struct aead_request *req)743{744struct crypto_aead *aead = crypto_aead_reqtfm(req);745struct crypto_tfm *tfm = crypto_aead_tfm(aead);746struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);747748return spacc_aead_setup(req, NULL, alg->type, 1);749}750751static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)752{753struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);754struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);755size_t ivsize = crypto_aead_ivsize(tfm);756struct spacc_alg *alg = 
to_spacc_alg(tfm->base.__crt_alg);757unsigned len;758__be64 seq;759760memcpy(req->areq.iv, ctx->salt, ivsize);761len = ivsize;762if (ivsize > sizeof(u64)) {763memset(req->giv, 0, ivsize - sizeof(u64));764len = sizeof(u64);765}766seq = cpu_to_be64(req->seq);767memcpy(req->giv + ivsize - len, &seq, len);768769return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);770}771772static int spacc_aead_decrypt(struct aead_request *req)773{774struct crypto_aead *aead = crypto_aead_reqtfm(req);775struct crypto_tfm *tfm = crypto_aead_tfm(aead);776struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);777778return spacc_aead_setup(req, NULL, alg->type, 0);779}780781/*782* Initialise a new AEAD context. This is responsible for allocating the783* fallback cipher and initialising the context.784*/785static int spacc_aead_cra_init(struct crypto_tfm *tfm)786{787struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);788struct crypto_alg *alg = tfm->__crt_alg;789struct spacc_alg *spacc_alg = to_spacc_alg(alg);790struct spacc_engine *engine = spacc_alg->engine;791792ctx->generic.flags = spacc_alg->type;793ctx->generic.engine = engine;794ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,795CRYPTO_ALG_ASYNC |796CRYPTO_ALG_NEED_FALLBACK);797if (IS_ERR(ctx->sw_cipher)) {798dev_warn(engine->dev, "failed to allocate fallback for %s\n",799alg->cra_name);800ctx->sw_cipher = NULL;801}802ctx->generic.key_offs = spacc_alg->key_offs;803ctx->generic.iv_offs = spacc_alg->iv_offs;804805get_random_bytes(ctx->salt, sizeof(ctx->salt));806807tfm->crt_aead.reqsize = sizeof(struct spacc_req);808809return 0;810}811812/*813* Destructor for an AEAD context. This is called when the transform is freed814* and must free the fallback cipher.815*/816static void spacc_aead_cra_exit(struct crypto_tfm *tfm)817{818struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);819820if (ctx->sw_cipher)821crypto_free_aead(ctx->sw_cipher);822ctx->sw_cipher = NULL;823}824825/*826* Set the DES key for a block cipher transform. 
This also performs weak key827* checking if the transform has requested it.828*/829static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,830unsigned int len)831{832struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);833struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);834u32 tmp[DES_EXPKEY_WORDS];835836if (len > DES3_EDE_KEY_SIZE) {837crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);838return -EINVAL;839}840841if (unlikely(!des_ekey(tmp, key)) &&842(crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {843tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;844return -EINVAL;845}846847memcpy(ctx->key, key, len);848ctx->key_len = len;849850return 0;851}852853/*854* Set the key for an AES block cipher. Some key lengths are not supported in855* hardware so this must also check whether a fallback is needed.856*/857static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,858unsigned int len)859{860struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);861struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);862int err = 0;863864if (len > AES_MAX_KEY_SIZE) {865crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);866return -EINVAL;867}868869/*870* IPSec engine only supports 128 and 256 bit AES keys. 
If we get a871* request for any other size (192 bits) then we need to do a software872* fallback.873*/874if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&875ctx->sw_cipher) {876/*877* Set the fallback transform to use the same request flags as878* the hardware transform.879*/880ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;881ctx->sw_cipher->base.crt_flags |=882cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;883884err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);885if (err)886goto sw_setkey_failed;887} else if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&888!ctx->sw_cipher)889err = -EINVAL;890891memcpy(ctx->key, key, len);892ctx->key_len = len;893894sw_setkey_failed:895if (err && ctx->sw_cipher) {896tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;897tfm->crt_flags |=898ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;899}900901return err;902}903904static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,905const u8 *key, unsigned int len)906{907struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);908struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);909int err = 0;910911if (len > AES_MAX_KEY_SIZE) {912crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);913err = -EINVAL;914goto out;915}916917memcpy(ctx->key, key, len);918ctx->key_len = len;919920out:921return err;922}923924static int spacc_ablk_need_fallback(struct spacc_req *req)925{926struct spacc_ablk_ctx *ctx;927struct crypto_tfm *tfm = req->req->tfm;928struct crypto_alg *alg = req->req->tfm->__crt_alg;929struct spacc_alg *spacc_alg = to_spacc_alg(alg);930931ctx = crypto_tfm_ctx(tfm);932933return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==934SPA_CTRL_CIPH_ALG_AES &&935ctx->key_len != AES_KEYSIZE_128 &&936ctx->key_len != AES_KEYSIZE_256;937}938939static void spacc_ablk_complete(struct spacc_req *req)940{941struct ablkcipher_request *ablk_req =942container_of(req->req, struct ablkcipher_request, base);943944if (ablk_req->src != ablk_req->dst) 
{945spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,946ablk_req->nbytes, DMA_TO_DEVICE);947spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,948ablk_req->nbytes, DMA_FROM_DEVICE);949} else950spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,951ablk_req->nbytes, DMA_BIDIRECTIONAL);952953req->req->complete(req->req, req->result);954}955956static int spacc_ablk_submit(struct spacc_req *req)957{958struct crypto_tfm *tfm = req->req->tfm;959struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);960struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);961struct crypto_alg *alg = req->req->tfm->__crt_alg;962struct spacc_alg *spacc_alg = to_spacc_alg(alg);963struct spacc_engine *engine = ctx->generic.engine;964u32 ctrl;965966req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,967ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,968NULL, 0);969970writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);971writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);972writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);973974writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);975writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);976writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);977writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);978979ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |980(req->is_encrypt ? 
static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
				  unsigned alg_type, bool is_encrypt)
{
	struct crypto_tfm *old_tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	/* No fallback was allocated at cra_init time; nothing we can do. */
	if (!ctx->sw_cipher)
		return -EINVAL;

	/*
	 * Change the request to use the software fallback transform, and once
	 * the ciphering has completed, put the old transform back into the
	 * request.
	 */
	ablkcipher_request_set_tfm(req, ctx->sw_cipher);
	err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
		crypto_ablkcipher_decrypt(req);
	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));

	return err;
}

/*
 * Prepare a block-cipher request: build the DDTs, then either submit it to
 * the engine or queue it on the pending list if the FIFO is full and the
 * caller allows backlogging.
 */
static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
			    bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
	unsigned long flags;
	int err = -ENOMEM;

	dev_req->req = &req->base;
	dev_req->is_encrypt = is_encrypt;
	dev_req->engine = engine;
	dev_req->complete = spacc_ablk_complete;
	dev_req->result = -EINPROGRESS;

	if (unlikely(spacc_ablk_need_fallback(dev_req)))
		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

	/*
	 * Create the DDT's for the engine. If we share the same source and
	 * destination then we can optimize by reusing the DDT's.
	 */
	if (req->src != req->dst) {
		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
		if (!dev_req->src_ddt)
			goto out;

		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out_free_src;
	} else {
		/* In-place: one bidirectional mapping serves both roles. */
		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out;

		dev_req->src_ddt = NULL;
		dev_req->src_addr = dev_req->dst_addr;
	}

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bailout with an error if not.
	 */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	/* Note: falls through to out_free_src to also release the src DDT. */
	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
		       req->nbytes, req->src == req->dst ?
		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
	if (req->src != req->dst)
		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
			       req->src, req->nbytes, DMA_TO_DEVICE);
out:
	return err;
}

/*
 * Initialise a block-cipher context: record the engine and key/IV offsets,
 * and allocate a software fallback if the algorithm declares it needs one.
 */
static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher)) {
			/* Best-effort: hardware-supported keys still work. */
			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
				 alg->cra_name);
			ctx->sw_cipher = NULL;
		}
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

	return 0;
}

/* Release the software fallback when the transform is destroyed. */
static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_ablkcipher(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 1);
}

static int spacc_ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
		SPA_FIFO_STAT_EMPTY;
}

/*
 * Drain the status FIFO: move each completed request from in_progress to
 * completed, translate the SPAcc status into a POSIX error code, and kick
 * the completion tasklet. Runs with hw_lock held internally; callable from
 * IRQ and timer context.
 */
static void spacc_process_done(struct spacc_engine *engine)
{
	struct spacc_req *req;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	while (!spacc_fifo_stat_empty(engine)) {
		req = list_first_entry(&engine->in_progress, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->completed);
		--engine->in_flight;

		/* POP the status register. */
		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

		/*
		 * Convert the SPAcc error status into the standard POSIX error
		 * codes.
		 */
		if (unlikely(req->result)) {
			switch (req->result) {
			case SPA_STATUS_ICV_FAIL:
				req->result = -EBADMSG;
				break;

			case SPA_STATUS_MEMORY_ERROR:
				dev_warn(engine->dev,
					 "memory error triggered\n");
				req->result = -EFAULT;
				break;

			case SPA_STATUS_BLOCK_ERROR:
				dev_warn(engine->dev,
					 "block error triggered\n");
				req->result = -EIO;
				break;
			}
		}
	}

	tasklet_schedule(&engine->complete);

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}
&engine->completed);1161--engine->in_flight;11621163/* POP the status register. */1164writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);1165req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &1166SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;11671168/*1169* Convert the SPAcc error status into the standard POSIX error1170* codes.1171*/1172if (unlikely(req->result)) {1173switch (req->result) {1174case SPA_STATUS_ICV_FAIL:1175req->result = -EBADMSG;1176break;11771178case SPA_STATUS_MEMORY_ERROR:1179dev_warn(engine->dev,1180"memory error triggered\n");1181req->result = -EFAULT;1182break;11831184case SPA_STATUS_BLOCK_ERROR:1185dev_warn(engine->dev,1186"block error triggered\n");1187req->result = -EIO;1188break;1189}1190}1191}11921193tasklet_schedule(&engine->complete);11941195spin_unlock_irqrestore(&engine->hw_lock, flags);1196}11971198static irqreturn_t spacc_spacc_irq(int irq, void *dev)1199{1200struct spacc_engine *engine = (struct spacc_engine *)dev;1201u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);12021203writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);1204spacc_process_done(engine);12051206return IRQ_HANDLED;1207}12081209static void spacc_packet_timeout(unsigned long data)1210{1211struct spacc_engine *engine = (struct spacc_engine *)data;12121213spacc_process_done(engine);1214}12151216static int spacc_req_submit(struct spacc_req *req)1217{1218struct crypto_alg *alg = req->req->tfm->__crt_alg;12191220if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))1221return spacc_aead_submit(req);1222else1223return spacc_ablk_submit(req);1224}12251226static void spacc_spacc_complete(unsigned long data)1227{1228struct spacc_engine *engine = (struct spacc_engine *)data;1229struct spacc_req *req, *tmp;1230unsigned long flags;1231LIST_HEAD(completed);12321233spin_lock_irqsave(&engine->hw_lock, flags);12341235list_splice_init(&engine->completed, &completed);1236spacc_push(engine);1237if 
(engine->in_flight)1238mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);12391240spin_unlock_irqrestore(&engine->hw_lock, flags);12411242list_for_each_entry_safe(req, tmp, &completed, list) {1243req->complete(req);1244list_del(&req->list);1245}1246}12471248#ifdef CONFIG_PM1249static int spacc_suspend(struct device *dev)1250{1251struct platform_device *pdev = to_platform_device(dev);1252struct spacc_engine *engine = platform_get_drvdata(pdev);12531254/*1255* We only support standby mode. All we have to do is gate the clock to1256* the spacc. The hardware will preserve state until we turn it back1257* on again.1258*/1259clk_disable(engine->clk);12601261return 0;1262}12631264static int spacc_resume(struct device *dev)1265{1266struct platform_device *pdev = to_platform_device(dev);1267struct spacc_engine *engine = platform_get_drvdata(pdev);12681269return clk_enable(engine->clk);1270}12711272static const struct dev_pm_ops spacc_pm_ops = {1273.suspend = spacc_suspend,1274.resume = spacc_resume,1275};1276#endif /* CONFIG_PM */12771278static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)1279{1280return dev ? 
platform_get_drvdata(to_platform_device(dev)) : NULL;1281}12821283static ssize_t spacc_stat_irq_thresh_show(struct device *dev,1284struct device_attribute *attr,1285char *buf)1286{1287struct spacc_engine *engine = spacc_dev_to_engine(dev);12881289return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);1290}12911292static ssize_t spacc_stat_irq_thresh_store(struct device *dev,1293struct device_attribute *attr,1294const char *buf, size_t len)1295{1296struct spacc_engine *engine = spacc_dev_to_engine(dev);1297unsigned long thresh;12981299if (strict_strtoul(buf, 0, &thresh))1300return -EINVAL;13011302thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);13031304engine->stat_irq_thresh = thresh;1305writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,1306engine->regs + SPA_IRQ_CTRL_REG_OFFSET);13071308return len;1309}1310static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,1311spacc_stat_irq_thresh_store);13121313static struct spacc_alg ipsec_engine_algs[] = {1314{1315.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,1316.key_offs = 0,1317.iv_offs = AES_MAX_KEY_SIZE,1318.alg = {1319.cra_name = "cbc(aes)",1320.cra_driver_name = "cbc-aes-picoxcell",1321.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1322.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |1323CRYPTO_ALG_ASYNC |1324CRYPTO_ALG_NEED_FALLBACK,1325.cra_blocksize = AES_BLOCK_SIZE,1326.cra_ctxsize = sizeof(struct spacc_ablk_ctx),1327.cra_type = &crypto_ablkcipher_type,1328.cra_module = THIS_MODULE,1329.cra_ablkcipher = {1330.setkey = spacc_aes_setkey,1331.encrypt = spacc_ablk_encrypt,1332.decrypt = spacc_ablk_decrypt,1333.min_keysize = AES_MIN_KEY_SIZE,1334.max_keysize = AES_MAX_KEY_SIZE,1335.ivsize = AES_BLOCK_SIZE,1336},1337.cra_init = spacc_ablk_cra_init,1338.cra_exit = spacc_ablk_cra_exit,1339},1340},1341{1342.key_offs = 0,1343.iv_offs = AES_MAX_KEY_SIZE,1344.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,1345.alg = {1346.cra_name = "ecb(aes)",1347.cra_driver_name = 
"ecb-aes-picoxcell",1348.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1349.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |1350CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,1351.cra_blocksize = AES_BLOCK_SIZE,1352.cra_ctxsize = sizeof(struct spacc_ablk_ctx),1353.cra_type = &crypto_ablkcipher_type,1354.cra_module = THIS_MODULE,1355.cra_ablkcipher = {1356.setkey = spacc_aes_setkey,1357.encrypt = spacc_ablk_encrypt,1358.decrypt = spacc_ablk_decrypt,1359.min_keysize = AES_MIN_KEY_SIZE,1360.max_keysize = AES_MAX_KEY_SIZE,1361},1362.cra_init = spacc_ablk_cra_init,1363.cra_exit = spacc_ablk_cra_exit,1364},1365},1366{1367.key_offs = DES_BLOCK_SIZE,1368.iv_offs = 0,1369.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,1370.alg = {1371.cra_name = "cbc(des)",1372.cra_driver_name = "cbc-des-picoxcell",1373.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1374.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,1375.cra_blocksize = DES_BLOCK_SIZE,1376.cra_ctxsize = sizeof(struct spacc_ablk_ctx),1377.cra_type = &crypto_ablkcipher_type,1378.cra_module = THIS_MODULE,1379.cra_ablkcipher = {1380.setkey = spacc_des_setkey,1381.encrypt = spacc_ablk_encrypt,1382.decrypt = spacc_ablk_decrypt,1383.min_keysize = DES_KEY_SIZE,1384.max_keysize = DES_KEY_SIZE,1385.ivsize = DES_BLOCK_SIZE,1386},1387.cra_init = spacc_ablk_cra_init,1388.cra_exit = spacc_ablk_cra_exit,1389},1390},1391{1392.key_offs = DES_BLOCK_SIZE,1393.iv_offs = 0,1394.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,1395.alg = {1396.cra_name = "ecb(des)",1397.cra_driver_name = "ecb-des-picoxcell",1398.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1399.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,1400.cra_blocksize = DES_BLOCK_SIZE,1401.cra_ctxsize = sizeof(struct spacc_ablk_ctx),1402.cra_type = &crypto_ablkcipher_type,1403.cra_module = THIS_MODULE,1404.cra_ablkcipher = {1405.setkey = spacc_des_setkey,1406.encrypt = spacc_ablk_encrypt,1407.decrypt = spacc_ablk_decrypt,1408.min_keysize = 
DES_KEY_SIZE,1409.max_keysize = DES_KEY_SIZE,1410},1411.cra_init = spacc_ablk_cra_init,1412.cra_exit = spacc_ablk_cra_exit,1413},1414},1415{1416.key_offs = DES_BLOCK_SIZE,1417.iv_offs = 0,1418.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,1419.alg = {1420.cra_name = "cbc(des3_ede)",1421.cra_driver_name = "cbc-des3-ede-picoxcell",1422.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1423.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,1424.cra_blocksize = DES3_EDE_BLOCK_SIZE,1425.cra_ctxsize = sizeof(struct spacc_ablk_ctx),1426.cra_type = &crypto_ablkcipher_type,1427.cra_module = THIS_MODULE,1428.cra_ablkcipher = {1429.setkey = spacc_des_setkey,1430.encrypt = spacc_ablk_encrypt,1431.decrypt = spacc_ablk_decrypt,1432.min_keysize = DES3_EDE_KEY_SIZE,1433.max_keysize = DES3_EDE_KEY_SIZE,1434.ivsize = DES3_EDE_BLOCK_SIZE,1435},1436.cra_init = spacc_ablk_cra_init,1437.cra_exit = spacc_ablk_cra_exit,1438},1439},1440{1441.key_offs = DES_BLOCK_SIZE,1442.iv_offs = 0,1443.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,1444.alg = {1445.cra_name = "ecb(des3_ede)",1446.cra_driver_name = "ecb-des3-ede-picoxcell",1447.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1448.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,1449.cra_blocksize = DES3_EDE_BLOCK_SIZE,1450.cra_ctxsize = sizeof(struct spacc_ablk_ctx),1451.cra_type = &crypto_ablkcipher_type,1452.cra_module = THIS_MODULE,1453.cra_ablkcipher = {1454.setkey = spacc_des_setkey,1455.encrypt = spacc_ablk_encrypt,1456.decrypt = spacc_ablk_decrypt,1457.min_keysize = DES3_EDE_KEY_SIZE,1458.max_keysize = DES3_EDE_KEY_SIZE,1459},1460.cra_init = spacc_ablk_cra_init,1461.cra_exit = spacc_ablk_cra_exit,1462},1463},1464{1465.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |1466SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,1467.key_offs = 0,1468.iv_offs = AES_MAX_KEY_SIZE,1469.alg = {1470.cra_name = "authenc(hmac(sha1),cbc(aes))",1471.cra_driver_name = 
"authenc-hmac-sha1-cbc-aes-picoxcell",1472.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1473.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,1474.cra_blocksize = AES_BLOCK_SIZE,1475.cra_ctxsize = sizeof(struct spacc_aead_ctx),1476.cra_type = &crypto_aead_type,1477.cra_module = THIS_MODULE,1478.cra_aead = {1479.setkey = spacc_aead_setkey,1480.setauthsize = spacc_aead_setauthsize,1481.encrypt = spacc_aead_encrypt,1482.decrypt = spacc_aead_decrypt,1483.givencrypt = spacc_aead_givencrypt,1484.ivsize = AES_BLOCK_SIZE,1485.maxauthsize = SHA1_DIGEST_SIZE,1486},1487.cra_init = spacc_aead_cra_init,1488.cra_exit = spacc_aead_cra_exit,1489},1490},1491{1492.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |1493SPA_CTRL_HASH_ALG_SHA256 |1494SPA_CTRL_HASH_MODE_HMAC,1495.key_offs = 0,1496.iv_offs = AES_MAX_KEY_SIZE,1497.alg = {1498.cra_name = "authenc(hmac(sha256),cbc(aes))",1499.cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",1500.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1501.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,1502.cra_blocksize = AES_BLOCK_SIZE,1503.cra_ctxsize = sizeof(struct spacc_aead_ctx),1504.cra_type = &crypto_aead_type,1505.cra_module = THIS_MODULE,1506.cra_aead = {1507.setkey = spacc_aead_setkey,1508.setauthsize = spacc_aead_setauthsize,1509.encrypt = spacc_aead_encrypt,1510.decrypt = spacc_aead_decrypt,1511.givencrypt = spacc_aead_givencrypt,1512.ivsize = AES_BLOCK_SIZE,1513.maxauthsize = SHA256_DIGEST_SIZE,1514},1515.cra_init = spacc_aead_cra_init,1516.cra_exit = spacc_aead_cra_exit,1517},1518},1519{1520.key_offs = 0,1521.iv_offs = AES_MAX_KEY_SIZE,1522.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |1523SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,1524.alg = {1525.cra_name = "authenc(hmac(md5),cbc(aes))",1526.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",1527.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1528.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,1529.cra_blocksize = 
AES_BLOCK_SIZE,1530.cra_ctxsize = sizeof(struct spacc_aead_ctx),1531.cra_type = &crypto_aead_type,1532.cra_module = THIS_MODULE,1533.cra_aead = {1534.setkey = spacc_aead_setkey,1535.setauthsize = spacc_aead_setauthsize,1536.encrypt = spacc_aead_encrypt,1537.decrypt = spacc_aead_decrypt,1538.givencrypt = spacc_aead_givencrypt,1539.ivsize = AES_BLOCK_SIZE,1540.maxauthsize = MD5_DIGEST_SIZE,1541},1542.cra_init = spacc_aead_cra_init,1543.cra_exit = spacc_aead_cra_exit,1544},1545},1546{1547.key_offs = DES_BLOCK_SIZE,1548.iv_offs = 0,1549.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |1550SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,1551.alg = {1552.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",1553.cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",1554.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1555.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,1556.cra_blocksize = DES3_EDE_BLOCK_SIZE,1557.cra_ctxsize = sizeof(struct spacc_aead_ctx),1558.cra_type = &crypto_aead_type,1559.cra_module = THIS_MODULE,1560.cra_aead = {1561.setkey = spacc_aead_setkey,1562.setauthsize = spacc_aead_setauthsize,1563.encrypt = spacc_aead_encrypt,1564.decrypt = spacc_aead_decrypt,1565.givencrypt = spacc_aead_givencrypt,1566.ivsize = DES3_EDE_BLOCK_SIZE,1567.maxauthsize = SHA1_DIGEST_SIZE,1568},1569.cra_init = spacc_aead_cra_init,1570.cra_exit = spacc_aead_cra_exit,1571},1572},1573{1574.key_offs = DES_BLOCK_SIZE,1575.iv_offs = 0,1576.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |1577SPA_CTRL_HASH_ALG_SHA256 |1578SPA_CTRL_HASH_MODE_HMAC,1579.alg = {1580.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",1581.cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",1582.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1583.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,1584.cra_blocksize = DES3_EDE_BLOCK_SIZE,1585.cra_ctxsize = sizeof(struct spacc_aead_ctx),1586.cra_type = &crypto_aead_type,1587.cra_module = THIS_MODULE,1588.cra_aead = {1589.setkey = 
spacc_aead_setkey,1590.setauthsize = spacc_aead_setauthsize,1591.encrypt = spacc_aead_encrypt,1592.decrypt = spacc_aead_decrypt,1593.givencrypt = spacc_aead_givencrypt,1594.ivsize = DES3_EDE_BLOCK_SIZE,1595.maxauthsize = SHA256_DIGEST_SIZE,1596},1597.cra_init = spacc_aead_cra_init,1598.cra_exit = spacc_aead_cra_exit,1599},1600},1601{1602.key_offs = DES_BLOCK_SIZE,1603.iv_offs = 0,1604.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |1605SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,1606.alg = {1607.cra_name = "authenc(hmac(md5),cbc(des3_ede))",1608.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",1609.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1610.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,1611.cra_blocksize = DES3_EDE_BLOCK_SIZE,1612.cra_ctxsize = sizeof(struct spacc_aead_ctx),1613.cra_type = &crypto_aead_type,1614.cra_module = THIS_MODULE,1615.cra_aead = {1616.setkey = spacc_aead_setkey,1617.setauthsize = spacc_aead_setauthsize,1618.encrypt = spacc_aead_encrypt,1619.decrypt = spacc_aead_decrypt,1620.givencrypt = spacc_aead_givencrypt,1621.ivsize = DES3_EDE_BLOCK_SIZE,1622.maxauthsize = MD5_DIGEST_SIZE,1623},1624.cra_init = spacc_aead_cra_init,1625.cra_exit = spacc_aead_cra_exit,1626},1627},1628};16291630static struct spacc_alg l2_engine_algs[] = {1631{1632.key_offs = 0,1633.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,1634.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |1635SPA_CTRL_CIPH_MODE_F8,1636.alg = {1637.cra_name = "f8(kasumi)",1638.cra_driver_name = "f8-kasumi-picoxcell",1639.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,1640.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC,1641.cra_blocksize = 8,1642.cra_ctxsize = sizeof(struct spacc_ablk_ctx),1643.cra_type = &crypto_ablkcipher_type,1644.cra_module = THIS_MODULE,1645.cra_ablkcipher = {1646.setkey = spacc_kasumi_f8_setkey,1647.encrypt = spacc_ablk_encrypt,1648.decrypt = spacc_ablk_decrypt,1649.min_keysize = 16,1650.max_keysize = 16,1651.ivsize = 8,1652},1653.cra_init = 
spacc_ablk_cra_init,1654.cra_exit = spacc_ablk_cra_exit,1655},1656},1657};16581659static int __devinit spacc_probe(struct platform_device *pdev,1660unsigned max_ctxs, size_t cipher_pg_sz,1661size_t hash_pg_sz, size_t fifo_sz,1662struct spacc_alg *algs, size_t num_algs)1663{1664int i, err, ret = -EINVAL;1665struct resource *mem, *irq;1666struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),1667GFP_KERNEL);1668if (!engine)1669return -ENOMEM;16701671engine->max_ctxs = max_ctxs;1672engine->cipher_pg_sz = cipher_pg_sz;1673engine->hash_pg_sz = hash_pg_sz;1674engine->fifo_sz = fifo_sz;1675engine->algs = algs;1676engine->num_algs = num_algs;1677engine->name = dev_name(&pdev->dev);16781679mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);1680irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);1681if (!mem || !irq) {1682dev_err(&pdev->dev, "no memory/irq resource for engine\n");1683return -ENXIO;1684}16851686if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),1687engine->name))1688return -ENOMEM;16891690engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));1691if (!engine->regs) {1692dev_err(&pdev->dev, "memory map failed\n");1693return -ENOMEM;1694}16951696if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,1697engine->name, engine)) {1698dev_err(engine->dev, "failed to request IRQ\n");1699return -EBUSY;1700}17011702engine->dev = &pdev->dev;1703engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;1704engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;17051706engine->req_pool = dmam_pool_create(engine->name, engine->dev,1707MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);1708if (!engine->req_pool)1709return -ENOMEM;17101711spin_lock_init(&engine->hw_lock);17121713engine->clk = clk_get(&pdev->dev, NULL);1714if (IS_ERR(engine->clk)) {1715dev_info(&pdev->dev, "clk unavailable\n");1716device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);1717return 
PTR_ERR(engine->clk);1718}17191720if (clk_enable(engine->clk)) {1721dev_info(&pdev->dev, "unable to enable clk\n");1722clk_put(engine->clk);1723return -EIO;1724}17251726err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);1727if (err) {1728clk_disable(engine->clk);1729clk_put(engine->clk);1730return err;1731}173217331734/*1735* Use an IRQ threshold of 50% as a default. This seems to be a1736* reasonable trade off of latency against throughput but can be1737* changed at runtime.1738*/1739engine->stat_irq_thresh = (engine->fifo_sz / 2);17401741/*1742* Configure the interrupts. We only use the STAT_CNT interrupt as we1743* only submit a new packet for processing when we complete another in1744* the queue. This minimizes time spent in the interrupt handler.1745*/1746writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,1747engine->regs + SPA_IRQ_CTRL_REG_OFFSET);1748writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,1749engine->regs + SPA_IRQ_EN_REG_OFFSET);17501751setup_timer(&engine->packet_timeout, spacc_packet_timeout,1752(unsigned long)engine);17531754INIT_LIST_HEAD(&engine->pending);1755INIT_LIST_HEAD(&engine->completed);1756INIT_LIST_HEAD(&engine->in_progress);1757engine->in_flight = 0;1758tasklet_init(&engine->complete, spacc_spacc_complete,1759(unsigned long)engine);17601761platform_set_drvdata(pdev, engine);17621763INIT_LIST_HEAD(&engine->registered_algs);1764for (i = 0; i < engine->num_algs; ++i) {1765engine->algs[i].engine = engine;1766err = crypto_register_alg(&engine->algs[i].alg);1767if (!err) {1768list_add_tail(&engine->algs[i].entry,1769&engine->registered_algs);1770ret = 0;1771}1772if (err)1773dev_err(engine->dev, "failed to register alg \"%s\"\n",1774engine->algs[i].alg.cra_name);1775else1776dev_dbg(engine->dev, "registered alg \"%s\"\n",1777engine->algs[i].alg.cra_name);1778}17791780return ret;1781}17821783static int __devexit spacc_remove(struct platform_device *pdev)1784{1785struct spacc_alg *alg, *next;1786struct spacc_engine 
*engine = platform_get_drvdata(pdev);17871788del_timer_sync(&engine->packet_timeout);1789device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);17901791list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {1792list_del(&alg->entry);1793crypto_unregister_alg(&alg->alg);1794}17951796clk_disable(engine->clk);1797clk_put(engine->clk);17981799return 0;1800}18011802static int __devinit ipsec_probe(struct platform_device *pdev)1803{1804return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,1805SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,1806SPACC_CRYPTO_IPSEC_HASH_PG_SZ,1807SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,1808ARRAY_SIZE(ipsec_engine_algs));1809}18101811static struct platform_driver ipsec_driver = {1812.probe = ipsec_probe,1813.remove = __devexit_p(spacc_remove),1814.driver = {1815.name = "picoxcell-ipsec",1816#ifdef CONFIG_PM1817.pm = &spacc_pm_ops,1818#endif /* CONFIG_PM */1819},1820};18211822static int __devinit l2_probe(struct platform_device *pdev)1823{1824return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,1825SPACC_CRYPTO_L2_CIPHER_PG_SZ,1826SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,1827l2_engine_algs, ARRAY_SIZE(l2_engine_algs));1828}18291830static struct platform_driver l2_driver = {1831.probe = l2_probe,1832.remove = __devexit_p(spacc_remove),1833.driver = {1834.name = "picoxcell-l2",1835#ifdef CONFIG_PM1836.pm = &spacc_pm_ops,1837#endif /* CONFIG_PM */1838},1839};18401841static int __init spacc_init(void)1842{1843int ret = platform_driver_register(&ipsec_driver);1844if (ret) {1845pr_err("failed to register ipsec spacc driver");1846goto out;1847}18481849ret = platform_driver_register(&l2_driver);1850if (ret) {1851pr_err("failed to register l2 spacc driver");1852goto l2_failed;1853}18541855return 0;18561857l2_failed:1858platform_driver_unregister(&ipsec_driver);1859out:1860return ret;1861}1862module_init(spacc_init);18631864static void __exit 
spacc_exit(void)1865{1866platform_driver_unregister(&ipsec_driver);1867platform_driver_unregister(&l2_driver);1868}1869module_exit(spacc_exit);18701871MODULE_LICENSE("GPL");1872MODULE_AUTHOR("Jamie Iles");187318741875