#define KMSG_COMPONENT "phmac_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <asm/cpacf.h>
#include <asm/pkey.h>
#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/atomic.h>
#include <linux/cpufeature.h>
#include <linux/delay.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/spinlock.h>
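
/*
 * Requests which cannot be handled synchronously (for example
 * because the protected key needs to be re-converted, which may
 * sleep) are deferred to this crypto engine. MAX_QLEN bounds the
 * engine's internal request queue.
 */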
static struct crypto_engine *phmac_crypto_engine;
#define MAX_QLEN 10
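
/*
 * Helper struct for walking the request source data:
 * the current chunk address and the bytes left in it.
 */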
struct hash_walk_helper {
struct crypto_hash_walk walk;
const u8 *walkaddr;
int walkbytes;
};
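
/*
 * Start the walk over the request's source data and fetch the
 * first chunk. Returns 0 on success or a negative errno.
 */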
static inline int hwh_prepare(struct ahash_request *req,
struct hash_walk_helper *hwh)
{
hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk);
if (hwh->walkbytes < 0)
return hwh->walkbytes;
hwh->walkaddr = hwh->walk.data;
return 0;
}
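
/*
 * Advance the walk by n processed bytes. A negative n terminates
 * the walk with that error. Once the current chunk is exhausted,
 * the next one is fetched. Returns 0 on success or a negative errno.
 */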
static inline int hwh_advance(struct hash_walk_helper *hwh, int n)
{
if (n < 0)
return crypto_hash_walk_done(&hwh->walk, n);
hwh->walkbytes -= n;
hwh->walkaddr += n;
if (hwh->walkbytes > 0)
return 0;
hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0);
if (hwh->walkbytes < 0)
return hwh->walkbytes;
hwh->walkaddr = hwh->walk.data;
return 0;
}
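
/*
 * The KMAC parameter block for the protected-key HMAC function
 * codes is laid out as: chaining value (CV, half the block size),
 * input message bit length (IMBL, an eighth of the block size),
 * followed by the protected key. The offset macros below encode
 * this layout.
 */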
#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
#define MAX_IMBL_SIZE sizeof(u128)
#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
#define SHA2_CV_SIZE(bs) ((bs) >> 1)
#define SHA2_IMBL_SIZE(bs) ((bs) >> 3)
#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs))
#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
#define PHMAC_MAX_KEYSIZE 256
#define PHMAC_SHA256_PK_SIZE (SHA256_BLOCK_SIZE + 32)
#define PHMAC_SHA512_PK_SIZE (SHA512_BLOCK_SIZE + 32)
#define PHMAC_MAX_PK_SIZE PHMAC_SHA512_PK_SIZE
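
/*
 * Protected key as delivered by the pkey layer. The blob size is
 * the HMAC block size plus 32 bytes; the 32 byte tail is assumed
 * to be the verification pattern of the wrapped key.
 */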
struct phmac_protkey {
u32 type;
u32 len;
u8 protkey[PHMAC_MAX_PK_SIZE];
};
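
/*
 * pk_state values. In addition, any negative value is an errno
 * stored by a failed key conversion (see phmac_convert_key()).
 */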
#define PK_STATE_NO_KEY 0
#define PK_STATE_CONVERT_IN_PROGRESS 1
#define PK_STATE_VALID 2
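
/*
 * Per-tfm context: the raw key material, the CPACF function code
 * derived from it, a counter of requests currently routed via the
 * crypto engine, and the protected key state guarded by pk_lock.
 */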
struct phmac_tfm_ctx {
u8 keybuf[PHMAC_MAX_KEYSIZE];
unsigned int keylen;
long fc;
atomic_t via_engine_ctr;
spinlock_t pk_lock;
int pk_state;
struct phmac_protkey pk;
};
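
/*
 * Layout of general register 0 for the KMAC instruction: fc holds
 * the function code, iimp flags an intermediate (non-final) input
 * message part; ikp and ccup are further control bits used by the
 * protected-key function codes.
 */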
union kmac_gr0 {
unsigned long reg;
struct {
unsigned long : 48;
unsigned long ikp : 1;
unsigned long iimp : 1;
unsigned long ccup : 1;
unsigned long : 6;
unsigned long fc : 7;
};
};
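
/*
 * Per-request KMAC state: the parameter block (CV + IMBL +
 * protected key), gr0 for the KMAC instruction, a buffer for a
 * partial input block, and a 128 bit message byte counter split
 * into buflen[0] (low) and buflen[1] (high).
 */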
struct kmac_sha2_ctx {
u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE];
union kmac_gr0 gr0;
u8 buf[MAX_BLOCK_SIZE];
u64 buflen[2];
};
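
/* Request context: walk state, KMAC state and the final flag. */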
struct phmac_req_ctx {
struct hash_walk_helper hwh;
struct kmac_sha2_ctx kmac_ctx;
bool final;
};
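
/*
 * Layout of the clear-key token consumed by the pkey layer. It is
 * used to wrap a raw key passed in during the crypto selftests,
 * see phmac_setkey().
 */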
struct hmac_clrkey_token {
u8 type;
u8 res0[3];
u8 version;
u8 res1[3];
u32 keytype;
u32 len;
u8 key[];
} __packed;
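
/*
 * Shorten a key that exceeds the HMAC block size by hashing it,
 * using the CPACF KLMD instruction (standard HMAC key handling).
 */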
static int hash_key(const u8 *in, unsigned int inlen,
u8 *digest, unsigned int digestsize)
{
unsigned long func;
union {
struct sha256_paramblock {
u32 h[8];
u64 mbl;
} sha256;
struct sha512_paramblock {
u64 h[8];
u128 mbl;
} sha512;
} __packed param;
#define PARAM_INIT(x, y, z) \
param.sha##x.h[0] = SHA##y ## _H0; \
param.sha##x.h[1] = SHA##y ## _H1; \
param.sha##x.h[2] = SHA##y ## _H2; \
param.sha##x.h[3] = SHA##y ## _H3; \
param.sha##x.h[4] = SHA##y ## _H4; \
param.sha##x.h[5] = SHA##y ## _H5; \
param.sha##x.h[6] = SHA##y ## _H6; \
param.sha##x.h[7] = SHA##y ## _H7; \
param.sha##x.mbl = (z)
switch (digestsize) {
case SHA224_DIGEST_SIZE:
func = CPACF_KLMD_SHA_256;
PARAM_INIT(256, 224, inlen * 8);
break;
case SHA256_DIGEST_SIZE:
func = CPACF_KLMD_SHA_256;
PARAM_INIT(256, 256, inlen * 8);
break;
case SHA384_DIGEST_SIZE:
func = CPACF_KLMD_SHA_512;
PARAM_INIT(512, 384, inlen * 8);
break;
case SHA512_DIGEST_SIZE:
func = CPACF_KLMD_SHA_512;
PARAM_INIT(512, 512, inlen * 8);
break;
default:
return -EINVAL;
}
#undef PARAM_INIT
cpacf_klmd(func, &param, in, inlen);
memcpy(digest, &param, digestsize);
return 0;
}
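
/*
 * Build a clear-key token around the given raw key. Keys longer
 * than the block size are hashed first; shorter keys end up zero
 * padded, as the caller provides a zeroed destination buffer.
 */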
static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen,
unsigned int digestsize, u8 *dest)
{
struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest;
unsigned int blocksize;
int rc;
token->type = 0x00;
token->version = 0x02;
switch (digestsize) {
case SHA224_DIGEST_SIZE:
case SHA256_DIGEST_SIZE:
token->keytype = PKEY_KEYTYPE_HMAC_512;
blocksize = 64;
break;
case SHA384_DIGEST_SIZE:
case SHA512_DIGEST_SIZE:
token->keytype = PKEY_KEYTYPE_HMAC_1024;
blocksize = 128;
break;
default:
return -EINVAL;
}
token->len = blocksize;
if (clrkeylen > blocksize) {
rc = hash_key(clrkey, clrkeylen, token->key, digestsize);
if (rc)
return rc;
} else {
memcpy(token->key, clrkey, clrkeylen);
}
return 0;
}
static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx,
const u8 *key, unsigned int keylen)
{
if (keylen > sizeof(tfm_ctx->keybuf))
return -EINVAL;
memcpy(tfm_ctx->keybuf, key, keylen);
tfm_ctx->keylen = keylen;
return 0;
}
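
/*
 * Convert the raw key material into a protected key via the pkey
 * layer. On -EBUSY the conversion is retried up to five times in
 * total, with an exponentially increasing sleep in between; an
 * interrupted sleep aborts with -EINTR.
 */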
static inline int convert_key(const u8 *key, unsigned int keylen,
struct phmac_protkey *pk)
{
int rc, i;
pk->len = sizeof(pk->protkey);
for (rc = -EIO, i = 0; rc && i < 5; i++) {
if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
rc = -EINTR;
goto out;
}
rc = pkey_key2protkey(key, keylen,
pk->protkey, &pk->len, &pk->type,
PKEY_XFLAG_NOMEMALLOC);
}
out:
pr_debug("rc=%d\n", rc);
return rc;
}
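
/*
 * (Re-)convert the tfm's key material and track the conversion
 * state in pk_state under pk_lock. May sleep, so only to be
 * called from contexts where maysleep is true.
 */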
static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
{
struct phmac_protkey pk;
int rc;
spin_lock_bh(&tfm_ctx->pk_lock);
tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
spin_unlock_bh(&tfm_ctx->pk_lock);
rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk);
spin_lock_bh(&tfm_ctx->pk_lock);
if (rc) {
tfm_ctx->pk_state = rc;
} else {
tfm_ctx->pk_state = PK_STATE_VALID;
tfm_ctx->pk = pk;
}
spin_unlock_bh(&tfm_ctx->pk_lock);
memzero_explicit(&pk, sizeof(pk));
pr_debug("rc=%d\n", rc);
return rc;
}
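
/*
 * Store the input message bit length (IMBL) into the parameter
 * block: a 64 bit quantity for the SHA-256 block size, a 128 bit
 * quantity for the SHA-512 block size.
 */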
static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
u64 buflen_hi, unsigned int blocksize)
{
u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
switch (blocksize) {
case SHA256_BLOCK_SIZE:
*(u64 *)imbl = buflen_lo * BITS_PER_BYTE;
break;
case SHA512_BLOCK_SIZE:
*(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3;
break;
default:
break;
}
}
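
/*
 * Feed the walked source data into KMAC: top up a pending partial
 * block first, then process as many full blocks as possible
 * directly from the walk, and stash any remainder in the context
 * buffer. When KMAC stops short, the protected key has become
 * invalid (presumably because the wrapping key changed); with
 * maysleep false this returns -EKEYEXPIRED so the caller can
 * defer the request to the engine, otherwise the key is
 * re-converted and the operation retried.
 */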
static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
struct hash_walk_helper *hwh = &req_ctx->hwh;
unsigned int bs = crypto_ahash_blocksize(tfm);
unsigned int offset, k, n;
int rc = 0;
while (hwh->walkbytes > 0) {
offset = ctx->buflen[0] % bs;
if (offset + hwh->walkbytes < bs)
goto store;
if (offset) {
n = bs - offset;
memcpy(ctx->buf + offset, hwh->walkaddr, n);
ctx->gr0.iimp = 1;
for (;;) {
k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
if (likely(k == bs))
break;
if (unlikely(k > 0)) {
rc = -EIO;
goto out;
}
if (!maysleep) {
rc = -EKEYEXPIRED;
goto out;
}
rc = phmac_convert_key(tfm_ctx);
if (rc)
goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->pk.protkey, tfm_ctx->pk.len);
spin_unlock_bh(&tfm_ctx->pk_lock);
}
ctx->buflen[0] += n;
if (ctx->buflen[0] < n)
ctx->buflen[1]++;
rc = hwh_advance(hwh, n);
if (unlikely(rc))
goto out;
offset = 0;
}
while (hwh->walkbytes >= bs) {
n = (hwh->walkbytes / bs) * bs;
ctx->gr0.iimp = 1;
k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n);
if (likely(k > 0)) {
ctx->buflen[0] += k;
if (ctx->buflen[0] < k)
ctx->buflen[1]++;
rc = hwh_advance(hwh, k);
if (unlikely(rc))
goto out;
}
if (unlikely(k < n)) {
if (!maysleep) {
rc = -EKEYEXPIRED;
goto out;
}
rc = phmac_convert_key(tfm_ctx);
if (rc)
goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->pk.protkey, tfm_ctx->pk.len);
spin_unlock_bh(&tfm_ctx->pk_lock);
}
}
store:
if (hwh->walkbytes) {
memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes);
ctx->buflen[0] += hwh->walkbytes;
if (ctx->buflen[0] < hwh->walkbytes)
ctx->buflen[1]++;
rc = hwh_advance(hwh, hwh->walkbytes);
if (unlikely(rc))
goto out;
}
}
out:
pr_debug("rc=%d\n", rc);
return rc;
}
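
/*
 * Process the final (partial) block with iimp cleared and IMBL set
 * in the parameter block, then copy the resulting MAC into
 * req->result. An invalidated protected key is handled as in
 * phmac_kmac_update().
 */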
static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
unsigned int ds = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_ahash_blocksize(tfm);
unsigned int k, n;
int rc = 0;
n = ctx->buflen[0] % bs;
ctx->gr0.iimp = 0;
kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
for (;;) {
k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n);
if (likely(k == n))
break;
if (unlikely(k > 0)) {
rc = -EIO;
goto out;
}
if (!maysleep) {
rc = -EKEYEXPIRED;
goto out;
}
rc = phmac_convert_key(tfm_ctx);
if (rc)
goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->pk.protkey, tfm_ctx->pk.len);
spin_unlock_bh(&tfm_ctx->pk_lock);
}
memcpy(req->result, ctx->param, ds);
out:
pr_debug("rc=%d\n", rc);
return rc;
}
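
/*
 * ahash init: reset the request context, take the function code
 * from the tfm and copy the current protected key into the
 * parameter block. Fails with -ENOKEY if no key has been set.
 */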
static int phmac_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
unsigned int bs = crypto_ahash_blocksize(tfm);
int rc = 0;
memset(req_ctx, 0, sizeof(*req_ctx));
if (!tfm_ctx->fc) {
rc = -ENOKEY;
goto out;
}
kmac_ctx->gr0.fc = tfm_ctx->fc;
spin_lock_bh(&tfm_ctx->pk_lock);
memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->pk.protkey, tfm_ctx->pk.len);
spin_unlock_bh(&tfm_ctx->pk_lock);
out:
pr_debug("rc=%d\n", rc);
return rc;
}
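
/*
 * ahash update: attempt synchronous processing (no sleeping); if
 * that is not possible, or requests of this tfm are already queued
 * on the engine, transfer the request to the crypto engine.
 */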
static int phmac_update(struct ahash_request *req)
{
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
struct hash_walk_helper *hwh = &req_ctx->hwh;
int rc;
rc = hwh_prepare(req, hwh);
if (rc)
goto out;
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false);
if (rc == 0)
goto out;
}
if (rc == 0 || rc == -EKEYEXPIRED) {
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
atomic_dec(&tfm_ctx->via_engine_ctr);
}
if (rc != -EINPROGRESS) {
hwh_advance(hwh, rc);
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
}
out:
pr_debug("rc=%d\n", rc);
return rc;
}
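
/* ahash final: as with update, synchronous first, engine fallback. */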
static int phmac_final(struct ahash_request *req)
{
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
int rc = 0;
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false);
if (rc == 0)
goto out;
}
if (rc == 0 || rc == -EKEYEXPIRED) {
req->nbytes = 0;
req_ctx->final = true;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
atomic_dec(&tfm_ctx->via_engine_ctr);
}
out:
if (rc != -EINPROGRESS)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("rc=%d\n", rc);
return rc;
}
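
/* ahash finup: update plus final, again with engine fallback. */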
static int phmac_finup(struct ahash_request *req)
{
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
struct hash_walk_helper *hwh = &req_ctx->hwh;
int rc;
rc = hwh_prepare(req, hwh);
if (rc)
goto out;
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false);
if (rc == 0)
req->nbytes = 0;
}
if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false);
if (rc == 0)
goto out;
}
if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->final = true;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
atomic_dec(&tfm_ctx->via_engine_ctr);
}
if (rc != -EINPROGRESS)
hwh_advance(hwh, rc);
out:
if (rc != -EINPROGRESS)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("rc=%d\n", rc);
return rc;
}
static int phmac_digest(struct ahash_request *req)
{
int rc;
rc = phmac_init(req);
if (rc)
goto out;
rc = phmac_finup(req);
out:
pr_debug("rc=%d\n", rc);
return rc;
}
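
/*
 * ahash setkey: as long as the tfm is untested, the crypto
 * selftests are running and feed in clear keys, so the key is
 * first wrapped into a clear-key token. The key is then converted
 * to a protected key, and the CPACF function code is chosen based
 * on the digest size and the resulting protected-key type.
 */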
static int phmac_setkey(struct crypto_ahash *tfm,
const u8 *key, unsigned int keylen)
{
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_ahash_blocksize(tfm);
unsigned int tmpkeylen;
u8 *tmpkey = NULL;
int rc = 0;
if (!crypto_ahash_tested(tfm)) {
tmpkeylen = sizeof(struct hmac_clrkey_token) + bs;
tmpkey = kzalloc(tmpkeylen, GFP_KERNEL);
if (!tmpkey) {
rc = -ENOMEM;
goto out;
}
rc = make_clrkey_token(key, keylen, ds, tmpkey);
if (rc)
goto out;
keylen = tmpkeylen;
key = tmpkey;
}
rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen);
if (rc)
goto out;
rc = phmac_convert_key(tfm_ctx);
if (rc)
goto out;
switch (ds) {
case SHA224_DIGEST_SIZE:
if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
rc = -EINVAL;
else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224;
break;
case SHA256_DIGEST_SIZE:
if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
rc = -EINVAL;
else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256;
break;
case SHA384_DIGEST_SIZE:
if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
rc = -EINVAL;
else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384;
break;
case SHA512_DIGEST_SIZE:
if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
rc = -EINVAL;
else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512;
break;
default:
tfm_ctx->fc = 0;
rc = -EINVAL;
}
out:
kfree(tmpkey);
pr_debug("rc=%d\n", rc);
return rc;
}
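
/* Only the KMAC state is exported/imported; see statesize below. */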
static int phmac_export(struct ahash_request *req, void *out)
{
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
memcpy(out, ctx, sizeof(*ctx));
return 0;
}
static int phmac_import(struct ahash_request *req, const void *in)
{
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
memset(req_ctx, 0, sizeof(*req_ctx));
memcpy(ctx, in, sizeof(*ctx));
return 0;
}
static int phmac_init_tfm(struct crypto_ahash *tfm)
{
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
memset(tfm_ctx, 0, sizeof(*tfm_ctx));
spin_lock_init(&tfm_ctx->pk_lock);
crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx));
return 0;
}
static void phmac_exit_tfm(struct crypto_ahash *tfm)
{
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf));
memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk));
}
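
/*
 * Crypto engine callback. Sleeping is allowed here. If the
 * protected key (still) turns out to be expired, -ENOSPC is
 * returned, which makes the engine re-queue the request; the
 * cond_resched() avoids immediately re-entering this callback.
 */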
static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
struct hash_walk_helper *hwh = &req_ctx->hwh;
int rc = -EINVAL;
if (req->nbytes) {
rc = phmac_kmac_update(req, true);
if (rc == -EKEYEXPIRED) {
pr_debug("rescheduling request\n");
cond_resched();
return -ENOSPC;
} else if (rc) {
hwh_advance(hwh, rc);
goto out;
}
req->nbytes = 0;
}
if (req_ctx->final) {
rc = phmac_kmac_final(req, true);
if (rc == -EKEYEXPIRED) {
pr_debug("rescheduling request\n");
cond_resched();
return -ENOSPC;
}
}
out:
if (rc || req_ctx->final)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("request complete with rc=%d\n", rc);
local_bh_disable();
atomic_dec(&tfm_ctx->via_engine_ctr);
crypto_finalize_hash_request(engine, req, rc);
local_bh_enable();
return rc;
}
#define S390_ASYNC_PHMAC_ALG(x) \
{ \
.base = { \
.init = phmac_init, \
.update = phmac_update, \
.final = phmac_final, \
.finup = phmac_finup, \
.digest = phmac_digest, \
.setkey = phmac_setkey, \
.import = phmac_import, \
.export = phmac_export, \
.init_tfm = phmac_init_tfm, \
.exit_tfm = phmac_exit_tfm, \
.halg = { \
.digestsize = SHA##x##_DIGEST_SIZE, \
.statesize = sizeof(struct kmac_sha2_ctx), \
.base = { \
.cra_name = "phmac(sha" #x ")", \
.cra_driver_name = "phmac_s390_sha" #x, \
.cra_blocksize = SHA##x##_BLOCK_SIZE, \
.cra_priority = 400, \
.cra_flags = CRYPTO_ALG_ASYNC | \
CRYPTO_ALG_NO_FALLBACK, \
.cra_ctxsize = sizeof(struct phmac_tfm_ctx), \
.cra_module = THIS_MODULE, \
}, \
}, \
}, \
.op = { \
.do_one_request = phmac_do_one_request, \
}, \
}
static struct phmac_alg {
unsigned int fc;
struct ahash_engine_alg alg;
bool registered;
} phmac_algs[] = {
{
.fc = CPACF_KMAC_PHMAC_SHA_224,
.alg = S390_ASYNC_PHMAC_ALG(224),
}, {
.fc = CPACF_KMAC_PHMAC_SHA_256,
.alg = S390_ASYNC_PHMAC_ALG(256),
}, {
.fc = CPACF_KMAC_PHMAC_SHA_384,
.alg = S390_ASYNC_PHMAC_ALG(384),
}, {
.fc = CPACF_KMAC_PHMAC_SHA_512,
.alg = S390_ASYNC_PHMAC_ALG(512),
}
};
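
/*
 * The crypto engine requires a struct device; this minimal misc
 * device provides one (phmac_dev.this_device is handed to the
 * engine in s390_phmac_init()).
 */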
static struct miscdevice phmac_dev = {
.name = "phmac",
.minor = MISC_DYNAMIC_MINOR,
};
static void s390_phmac_exit(void)
{
struct phmac_alg *phmac;
int i;
if (phmac_crypto_engine) {
crypto_engine_stop(phmac_crypto_engine);
crypto_engine_exit(phmac_crypto_engine);
}
for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) {
phmac = &phmac_algs[i];
if (phmac->registered)
crypto_engine_unregister_ahash(&phmac->alg);
}
misc_deregister(&phmac_dev);
}
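
/*
 * Module init: KLMD SHA-256/512 is required for hash_key(); each
 * phmac variant is then registered only if its KMAC function code
 * is available on this machine.
 */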
static int __init s390_phmac_init(void)
{
struct phmac_alg *phmac;
int i, rc;
if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
return -ENODEV;
if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
return -ENODEV;
rc = misc_register(&phmac_dev);
if (rc)
return rc;
phmac_crypto_engine =
crypto_engine_alloc_init_and_set(phmac_dev.this_device,
true, false, MAX_QLEN);
if (!phmac_crypto_engine) {
rc = -ENOMEM;
goto out_err;
}
rc = crypto_engine_start(phmac_crypto_engine);
if (rc) {
crypto_engine_exit(phmac_crypto_engine);
phmac_crypto_engine = NULL;
goto out_err;
}
for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) {
phmac = &phmac_algs[i];
if (!cpacf_query_func(CPACF_KMAC, phmac->fc))
continue;
rc = crypto_engine_register_ahash(&phmac->alg);
if (rc)
goto out_err;
phmac->registered = true;
pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name);
}
return 0;
out_err:
s390_phmac_exit();
return rc;
}
module_init(s390_phmac_init);
module_exit(s390_phmac_exit);
MODULE_ALIAS_CRYPTO("phmac(sha224)");
MODULE_ALIAS_CRYPTO("phmac(sha256)");
MODULE_ALIAS_CRYPTO("phmac(sha384)");
MODULE_ALIAS_CRYPTO("phmac(sha512)");
MODULE_DESCRIPTION("S390 HMAC driver for protected keys");
MODULE_LICENSE("GPL");