GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/crypto/geode-aes.c
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;
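
/*
 * There is a single AES engine per Geode LX, reached through the one
 * global MMIO mapping (_iobase); the spinlock above serializes all
 * register access to it.
 */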
/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

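/*
 * Program one operation into the engine: source and destination
 * physical addresses and length, then the control word to start it.
 * Poll the interrupt register until the engine reports completion or
 * the timeout counter expires.  Returns 0 on success, 1 on timeout.
 */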
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}

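/*
 * Run one hardware operation described by @op under the engine lock,
 * loading the IV and key registers first as needed.  Returns the
 * number of bytes processed.
 */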
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* If the source and destination are the same, we need to turn
	 * on the coherent flags; otherwise we don't need to worry.
	 */

	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}

/* CRYPTO-API Functions */

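/*
 * The engine itself handles only 128-bit keys.  192- and 256-bit keys
 * are accepted but routed to the software fallback transform.
 */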
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

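/*
 * For the fallback path the blkcipher descriptor is pointed at the
 * software transform for the duration of the call, then restored.
 */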
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

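/*
 * Single-block (16 byte) ECB operations backing the plain "aes"
 * cipher.  Anything but a 128-bit key goes to the software fallback.
 */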
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

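/*
 * Allocate the software fallback cipher when the tfm is created and
 * release it on exit.
 */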
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_cipher(op->fallback.cip);
	op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
	.cra_name = "aes",
	.cra_driver_name = "geode-aes",
	.cra_priority = 300,
	.cra_alignmask = 15,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
			CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_blocksize = AES_MIN_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = geode_setkey_cip,
			.cia_encrypt = geode_encrypt,
			.cia_decrypt = geode_decrypt
		}
	}
};

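/*
 * Block-cipher walk helpers: iterate over the scatterlists in
 * virtually mapped chunks, hand each whole-block run to the engine,
 * and let blkcipher_walk_done() carry any partial remainder forward.
 */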
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.blk)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(op->fallback.blk);
	op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
			CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_MIN_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_cbc_encrypt,
			.decrypt = geode_cbc_decrypt,
			.ivsize = AES_IV_LENGTH,
		}
	}
};

static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-geode",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
			CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_blocksize = AES_MIN_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_op),
	.cra_alignmask = 15,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = geode_setkey_blk,
			.encrypt = geode_ecb_encrypt,
			.decrypt = geode_ecb_decrypt,
		}
	}
};

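/*
 * Usage note (illustrative, not part of this driver): consumers reach
 * the algorithms above by name through the generic crypto API.  A
 * minimal sketch, assuming the synchronous blkcipher interface of this
 * kernel generation:
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_blkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_blkcipher_set_iv(tfm, iv, AES_IV_LENGTH);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	crypto_blkcipher_encrypt(&desc, &sg_dst, &sg_src, nbytes);
 *	crypto_free_blkcipher(tfm);
 */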
static void __devexit
geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

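/*
 * Enable the PCI device, map BAR 0 (the AES engine registers), clear
 * any stale interrupt state, and register the three algorithms.  The
 * error labels unwind in reverse order of setup.
 */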
static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

eecb:
	crypto_unregister_alg(&geode_ecb_alg);

ealg:
	crypto_unregister_alg(&geode_alg);

eiomap:
	pci_iounmap(dev, _iobase);

erequest:
	pci_release_regions(dev);

eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = __devexit_p(geode_aes_remove)
};

static int __init
geode_aes_init(void)
{
	return pci_register_driver(&geode_aes_driver);
}

static void __exit
geode_aes_exit(void)
{
	pci_unregister_driver(&geode_aes_driver);
}

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

module_init(geode_aes_init);
module_exit(geode_aes_exit);