GitHub Repository: freebsd/freebsd-src
Path: blob/main/crypto/openssl/engines/e_padlock.c
/*
 * Copyright 2004-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low level AES and engine functions (which are deprecated
 * for non-internal use) in order to implement the padlock engine AES ciphers.
 */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <stdio.h>
#include <string.h>

#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/engine.h>
#include <openssl/evp.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/err.h>
#include <openssl/modes.h>

#ifndef OPENSSL_NO_PADLOCKENG

/*
 * VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only does it
 * not exist elsewhere, it cannot even be compiled on other platforms!
 */

# undef COMPILE_PADLOCKENG
# if defined(PADLOCK_ASM)
#  define COMPILE_PADLOCKENG
#  ifdef OPENSSL_NO_DYNAMIC_ENGINE
static ENGINE *ENGINE_padlock(void);
#  endif
# endif

# ifdef OPENSSL_NO_DYNAMIC_ENGINE
void engine_load_padlock_int(void);
void engine_load_padlock_int(void)
{
/* On non-x86 CPUs it just returns. */
#  ifdef COMPILE_PADLOCKENG
    ENGINE *toadd = ENGINE_padlock();
    if (!toadd)
        return;
    ERR_set_mark();
    ENGINE_add(toadd);
    /*
     * If the "add" worked, it gets a structural reference. So either way, we
     * release our just-created reference.
     */
    ENGINE_free(toadd);
    /*
     * If the "add" didn't work, it was probably a conflict because it was
     * already added (e.g. someone calling ENGINE_load_blah and then calling
     * ENGINE_load_builtin_engines() perhaps).
     */
    ERR_pop_to_mark();
#  endif
}

# endif
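
/*
 * Usage sketch (illustrative, not part of the engine itself): with engine
 * support compiled in, an application typically reaches this engine via
 * the generic ENGINE API, along these lines:
 *
 *     ENGINE *e;
 *
 *     ENGINE_load_builtin_engines();
 *     if ((e = ENGINE_by_id("padlock")) != NULL) {
 *         if (ENGINE_init(e)) {
 *             ENGINE_set_default(e, ENGINE_METHOD_CIPHERS);
 *             ENGINE_finish(e);   // release the functional reference
 *         }
 *         ENGINE_free(e);         // release the structural reference
 *     }
 */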

# ifdef COMPILE_PADLOCKENG

/* Function for ENGINE detection and control */
static int padlock_available(void);
static int padlock_init(ENGINE *e);

/* RNG Stuff */
static RAND_METHOD padlock_rand;

/* Cipher Stuff */
static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
                           const int **nids, int nid);

/* Engine names */
static const char *padlock_id = "padlock";
static char padlock_name[100];

/* Available features */
static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
static int padlock_use_rng = 0; /* Random Number Generator */

/* ===== Engine "management" functions ===== */

/* Prepare the ENGINE structure for registration */
static int padlock_bind_helper(ENGINE *e)
{
    /* Check available features */
    padlock_available();

    /*
     * RNG is currently disabled for reasons discussed in commentary just
     * before the padlock_rand_bytes function.
     */
    padlock_use_rng = 0;

    /* Generate a nice engine name with available features */
    BIO_snprintf(padlock_name, sizeof(padlock_name),
                 "VIA PadLock (%s, %s)",
                 padlock_use_rng ? "RNG" : "no-RNG",
                 padlock_use_ace ? "ACE" : "no-ACE");

    /* Register everything or return with an error */
    if (!ENGINE_set_id(e, padlock_id) ||
        !ENGINE_set_name(e, padlock_name) ||
        !ENGINE_set_init_function(e, padlock_init) ||
        (padlock_use_ace && !ENGINE_set_ciphers(e, padlock_ciphers)) ||
        (padlock_use_rng && !ENGINE_set_RAND(e, &padlock_rand))) {
        return 0;
    }

    /* Everything looks good */
    return 1;
}

# ifdef OPENSSL_NO_DYNAMIC_ENGINE
/* Constructor */
static ENGINE *ENGINE_padlock(void)
{
    ENGINE *eng = ENGINE_new();

    if (eng == NULL) {
        return NULL;
    }

    if (!padlock_bind_helper(eng)) {
        ENGINE_free(eng);
        return NULL;
    }

    return eng;
}
# endif

/* Check availability of the engine */
static int padlock_init(ENGINE *e)
{
    return (padlock_use_rng || padlock_use_ace);
}

# ifndef AES_ASM
static int padlock_aes_set_encrypt_key(const unsigned char *userKey,
                                       const int bits,
                                       AES_KEY *key);
static int padlock_aes_set_decrypt_key(const unsigned char *userKey,
                                       const int bits,
                                       AES_KEY *key);
#  define AES_ASM
#  define AES_set_encrypt_key padlock_aes_set_encrypt_key
#  define AES_set_decrypt_key padlock_aes_set_decrypt_key
#  include "../crypto/aes/aes_core.c"
# endif
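
/*
 * Note on the block above: when no assembler AES is available, the engine
 * pulls in the portable C key-schedule code by #include-ing aes_core.c,
 * with AES_set_encrypt_key/AES_set_decrypt_key renamed via the two macros
 * so that the resulting symbols stay private to this translation unit.
 */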

/*
 * This stuff is needed if this ENGINE is being compiled into a
 * self-contained shared-library.
 */
# ifndef OPENSSL_NO_DYNAMIC_ENGINE
static int padlock_bind_fn(ENGINE *e, const char *id)
{
    if (id && (strcmp(id, padlock_id) != 0)) {
        return 0;
    }

    if (!padlock_bind_helper(e)) {
        return 0;
    }

    return 1;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
IMPLEMENT_DYNAMIC_BIND_FN(padlock_bind_fn)
# endif                         /* !OPENSSL_NO_DYNAMIC_ENGINE */
/* ===== Here comes the "real" engine ===== */

/* Some AES-related constants */
# define AES_BLOCK_SIZE 16
# define AES_KEY_SIZE_128 16
# define AES_KEY_SIZE_192 24
# define AES_KEY_SIZE_256 32
/*
 * Here we store the status information relevant to the current context.
 */
/*
 * BIG FAT WARNING: Inline assembler in PADLOCK_XCRYPT_ASM() depends on
 * the order of items in this structure. Don't blindly modify, reorder,
 * etc!
 */
struct padlock_cipher_data {
    unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
    union {
        unsigned int pad[4];
        struct {
            int rounds:4;
            int dgst:1;         /* n/a in C3 */
            int align:1;        /* n/a in C3 */
            int ciphr:1;        /* n/a in C3 */
            unsigned int keygen:1;
            int interm:1;
            unsigned int encdec:1;
            int ksize:2;
        } b;
    } cword;                    /* Control word */
    AES_KEY ks;                 /* Encryption key */
};
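
/*
 * The cword union above is the in-memory image of the 128-bit control
 * word consumed by the REP XCRYPT instructions (per VIA's published
 * programming guide); the bitfields are just a C-level view of its first
 * 32 bits, which is why the warning above forbids reordering anything.
 */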

/* Interface to assembler module */
unsigned int padlock_capability(void);
void padlock_key_bswap(AES_KEY *key);
void padlock_verify_context(struct padlock_cipher_data *ctx);
void padlock_reload_key(void);
void padlock_aes_block(void *out, const void *inp,
                       struct padlock_cipher_data *ctx);
int padlock_ecb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_cbc_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_cfb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_ofb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_ctr32_encrypt(void *out, const void *inp,
                          struct padlock_cipher_data *ctx, size_t len);
int padlock_xstore(void *out, int edx);
void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len);
void padlock_sha1(void *ctx, const void *inp, size_t len);
void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len);
void padlock_sha256(void *ctx, const void *inp, size_t len);

/*
 * Load supported features of the CPU to see if the PadLock is available.
 */
static int padlock_available(void)
{
    unsigned int edx = padlock_capability();

    /* Fill up some flags */
    padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
    padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));

    return padlock_use_ace + padlock_use_rng;
}
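
/*
 * The masks above follow the flag layout of VIA's extended CPUID leaf
 * (0xC0000001): for each unit, one EDX bit reports "present" and the
 * next one "enabled" (bits 6/7 for ACE, bits 2/3 for the RNG), so a
 * unit only counts when both bits of its pair are set.
 */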

/* ===== AES encryption/decryption ===== */

# if defined(NID_aes_128_cfb128) && !defined(NID_aes_128_cfb)
#  define NID_aes_128_cfb NID_aes_128_cfb128
# endif

# if defined(NID_aes_128_ofb128) && !defined(NID_aes_128_ofb)
#  define NID_aes_128_ofb NID_aes_128_ofb128
# endif

# if defined(NID_aes_192_cfb128) && !defined(NID_aes_192_cfb)
#  define NID_aes_192_cfb NID_aes_192_cfb128
# endif

# if defined(NID_aes_192_ofb128) && !defined(NID_aes_192_ofb)
#  define NID_aes_192_ofb NID_aes_192_ofb128
# endif

# if defined(NID_aes_256_cfb128) && !defined(NID_aes_256_cfb)
#  define NID_aes_256_cfb NID_aes_256_cfb128
# endif

# if defined(NID_aes_256_ofb128) && !defined(NID_aes_256_ofb)
#  define NID_aes_256_ofb NID_aes_256_ofb128
# endif

/* List of supported ciphers. */
static const int padlock_cipher_nids[] = {
    NID_aes_128_ecb,
    NID_aes_128_cbc,
    NID_aes_128_cfb,
    NID_aes_128_ofb,
    NID_aes_128_ctr,

    NID_aes_192_ecb,
    NID_aes_192_cbc,
    NID_aes_192_cfb,
    NID_aes_192_ofb,
    NID_aes_192_ctr,

    NID_aes_256_ecb,
    NID_aes_256_cbc,
    NID_aes_256_cfb,
    NID_aes_256_ofb,
    NID_aes_256_ctr
};

static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids) /
                                      sizeof(padlock_cipher_nids[0]));

/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                                const unsigned char *iv, int enc);

# define NEAREST_ALIGNED(ptr) ((unsigned char *)(ptr) +         \
        ((0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F))
# define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
        NEAREST_ALIGNED(EVP_CIPHER_CTX_get_cipher_data(ctx)))
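
/*
 * Worked example for NEAREST_ALIGNED: with ptr = 0x1007 the low nibble
 * is 0x7, so (0x10 - 0x7) & 0x0F = 9 and the macro yields 0x1010, the
 * next 16-byte boundary; an already-aligned pointer advances by 0.
 * PadLock requires its cipher data 16-byte aligned, hence the +16 slack
 * in the impl_ctx_size set up further below.
 */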

static int
padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    return padlock_ecb_encrypt(out_arg, in_arg,
                               ALIGNED_CIPHER_DATA(ctx), nbytes);
}

static int
padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    int ret;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);
    if ((ret = padlock_cbc_encrypt(out_arg, in_arg, cdata, nbytes)))
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);
    return ret;
}
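
/*
 * Note the IV round-trip used by the chaining modes: the EVP layer owns
 * the IV, so each call copies it into the aligned cdata block, lets the
 * hardware advance it, and on success copies it back, preserving the
 * chain across consecutive EVP_CipherUpdate() calls.
 */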

static int
padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        if (EVP_CIPHER_CTX_is_encrypting(ctx))
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
                chunk++, nbytes--;
            } else
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ ivp[chunk];
                ivp[chunk++] = c, nbytes--;
            }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        if (cdata->cword.b.encdec) {
            cdata->cword.b.encdec = 0;
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            cdata->cword.b.encdec = 1;
            padlock_reload_key();
            while (nbytes) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ *ivp;
                *(ivp++) = c, nbytes--;
            }
        } else {
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            padlock_reload_key();
            while (nbytes) {
                *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
                ivp++, nbytes--;
            }
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}

static int
padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /*
     * ctx->num is maintained in byte-oriented modes, such as CFB and OFB...
     */
    if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
            *(out_arg++) = *(in_arg++) ^ ivp[chunk];
            chunk++, nbytes--;
        }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        padlock_reload_key();   /* empirically found */
        padlock_aes_block(ivp, ivp, cdata);
        padlock_reload_key();   /* empirically found */
        while (nbytes) {
            *(out_arg++) = *(in_arg++) ^ *ivp;
            ivp++, nbytes--;
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}

static void padlock_ctr32_encrypt_glue(const unsigned char *in,
                                       unsigned char *out, size_t blocks,
                                       struct padlock_cipher_data *ctx,
                                       const unsigned char *ivec)
{
    memcpy(ctx->iv, ivec, AES_BLOCK_SIZE);
    padlock_ctr32_encrypt(out, in, ctx, AES_BLOCK_SIZE * blocks);
}

static int
padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    int n = EVP_CIPHER_CTX_get_num(ctx);
    unsigned int num;

    if (n < 0)
        return 0;
    num = (unsigned int)n;

    CRYPTO_ctr128_encrypt_ctr32(in_arg, out_arg, nbytes,
                                cdata, EVP_CIPHER_CTX_iv_noconst(ctx),
                                EVP_CIPHER_CTX_buf_noconst(ctx), &num,
                                (ctr128_f) padlock_ctr32_encrypt_glue);

    EVP_CIPHER_CTX_set_num(ctx, (size_t)num);
    return 1;
}
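
/*
 * CRYPTO_ctr128_encrypt_ctr32() (from <openssl/modes.h>) handles partial
 * blocks and 32-bit counter wrap in software and only hands whole blocks
 * to the ctr128_f callback; the glue function above merely rearranges
 * the arguments and copies the IV into the aligned context that the
 * PadLock assembler expects.
 */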

# define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
# define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
# define EVP_CIPHER_block_size_OFB 1
# define EVP_CIPHER_block_size_CFB 1
# define EVP_CIPHER_block_size_CTR 1

/*
 * Declaring so many ciphers by hand would be a pain. Instead introduce a bit
 * of preprocessor magic :-)
 */
# define DECLARE_AES_EVP(ksize,lmode,umode)                             \
static EVP_CIPHER *_hidden_aes_##ksize##_##lmode = NULL;                \
static const EVP_CIPHER *padlock_aes_##ksize##_##lmode(void)            \
{                                                                       \
    if (_hidden_aes_##ksize##_##lmode == NULL                           \
        && ((_hidden_aes_##ksize##_##lmode =                            \
             EVP_CIPHER_meth_new(NID_aes_##ksize##_##lmode,             \
                                 EVP_CIPHER_block_size_##umode,         \
                                 AES_KEY_SIZE_##ksize)) == NULL         \
            || !EVP_CIPHER_meth_set_iv_length(_hidden_aes_##ksize##_##lmode, \
                                              AES_BLOCK_SIZE)           \
            || !EVP_CIPHER_meth_set_flags(_hidden_aes_##ksize##_##lmode, \
                                          0 | EVP_CIPH_##umode##_MODE)  \
            || !EVP_CIPHER_meth_set_init(_hidden_aes_##ksize##_##lmode, \
                                         padlock_aes_init_key)          \
            || !EVP_CIPHER_meth_set_do_cipher(_hidden_aes_##ksize##_##lmode, \
                                              padlock_##lmode##_cipher) \
            || !EVP_CIPHER_meth_set_impl_ctx_size(_hidden_aes_##ksize##_##lmode, \
                                                  sizeof(struct padlock_cipher_data) + 16) \
            || !EVP_CIPHER_meth_set_set_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_set_asn1_iv) \
            || !EVP_CIPHER_meth_set_get_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_get_asn1_iv))) { \
        EVP_CIPHER_meth_free(_hidden_aes_##ksize##_##lmode);            \
        _hidden_aes_##ksize##_##lmode = NULL;                           \
    }                                                                   \
    return _hidden_aes_##ksize##_##lmode;                               \
}
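
/*
 * For instance, DECLARE_AES_EVP(128, cbc, CBC) expands to a lazily
 * constructed singleton: a static _hidden_aes_128_cbc pointer plus a
 * padlock_aes_128_cbc() accessor that builds the EVP_CIPHER method table
 * (block size, key size, flags, init/do_cipher callbacks) on first use
 * and frees the half-built method if any setter fails.
 */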

DECLARE_AES_EVP(128, ecb, ECB)
DECLARE_AES_EVP(128, cbc, CBC)
DECLARE_AES_EVP(128, cfb, CFB)
DECLARE_AES_EVP(128, ofb, OFB)
DECLARE_AES_EVP(128, ctr, CTR)

DECLARE_AES_EVP(192, ecb, ECB)
DECLARE_AES_EVP(192, cbc, CBC)
DECLARE_AES_EVP(192, cfb, CFB)
DECLARE_AES_EVP(192, ofb, OFB)
DECLARE_AES_EVP(192, ctr, CTR)

DECLARE_AES_EVP(256, ecb, ECB)
DECLARE_AES_EVP(256, cbc, CBC)
DECLARE_AES_EVP(256, cfb, CFB)
DECLARE_AES_EVP(256, ofb, OFB)
DECLARE_AES_EVP(256, ctr, CTR)

static int
padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids,
                int nid)
{
    /* No specific cipher => return a list of supported nids ... */
    if (!cipher) {
        *nids = padlock_cipher_nids;
        return padlock_cipher_nids_num;
    }

    /* ... or the requested "cipher" otherwise */
    switch (nid) {
    case NID_aes_128_ecb:
        *cipher = padlock_aes_128_ecb();
        break;
    case NID_aes_128_cbc:
        *cipher = padlock_aes_128_cbc();
        break;
    case NID_aes_128_cfb:
        *cipher = padlock_aes_128_cfb();
        break;
    case NID_aes_128_ofb:
        *cipher = padlock_aes_128_ofb();
        break;
    case NID_aes_128_ctr:
        *cipher = padlock_aes_128_ctr();
        break;

    case NID_aes_192_ecb:
        *cipher = padlock_aes_192_ecb();
        break;
    case NID_aes_192_cbc:
        *cipher = padlock_aes_192_cbc();
        break;
    case NID_aes_192_cfb:
        *cipher = padlock_aes_192_cfb();
        break;
    case NID_aes_192_ofb:
        *cipher = padlock_aes_192_ofb();
        break;
    case NID_aes_192_ctr:
        *cipher = padlock_aes_192_ctr();
        break;

    case NID_aes_256_ecb:
        *cipher = padlock_aes_256_ecb();
        break;
    case NID_aes_256_cbc:
        *cipher = padlock_aes_256_cbc();
        break;
    case NID_aes_256_cfb:
        *cipher = padlock_aes_256_cfb();
        break;
    case NID_aes_256_ofb:
        *cipher = padlock_aes_256_ofb();
        break;
    case NID_aes_256_ctr:
        *cipher = padlock_aes_256_ctr();
        break;

    default:
        /* Sorry, we don't support this NID */
        *cipher = NULL;
        return 0;
    }

    return 1;
}

/* Prepare the encryption key for PadLock usage */
static int
padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                     const unsigned char *iv, int enc)
{
    struct padlock_cipher_data *cdata;
    int key_len = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
    unsigned long mode = EVP_CIPHER_CTX_get_mode(ctx);

    if (key == NULL)
        return 0;               /* ERROR */

    cdata = ALIGNED_CIPHER_DATA(ctx);
    memset(cdata, 0, sizeof(*cdata));

    /* Prepare Control word. */
    if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
        cdata->cword.b.encdec = 0;
    else
        cdata->cword.b.encdec = (EVP_CIPHER_CTX_is_encrypting(ctx) == 0);
    cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
    cdata->cword.b.ksize = (key_len - 128) / 64;
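    /*
     * For the three legal key sizes this works out to: 128 bits -> 10
     * rounds, ksize 0; 192 bits -> 12 rounds, ksize 1; 256 bits -> 14
     * rounds, ksize 2 (the standard AES round counts).
     */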

    switch (key_len) {
    case 128:
        /*
         * PadLock can generate an extended key for AES128 in hardware
         */
        memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
        cdata->cword.b.keygen = 0;
        break;

    case 192:
    case 256:
        /*
         * Generate an extended AES key in software. Needed for AES192/AES256
         */
        /*
         * Well, the above applies to Stepping 8 CPUs and is listed as
         * hardware errata. They most likely will fix it at some point and
         * then a check for stepping would be due here.
         */
        if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
            && !enc)
            AES_set_decrypt_key(key, key_len, &cdata->ks);
        else
            AES_set_encrypt_key(key, key_len, &cdata->ks);
        /*
         * OpenSSL C functions use byte-swapped extended key.
         */
        padlock_key_bswap(&cdata->ks);
        cdata->cword.b.keygen = 1;
        break;

    default:
        /* ERROR */
        return 0;
    }

    /*
     * This is done to cover for cases when the user reuses the context
     * for a new key. The catch is that if we don't do this, the
     * padlock_*_cipher handlers might proceed with the old key...
     */
    padlock_reload_key();

    return 1;
}

/* ===== Random Number Generator ===== */
/*
 * This code is not engaged. The reason is that it does not comply
 * with recommendations for VIA RNG usage for secure applications
 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
 * provide meaningful error control...
 */
/*
 * Wrapper that provides an interface between the API and the raw PadLock
 * RNG
 */
static int padlock_rand_bytes(unsigned char *output, int count)
{
    unsigned int eax, buf;

    while (count >= 8) {
        eax = padlock_xstore(output, 0);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 8)
            return 0;           /* fatal failure... */
        output += 8;
        count -= 8;
    }
    while (count > 0) {
        eax = padlock_xstore(&buf, 3);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 1)
            return 0;           /* fatal failure... */
        *output++ = (unsigned char)buf;
        count--;
    }
    OPENSSL_cleanse(&buf, sizeof(buf));

    return 1;
}
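
/*
 * The checks above decode the XSTORE status word returned in EAX: bit 6
 * is the RNG-enabled flag, the low five bits report how many bytes were
 * actually stored (8 per call with the second argument 0, 1 with 3), and
 * any nonzero value in bits 10..14 signals the DC-bias, raw-bits or
 * string-filter conditions mentioned in the inline comments, in which
 * case the output is rejected outright.
 */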

/* Dummy but necessary function */
static int padlock_rand_status(void)
{
    return 1;
}

/* Prepare structure for registration */
static RAND_METHOD padlock_rand = {
    NULL,                       /* seed */
    padlock_rand_bytes,         /* bytes */
    NULL,                       /* cleanup */
    NULL,                       /* add */
    padlock_rand_bytes,         /* pseudorand */
    padlock_rand_status,        /* rand status */
};

# endif                         /* COMPILE_PADLOCKENG */
#endif                          /* !OPENSSL_NO_PADLOCKENG */
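
/*
 * On platforms where the engine cannot be compiled, a dynamically loaded
 * "padlock" module must still export the dynamic-engine entry points, so
 * the stub below lets the shared object link and makes bind_engine()
 * fail cleanly instead of leaving the symbol undefined.
 */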

#if defined(OPENSSL_NO_PADLOCKENG) || !defined(COMPILE_PADLOCKENG)
# ifndef OPENSSL_NO_DYNAMIC_ENGINE
OPENSSL_EXPORT
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
OPENSSL_EXPORT
int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns)
{
    return 0;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
# endif
#endif