Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
CTCaer
GitHub Repository: CTCaer/hekate
Path: blob/master/bdk/sec/se.c
3694 views
1
/*
2
* Copyright (c) 2018 naehrwert
3
* Copyright (c) 2018-2026 CTCaer
4
*
5
* This program is free software; you can redistribute it and/or modify it
6
* under the terms and conditions of the GNU General Public License,
7
* version 2, as published by the Free Software Foundation.
8
*
9
* This program is distributed in the hope it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12
* more details.
13
*
14
* You should have received a copy of the GNU General Public License
15
* along with this program. If not, see <http://www.gnu.org/licenses/>.
16
*/
17
18
#include <string.h>
19
20
#include "se.h"
21
#include <memory_map.h>
22
#include <soc/bpmp.h>
23
#include <soc/hw_init.h>
24
#include <soc/pmc.h>
25
#include <soc/timer.h>
26
#include <soc/t210.h>
27
28
// SE DMA linked-list descriptor. This is a hardware-consumed layout:
// field order and widths must not change.
typedef struct _se_ll_t
{
	u32 num;  // Entry count field; set to 0 for a single buffer (see _se_ll_set).
	u32 addr; // Buffer address as seen by the SE engine.
	u32 size; // Buffer size in bytes; hardware field is 24 bits wide.
} se_ll_t;
34
35
// Statically allocated input/output linked lists handed to the engine.
se_ll_t ll_src, ll_dst; // Must be u32 aligned.
// Active list pointers; NULL when that direction is unused. ll_dst_ptr also
// acts as the "output present" flag for the coherency WAR in _se_op_wait().
se_ll_t *ll_src_ptr, *ll_dst_ptr;
37
38
static void _se_ls_1bit(void *buf)
39
{
40
u8 *block = (u8 *)buf;
41
u32 carry = 0;
42
43
for (int i = SE_AES_BLOCK_SIZE - 1; i >= 0; i--)
44
{
45
u8 b = block[i];
46
block[i] = (b << 1) | carry;
47
carry = b >> 7;
48
}
49
50
if (carry)
51
block[SE_AES_BLOCK_SIZE - 1] ^= 0x87;
52
}
53
54
static void _se_ls_1bit_le(void *buf)
55
{
56
u32 *block = (u32 *)buf;
57
u32 carry = 0;
58
59
for (u32 i = 0; i < 4; i++)
60
{
61
u32 b = block[i];
62
block[i] = (b << 1) | carry;
63
carry = b >> 31;
64
}
65
66
if (carry)
67
block[0x0] ^= 0x87;
68
}
69
70
// Describe a single DMA buffer in an SE linked-list descriptor.
static void _se_ll_set(se_ll_t *ll, u32 addr, u32 size)
{
	// Single-entry list: the entry count field stays 0.
	ll->num  = 0;
	ll->size = size & 0xFFFFFF; // Hardware size field is 24 bits wide.
	ll->addr = addr;
}
76
77
// Block until the queued SE operation completes, then apply the memory
// coherency workaround for output buffers.
// Returns 0 on success, 1 on engine error state or flush timeout.
static int _se_op_wait()
{
	bool tegra_t210 = hw_get_chip_id() == GP_HIDREV_MAJOR_T210;

	// Wait for operation to be done.
	while (!(SE(SE_INT_STATUS_REG) & SE_INT_OP_DONE))
		;

	// Check for errors.
	if ((SE(SE_INT_STATUS_REG) & SE_INT_ERR_STAT) ||
		(SE(SE_STATUS_REG) & SE_STATUS_STATE_MASK) != SE_STATUS_STATE_IDLE ||
		(SE(SE_ERR_STATUS_REG) != 0)
	)
	{
		return 1;
	}

	// WAR: Coherency flushing. Only needed when the engine wrote to memory.
	if (ll_dst_ptr)
	{
		// Ensure data is out from SE.
		if (tegra_t210)
			usleep(15); // Worst case scenario.
		else
		{
			// T210B01 has a status bit for that.
			u32 retries = 500000;
			while (SE(SE_STATUS_REG) & SE_STATUS_MEM_IF_BUSY)
			{
				if (!retries)
					return 1;
				usleep(1);
				retries--;
			}
		}

		// Ensure data is out from AHB write queue as well.
		u32 retries = 500000;
		while (AHB_GIZMO(AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID) & MEM_WRQUE_SE_MST_ID)
		{
			if (!retries)
				return 1;
			usleep(1);
			retries--;
		}
	}

	return 0;
}
126
127
// Wait for a previously started (non-oneshot) operation to finish and
// invalidate the BPMP cache so the CPU sees the engine's output.
// Returns 0 on success, nonzero on failure.
static int _se_execute_finalize()
{
	int res = _se_op_wait();

	// Invalidate data after OP is done.
	bpmp_mmu_maintenance(BPMP_MMU_MAINT_INVALID_WAY, false);

	return res;
}
136
137
// Program the DMA linked lists and kick off an SE operation.
// op:         value written to SE_OPERATION_REG (SE_OP_START, SE_OP_CTX_SAVE).
// dst/src:    optional output/input buffers; NULL disables that direction.
// is_oneshot: when true, block until completion and return its status.
//             Otherwise return 0 immediately; the caller must later call
//             _se_execute_finalize().
// Returns 0 on success, nonzero on failure.
static int _se_execute(u32 op, void *dst, u32 dst_size, const void *src, u32 src_size, bool is_oneshot)
{
	// Reject buffers that do not fit the 24-bit LL size field.
	if (dst_size > SE_LL_MAX_SIZE || src_size > SE_LL_MAX_SIZE)
		return 1;

	ll_src_ptr = NULL;
	ll_dst_ptr = NULL;

	if (src)
	{
		ll_src_ptr = &ll_src;
		_se_ll_set(ll_src_ptr, (u32)src, src_size);
	}

	if (dst)
	{
		ll_dst_ptr = &ll_dst;
		_se_ll_set(ll_dst_ptr, (u32)dst, dst_size);
	}

	// Set linked list pointers.
	SE(SE_IN_LL_ADDR_REG) = (u32)ll_src_ptr;
	SE(SE_OUT_LL_ADDR_REG) = (u32)ll_dst_ptr;

	// Clear status (write-1-to-clear registers).
	SE(SE_ERR_STATUS_REG) = SE(SE_ERR_STATUS_REG);
	SE(SE_INT_STATUS_REG) = SE(SE_INT_STATUS_REG);

	// Flush data before starting OP so the engine sees current memory.
	bpmp_mmu_maintenance(BPMP_MMU_MAINT_CLEAN_WAY, false);

	SE(SE_OPERATION_REG) = op;

	if (is_oneshot)
		return _se_execute_finalize();

	return 0;
}
175
176
// Convenience wrapper: start an SE operation and block until it completes.
static int _se_execute_oneshot(u32 op, void *dst, u32 dst_size, const void *src, u32 src_size)
{
	return _se_execute(op, dst, dst_size, src, src_size, true);
}
180
181
// Run an already configured AES operation over an arbitrarily sized buffer.
// Processes the block-aligned bulk first, then a trailing partial block via
// a zero-padded bounce buffer. SE_CONFIG/SE_CRYPTO_CONFIG must be set by
// the caller. Returns 0 on success, nonzero on failure.
static int _se_execute_aes_oneshot(void *dst, const void *src, u32 size)
{
	// Set optional memory interface when both buffers live in DRAM.
	if (dst >= (void *)DRAM_START && src >= (void *)DRAM_START)
		SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_MEMIF(MEMIF_MCCIF);

	u32 size_aligned = ALIGN_DOWN(size, SE_AES_BLOCK_SIZE);
	u32 size_residue = size % SE_AES_BLOCK_SIZE;
	int res = 0;

	// Handle initial aligned message.
	if (size_aligned)
	{
		// Last block index. (size >> 4 equals size_aligned >> 4 since >>4 floors.)
		SE(SE_CRYPTO_LAST_BLOCK_REG) = (size >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, dst, size_aligned, src, size_aligned);
	}

	// Handle leftover partial message.
	if (!res && size_residue)
	{
		// Copy message to a block sized buffer in case it's partial.
		u32 block[SE_AES_BLOCK_SIZE / sizeof(u32)] = {0};
		memcpy(block, src + size_aligned, size_residue);

		// Use updated IV for CBC and OFB. Ignored on others.
		SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_IV_SEL(IV_UPDATED);

		SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, block, SE_AES_BLOCK_SIZE, block, SE_AES_BLOCK_SIZE);

		// Copy result back.
		memcpy(dst + size_aligned, block, size_residue);
	}

	return res;
}
219
220
static void _se_aes_counter_set(const void *ctr)
221
{
222
u32 data[SE_AES_IV_SIZE / sizeof(u32)];
223
memcpy(data, ctr, SE_AES_IV_SIZE);
224
225
for (u32 i = 0; i < SE_CRYPTO_LINEAR_CTR_REG_COUNT; i++)
226
SE(SE_CRYPTO_LINEAR_CTR_REG + sizeof(u32) * i) = data[i];
227
}
228
229
// Apply access restrictions to RSA key slot rs.
// The disable flags are combined and inverted into the hardware field,
// which expects 1 = allowed, 0 = disabled for read/update/use.
void se_rsa_acc_ctrl(u32 rs, u32 flags)
{
	if (flags & SE_RSA_KEY_TBL_DIS_KEY_ACCESS_FLAG)
		SE(SE_RSA_KEYTABLE_ACCESS_REG + sizeof(u32) * rs) =
			(((flags >> 4) & SE_RSA_KEY_TBL_DIS_KEYUSE_FLAG) | (flags & SE_RSA_KEY_TBL_DIS_KEY_READ_UPDATE_FLAG)) ^
			SE_RSA_KEY_TBL_DIS_KEY_READ_UPDATE_USE_FLAG;
	// Lock the slot by clearing its per-key security bit.
	if (flags & SE_RSA_KEY_LOCK_FLAG)
		SE(SE_RSA_SECURITY_PERKEY_REG) &= ~BIT(rs);
}
238
239
// Apply access restrictions to AES key slot ks.
// Writes the inverted flags into the slot's access register and/or clears
// the slot's per-key security bit to lock it.
void se_key_acc_ctrl(u32 ks, u32 flags)
{
	if (flags & SE_KEY_TBL_DIS_KEY_ACCESS_FLAG)
		SE(SE_CRYPTO_KEYTABLE_ACCESS_REG + sizeof(u32) * ks) = ~flags;
	if (flags & SE_KEY_LOCK_FLAG)
		SE(SE_CRYPTO_SECURITY_PERKEY_REG) &= ~BIT(ks);
}
246
247
// Read back the access control register of AES key slot ks.
u32 se_key_acc_ctrl_get(u32 ks)
{
	return SE(SE_CRYPTO_KEYTABLE_ACCESS_REG + sizeof(u32) * ks);
}
251
252
// Load an AES key into key slot ks.
// size: key size in bytes (up to SE_AES_MAX_KEY_SIZE). The key is staged
// through an aligned buffer because keytable writes are word based.
// Write order matters: ADDR selects the word, then DATA latches it.
void se_aes_key_set(u32 ks, const void *key, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / sizeof(u32)];
	memcpy(data, key, size);

	for (u32 i = 0; i < (size / sizeof(u32)); i++)
	{
		// QUAD KEYS_4_7 bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(KEYS_0_3) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = data[i];
	}
}
264
265
// Load an IV into the original-IV words of key slot ks.
// size: IV bytes to write. Staged through an aligned buffer since
// keytable writes are word based (ADDR then DATA per word).
void se_aes_iv_set(u32 ks, const void *iv, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / sizeof(u32)];
	memcpy(data, iv, size);

	for (u32 i = 0; i < (size / sizeof(u32)); i++)
	{
		// QUAD UPDATED_IV bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(ORIGINAL_IV) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = data[i];
	}
}
277
278
// Read an AES key back from key slot ks (only works if the slot's read
// access has not been disabled).
// size: key bytes to read, up to SE_AES_MAX_KEY_SIZE.
void se_aes_key_get(u32 ks, void *key, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / sizeof(u32)];

	for (u32 i = 0; i < (size / sizeof(u32)); i++)
	{
		// QUAD KEYS_4_7 bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(KEYS_0_3) | SE_KEYTABLE_PKT(i);
		data[i] = SE(SE_CRYPTO_KEYTABLE_DATA_REG);
	}

	memcpy(key, data, size);
}
291
292
// Zero out all key words (both 128-bit quads) of key slot ks.
void se_aes_key_clear(u32 ks)
{
	for (u32 i = 0; i < (SE_AES_MAX_KEY_SIZE / sizeof(u32)); i++)
	{
		// QUAD KEYS_4_7 bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(KEYS_0_3) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = 0;
	}
}
301
302
// Zero out the IV words of key slot ks. Looping over max-key-size words
// covers both the original and updated IV quads, since the PKT macro sets
// the quad bit for the upper words.
void se_aes_iv_clear(u32 ks)
{
	for (u32 i = 0; i < (SE_AES_MAX_KEY_SIZE / sizeof(u32)); i++)
	{
		// QUAD UPDATED_IV bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(ORIGINAL_IV) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = 0;
	}
}
311
312
// Decrypt a wrapped 128-bit key with slot ks_src directly into slot ks_dst
// (AES-ECB decrypt, destination is the keytable — key never hits memory).
// seed: 16 bytes of wrapped key material. Returns 0 on success.
int se_aes_unwrap_key(u32 ks_dst, u32 ks_src, const void *seed)
{
	SE(SE_CONFIG_REG) = SE_CONFIG_DEC_MODE(MODE_KEY128) | SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_KEYTABLE);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks_src) | SE_CRYPTO_CORE_SEL(CORE_DECRYPT);
	SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;
	SE(SE_CRYPTO_KEYTABLE_DST_REG) = SE_KEYTABLE_DST_KEY_INDEX(ks_dst) | SE_KEYTABLE_DST_WORD_QUAD(KEYS_0_3);

	return _se_execute_oneshot(SE_OP_START, NULL, 0, seed, SE_KEY_128_SIZE);
}
321
322
// AES-128 ECB crypt of src into dst using key slot ks.
// enc: ENCRYPT or DECRYPT. Trailing partial blocks are handled by
// _se_execute_aes_oneshot. Returns 0 on success, nonzero on failure.
int se_aes_crypt_ecb(u32 ks, int enc, void *dst, const void *src, u32 size)
{
	if (enc)
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
			SE_CRYPTO_XOR_POS(XOR_BYPASS); // No chaining XOR in ECB.
	}
	else
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_DEC_MODE(MODE_KEY128) | SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_DECRYPT) |
			SE_CRYPTO_XOR_POS(XOR_BYPASS);
	}

	return _se_execute_aes_oneshot(dst, src, size);
}
339
340
// AES-128 CBC crypt of src into dst using key slot ks and the slot's IV.
// Encrypt XORs the IV/previous ciphertext before the cipher (XOR_TOP),
// decrypt XORs after it (XOR_BOTTOM). Returns 0 on success.
int se_aes_crypt_cbc(u32 ks, int enc, void *dst, const void *src, u32 size)
{
	if (enc)
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) |
			SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_XOR_POS(XOR_TOP);
	}
	else
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_DEC_MODE(MODE_KEY128) | SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVMEM) |
			SE_CRYPTO_CORE_SEL(CORE_DECRYPT) | SE_CRYPTO_XOR_POS(XOR_BOTTOM);
	}

	return _se_execute_aes_oneshot(dst, src, size);
}
357
358
// AES-128 OFB crypt of src into dst using key slot ks and the slot's IV.
// Keystream is fed back from the cipher output (INPUT_AESOUT) and XORed
// with the plaintext after the cipher. Returns 0 on success.
int se_aes_crypt_ofb(u32 ks, void *dst, const void *src, u32 size)
{
	SE(SE_SPARE_REG) = SE_INPUT_NONCE_LE;
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_INPUT_SEL(INPUT_AESOUT) |
		SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_XOR_POS(XOR_BOTTOM);

	return _se_execute_aes_oneshot(dst, src, size);
}
367
368
// AES-128 CTR crypt of src into dst using key slot ks.
// ctr: 16-byte initial counter; the engine increments it by 1 per block
// (SE_CRYPTO_CTR_CNTN(1)). Returns 0 on success.
int se_aes_crypt_ctr(u32 ks, void *dst, const void *src, u32 size, void *ctr)
{
	SE(SE_SPARE_REG) = SE_INPUT_NONCE_LE;
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
		SE_CRYPTO_XOR_POS(XOR_BOTTOM) | SE_CRYPTO_INPUT_SEL(INPUT_LNR_CTR) |
		SE_CRYPTO_CTR_CNTN(1);

	_se_aes_counter_set(ctr);

	return _se_execute_aes_oneshot(dst, src, size);
}
380
381
// XTS crypt of a single sector, one AES block at a time: the whitening
// XORs run on the CPU, the block cipher on the SE (ECB).
// sec is serialized big endian into the tweak before encryption with the
// tweak key. Returns 0 on success, 1 on failure.
int se_aes_crypt_xts_sec(u32 tweak_ks, u32 crypt_ks, int enc, u64 sec, void *dst, void *src, u32 secsize)
{
	u32 tweak_buf[SE_AES_BLOCK_SIZE / sizeof(u32)];
	u8 *tweak = (u8 *)tweak_buf;
	u8 *out = (u8 *)dst;
	u8 *in = (u8 *)src;

	// Build the tweak from the sector number (big endian byte order),
	// then encrypt it with the tweak key.
	u64 val = sec;
	for (u32 i = 0; i < SE_AES_BLOCK_SIZE; i++)
	{
		tweak[SE_AES_BLOCK_SIZE - 1 - i] = val & 0xFF;
		val >>= 8;
	}
	if (se_aes_crypt_ecb(tweak_ks, ENCRYPT, tweak, tweak, SE_AES_BLOCK_SIZE))
		return 1;

	// We are assuming a 0x10-aligned sector size in this implementation.
	u32 blocks = secsize / SE_AES_BLOCK_SIZE;
	while (blocks--)
	{
		// Pre-whiten, crypt one block in place, then post-whiten.
		for (u32 j = 0; j < SE_AES_BLOCK_SIZE; j++)
			out[j] = in[j] ^ tweak[j];

		if (se_aes_crypt_ecb(crypt_ks, enc, out, out, SE_AES_BLOCK_SIZE))
			return 1;

		for (u32 j = 0; j < SE_AES_BLOCK_SIZE; j++)
			out[j] ^= tweak[j];

		// Next block's tweak: multiply by x in GF(2^128).
		_se_ls_1bit(tweak);
		in += SE_AES_BLOCK_SIZE;
		out += SE_AES_BLOCK_SIZE;
	}

	return 0;
}
416
417
// Nintendo XTS (XTSN) crypt of one sector, hardware accelerated: both
// whitening passes run on the CPU while the block cipher runs as a single
// ECB pass over the whole sector.
// tweak:       16-byte, u32-aligned tweak buffer, updated in place.
// regen_tweak: recompute the tweak from sec (big endian) before use.
// tweak_exp:   pre-advance the tweak by tweak_exp * 32 doublings, letting
//              callers resume from a saved tweak.
// Returns 0 on success, 1 on failure.
int se_aes_crypt_xts_sec_nx(u32 tweak_ks, u32 crypt_ks, int enc, u64 sec, u8 *tweak, bool regen_tweak, u32 tweak_exp, void *dst, void *src, u32 sec_size)
{
	u32 *pdst = (u32 *)dst;
	u32 *psrc = (u32 *)src;
	u32 *ptweak = (u32 *)tweak;

	if (regen_tweak)
	{
		// Tweak is the sector number in big endian byte order.
		for (int i = SE_AES_BLOCK_SIZE - 1; i >= 0; i--)
		{
			tweak[i] = sec & 0xFF;
			sec >>= 8;
		}
		if (se_aes_crypt_ecb(tweak_ks, ENCRYPT, tweak, tweak, SE_AES_BLOCK_SIZE))
			return 1;
	}

	// tweak_exp allows using a saved tweak to reduce _se_ls_1bit_le calls.
	for (u32 i = 0; i < (tweak_exp << 5); i++)
		_se_ls_1bit_le(tweak);

	// Keep a copy of the tweak for the post-ECB whitening pass.
	u8 orig_tweak[SE_KEY_128_SIZE] __attribute__((aligned(4)));
	memcpy(orig_tweak, tweak, SE_KEY_128_SIZE);

	// We are assuming a block (16 byte) aligned sector size here.
	// Note: pointers are u32 *, so += sizeof(u32) advances 4 words, i.e.
	// exactly one AES block per iteration.
	for (u32 i = 0; i < (sec_size >> 4); i++)
	{
		for (u32 j = 0; j < (SE_AES_BLOCK_SIZE / sizeof(u32)); j++)
			pdst[j] = psrc[j] ^ ptweak[j];

		_se_ls_1bit_le(tweak);
		psrc += sizeof(u32);
		pdst += sizeof(u32);
	}

	// Single hardware ECB pass over the whole whitened sector.
	if (se_aes_crypt_ecb(crypt_ks, enc, dst, dst, sec_size))
		return 1;

	// Second whitening pass, replaying the tweak sequence from the copy.
	pdst = (u32 *)dst;
	ptweak = (u32 *)orig_tweak;
	for (u32 i = 0; i < (sec_size >> 4); i++)
	{
		for (u32 j = 0; j < (SE_AES_BLOCK_SIZE / sizeof(u32)); j++)
			pdst[j] = pdst[j] ^ ptweak[j];

		_se_ls_1bit_le(orig_tweak);
		pdst += sizeof(u32);
	}

	return 0;
}
468
469
// XTS crypt of num_secs consecutive sectors starting at sector number sec.
// Each sector gets its own tweak. Returns 0 on success, 1 on failure.
int se_aes_crypt_xts(u32 tweak_ks, u32 crypt_ks, int enc, u64 sec, void *dst, void *src, u32 secsize, u32 num_secs)
{
	u8 *out = (u8 *)dst;
	u8 *in = (u8 *)src;

	for (u32 s = 0; s < num_secs; s++)
	{
		if (se_aes_crypt_xts_sec(tweak_ks, crypt_ks, enc, sec + s, out, in, secsize))
			return 1;
		out += secsize;
		in += secsize;
	}

	return 0;
}
480
481
static void _se_sha_hash_256_get_hash(void *hash)
482
{
483
// Copy output hash.
484
u32 hash32[SE_SHA_256_SIZE / sizeof(u32)];
485
for (u32 i = 0; i < (SE_SHA_256_SIZE / sizeof(u32)); i++)
486
hash32[i] = byte_swap_32(SE(SE_HASH_RESULT_REG + sizeof(u32) * i));
487
memcpy(hash, hash32, SE_SHA_256_SIZE);
488
}
489
490
// Core SHA256 driver.
// total_size: size in bytes of the whole (possibly chunked) message;
// src_size:   size of this chunk.
// Mode selection: INIT when total_size == src_size (oneshot) or 0 (partial
// start), CONTINUE otherwise. When total_size < src_size the leftover count
// is bumped by one so the engine treats the chunk as non-final — the
// partial_start/partial_update wrappers rely on this convention.
// Returns 0 on success, nonzero on failure.
static int _se_sha_hash_256(void *hash, u64 total_size, const void *src, u32 src_size, bool is_oneshot)
{
	// Src size of 0 is not supported, so return null string sha256.
	if (!src_size)
	{
		// SHA256 of the empty string.
		const u8 null_hash[SE_SHA_256_SIZE] = {
			0xE3, 0xB0, 0xC4, 0x42, 0x98, 0xFC, 0x1C, 0x14, 0x9A, 0xFB, 0xF4, 0xC8, 0x99, 0x6F, 0xB9, 0x24,
			0x27, 0xAE, 0x41, 0xE4, 0x64, 0x9B, 0x93, 0x4C, 0xA4, 0x95, 0x99, 0x1B, 0x78, 0x52, 0xB8, 0x55
		};
		memcpy(hash, null_hash, SE_SHA_256_SIZE);
		return 0;
	}

	// Increase leftover size if not last message. (Engine will always stop at src_size.)
	u32 msg_left = src_size;
	if (total_size < src_size)
		msg_left++;

	// Setup config for SHA256.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_SHA256) | SE_CONFIG_ENC_ALG(ALG_SHA) | SE_CONFIG_DST(DST_HASHREG);

	// Set total size: BITS(total_size), up to 2 EB.
	SE(SE_SHA_MSG_LENGTH_0_REG) = (u32)(total_size << 3);
	SE(SE_SHA_MSG_LENGTH_1_REG) = (u32)(total_size >> 29); // Bytes >> 29 == bits >> 32.
	SE(SE_SHA_MSG_LENGTH_2_REG) = 0;
	SE(SE_SHA_MSG_LENGTH_3_REG) = 0;

	// Set leftover size: BITS(src_size).
	SE(SE_SHA_MSG_LEFT_0_REG) = (u32)(msg_left << 3);
	SE(SE_SHA_MSG_LEFT_1_REG) = (u32)(msg_left >> 29);
	SE(SE_SHA_MSG_LEFT_2_REG) = 0;
	SE(SE_SHA_MSG_LEFT_3_REG) = 0;

	// Set config based on init or partial continuation.
	if (total_size == src_size || !total_size)
		SE(SE_SHA_CONFIG_REG) = SHA_INIT_HASH;
	else
		SE(SE_SHA_CONFIG_REG) = SHA_CONTINUE;

	// Trigger the operation. src vs total size decides if it's partial.
	int res = _se_execute(SE_OP_START, NULL, 0, src, src_size, is_oneshot);

	if (!res && is_oneshot)
		_se_sha_hash_256_get_hash(hash);

	return res;
}
537
538
// Start a SHA256 of a whole buffer without waiting for completion.
// Pair with se_sha_hash_256_finalize() to collect the digest.
int se_sha_hash_256_async(void *hash, const void *src, u32 size)
{
	return _se_sha_hash_256(hash, size, src, size, false);
}
542
543
// SHA256 of a whole buffer, blocking until the digest is written to hash.
int se_sha_hash_256_oneshot(void *hash, const void *src, u32 size)
{
	return _se_sha_hash_256(hash, size, src, size, true);
}
547
548
// Start a chunked SHA256. size must be SHA256-block aligned since more
// data follows. total_size 0 selects INIT mode and marks the chunk as
// non-final (see _se_sha_hash_256).
int se_sha_hash_256_partial_start(void *hash, const void *src, u32 size, bool is_oneshot)
{
	// Check if aligned SHA256 block size.
	if (size % SE_SHA2_MIN_BLOCK_SIZE)
		return 1;

	return _se_sha_hash_256(hash, 0, src, size, is_oneshot);
}
556
557
// Feed a middle chunk of a chunked SHA256. size must be block aligned.
// Passing size - 1 as the total forces CONTINUE mode and flags the chunk
// as non-final (total < src), per _se_sha_hash_256's convention.
int se_sha_hash_256_partial_update(void *hash, const void *src, u32 size, bool is_oneshot)
{
	// Check if aligned to SHA256 block size.
	if (size % SE_SHA2_MIN_BLOCK_SIZE)
		return 1;

	return _se_sha_hash_256(hash, size - 1, src, size, is_oneshot);
}
565
566
// Feed the final chunk of a chunked SHA256. total_size is the size of the
// entire message so the engine can apply the final padding correctly.
int se_sha_hash_256_partial_end(void *hash, u64 total_size, const void *src, u32 src_size, bool is_oneshot)
{
	return _se_sha_hash_256(hash, total_size, src, src_size, is_oneshot);
}
570
571
// Finish an async/partial SHA256 operation and fetch the digest.
// NOTE(review): the hash registers are read even when the wait fails;
// callers must check the return value before trusting the digest.
int se_sha_hash_256_finalize(void *hash)
{
	int res = _se_execute_finalize();

	_se_sha_hash_256_get_hash(hash);

	return res;
}
579
580
// Fill dst with size bytes from the SE SP 800-90 DRBG.
// Depends on the ENTROPY clock being enabled. Returns 0 on success.
int se_rng_pseudo(void *dst, u32 size)
{
	// Setup config for SP 800-90 PRNG.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_XOR_POS(XOR_BYPASS) | SE_CRYPTO_INPUT_SEL(INPUT_RANDOM);
	SE(SE_RNG_CONFIG_REG) = SE_RNG_CONFIG_SRC(SRC_ENTROPY) | SE_RNG_CONFIG_MODE(MODE_NORMAL);
	SE(SE_RNG_SRC_CONFIG_REG) |= SE_RNG_SRC_CONFIG_ENTR_SRC(RO_ENTR_ENABLE); // DRBG. Depends on ENTROPY clock.
	SE(SE_RNG_RESEED_INTERVAL_REG) = 4096;

	u32 size_aligned = ALIGN_DOWN(size, SE_RNG_BLOCK_SIZE);
	u32 size_residue = size % SE_RNG_BLOCK_SIZE;
	int res = 0;

	// Handle initial aligned message.
	if (size_aligned)
	{
		// Last block index. (size >> 4 equals size_aligned >> 4 since >>4 floors.)
		SE(SE_CRYPTO_LAST_BLOCK_REG) = (size >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, dst, size_aligned, NULL, 0);
	}

	// Handle leftover partial message.
	if (!res && size_residue)
	{
		// Copy message to a block sized buffer in case it's partial.
		u32 block[SE_RNG_BLOCK_SIZE / sizeof(u32)] = {0};

		// NOTE(review): assumes SE_RNG_BLOCK_SIZE == SE_AES_BLOCK_SIZE.
		SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, block, SE_RNG_BLOCK_SIZE, NULL, 0);

		// Copy result back.
		memcpy(dst + size_aligned, block, size_residue);
	}

	return res;
}
617
618
// Dump all AES key slots via the SE context save mechanism.
// buf:     scratch DMA buffer; only its 0x40-aligned start is used.
// keys:    output array, SE_AES_KEYSLOT_COUNT * keysize bytes.
// keysize: bytes saved per slot (> SE_KEY_128_SIZE also saves quad 4-7).
// Fix: use SE_CRYPTO_LAST_BLOCK_REG like the rest of this file instead of
// the inconsistent SE_CRYPTO_LAST_BLOCK name.
void se_aes_ctx_get_keys(u8 *buf, u8 *keys, u32 keysize)
{
	u8 *aligned_buf = (u8 *)ALIGN((u32)buf, 0x40);

	// Set Secure Random Key.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_DST(DST_SRK);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(0) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_INPUT_SEL(INPUT_RANDOM);
	SE(SE_RNG_CONFIG_REG) = SE_RNG_CONFIG_SRC(SRC_ENTROPY) | SE_RNG_CONFIG_MODE(MODE_FORCE_RESEED);
	SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
	_se_execute_oneshot(SE_OP_START, NULL, 0, NULL, 0);

	// Save AES keys. Each slot is emitted SRK-encrypted, one block at a time.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);

	for (u32 i = 0; i < SE_AES_KEYSLOT_COUNT; i++)
	{
		SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(AES_KEYTABLE) | SE_KEYTABLE_DST_KEY_INDEX(i) |
			SE_CONTEXT_AES_KEY_INDEX(0) | SE_CONTEXT_AES_WORD_QUAD(KEYS_0_3);

		SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
		_se_execute_oneshot(SE_OP_CTX_SAVE, aligned_buf, SE_AES_BLOCK_SIZE, NULL, 0);
		memcpy(keys + i * keysize, aligned_buf, SE_AES_BLOCK_SIZE);

		// Save the upper quad as well for 256-bit key dumps.
		if (keysize > SE_KEY_128_SIZE)
		{
			SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(AES_KEYTABLE) | SE_KEYTABLE_DST_KEY_INDEX(i) |
				SE_CONTEXT_AES_KEY_INDEX(0) | SE_CONTEXT_AES_WORD_QUAD(KEYS_4_7);

			SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
			_se_execute_oneshot(SE_OP_CTX_SAVE, aligned_buf, SE_AES_BLOCK_SIZE, NULL, 0);
			memcpy(keys + i * keysize + SE_AES_BLOCK_SIZE, aligned_buf, SE_AES_BLOCK_SIZE);
		}
	}

	// Save SRK to PMC secure scratches.
	SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(SRK);
	SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
	_se_execute_oneshot(SE_OP_CTX_SAVE, NULL, 0, NULL, 0);

	// End context save.
	SE(SE_CONFIG_REG) = 0;
	_se_execute_oneshot(SE_OP_CTX_SAVE, NULL, 0, NULL, 0);

	// Get SRK.
	u32 srk[4];
	srk[0] = PMC(APBDEV_PMC_SECURE_SCRATCH4);
	srk[1] = PMC(APBDEV_PMC_SECURE_SCRATCH5);
	srk[2] = PMC(APBDEV_PMC_SECURE_SCRATCH6);
	srk[3] = PMC(APBDEV_PMC_SECURE_SCRATCH7);

	// Decrypt context with the SRK (uses slot 3 as a scratch key slot).
	se_aes_key_set(3, srk, SE_KEY_128_SIZE);
	se_aes_crypt_cbc(3, DECRYPT, keys, keys, SE_AES_KEYSLOT_COUNT * keysize);
	se_aes_key_clear(3);
}
673
674
// AES-128 CMAC (NIST SP 800-38B) of src using key slot ks.
// hash: receives SE_AES_CMAC_DIGEST_SIZE bytes. Returns 0 on success.
// Fix: a zero-length message must use subkey K2 with 0x80 padding; the old
// code used K1 and an all-zero last block for size == 0.
int se_aes_hash_cmac(u32 ks, void *hash, const void *src, u32 size)
{
	u32 tmp1[SE_KEY_128_SIZE / sizeof(u32)] = {0};
	u32 tmp2[SE_AES_BLOCK_SIZE / sizeof(u32)] = {0};
	u8 *subkey = (u8 *)tmp1;
	u8 *last_block = (u8 *)tmp2;

	// Generate sub key (CBC with zeroed IV, basically ECB).
	se_aes_iv_clear(ks);
	if (se_aes_crypt_cbc(ks, ENCRYPT, subkey, subkey, SE_KEY_128_SIZE))
		return 1;

	// Generate K1 subkey.
	_se_ls_1bit(subkey);
	// Incomplete or empty final block: convert to K2 (SP 800-38B).
	if ((size & 0xF) || !size)
		_se_ls_1bit(subkey);

	// Switch to hash register. The rest of the config is already set.
	SE(SE_CONFIG_REG) |= SE_CONFIG_DST(DST_HASHREG);
	SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_HASH(HASH_ENABLE);

	// Initial blocks (everything except the last one).
	u32 num_blocks = (size + 0xF) >> 4;
	if (num_blocks > 1)
	{
		SE(SE_CRYPTO_LAST_BLOCK_REG) = num_blocks - 2;

		if (_se_execute_oneshot(SE_OP_START, NULL, 0, src, size))
			return 1;

		// Use updated IV for next OP as a continuation.
		SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_IV_SEL(IV_UPDATED);
	}

	// Prepare the last block: pad incomplete/empty messages with 0x80.
	if (size & 0xF)
	{
		memcpy(last_block, src + (size & (~0xF)), size & 0xF);
		last_block[size & 0xF] = 0x80;
	}
	else if (size >= SE_AES_BLOCK_SIZE)
		memcpy(last_block, src + size - SE_AES_BLOCK_SIZE, SE_AES_BLOCK_SIZE);
	else
		last_block[0] = 0x80; // Empty message.

	// XOR the final block with the selected subkey.
	for (u32 i = 0; i < SE_KEY_128_SIZE; i++)
		last_block[i] ^= subkey[i];

	SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;

	if (_se_execute_oneshot(SE_OP_START, NULL, 0, last_block, SE_AES_BLOCK_SIZE))
		return 1;

	// Copy output hash.
	u32 *hash32 = (u32 *)hash;
	for (u32 i = 0; i < (SE_AES_CMAC_DIGEST_SIZE / sizeof(u32)); i++)
		hash32[i] = SE(SE_HASH_RESULT_REG + sizeof(u32) * i);

	return 0;
}
732
733