/*
 * Copyright (c) 2018 naehrwert
 * Copyright (c) 2018-2025 CTCaer
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <string.h>

#include "se.h"
#include <memory_map.h>
#include <soc/bpmp.h>
#include <soc/hw_init.h>
#include <soc/pmc.h>
#include <soc/timer.h>
#include <soc/t210.h>
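
// The SE engine reads input and writes output through DMA linked lists.
// This driver only ever uses single-entry lists: num holds the index of
// the last entry (entry count - 1), so 0 means one contiguous buffer.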
typedef struct _se_ll_t
{
	vu32 num;
	vu32 addr;
	vu32 size;
} se_ll_t;

se_ll_t ll_src, ll_dst;
se_ll_t *ll_src_ptr, *ll_dst_ptr; // Must be u32 aligned.
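
// Doubling in GF(2^128) with the standard reduction polynomial
// x^128 + x^7 + x^2 + x + 1 (0x87): big-endian blocks for CMAC subkeys,
// and the _le variant on little-endian blocks for XTS tweaks.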
static void _gf256_mul_x(void *block)
{
	u8 *pdata = (u8 *)block;
	u32 carry = 0;

	for (int i = 0xF; i >= 0; i--)
	{
		u8 b = pdata[i];
		pdata[i] = (b << 1) | carry;
		carry = b >> 7;
	}

	if (carry)
		pdata[0xF] ^= 0x87;
}

static void _gf256_mul_x_le(void *block)
{
	u32 *pdata = (u32 *)block;
	u32 carry = 0;

	for (u32 i = 0; i < 4; i++)
	{
		u32 b = pdata[i];
		pdata[i] = (b << 1) | carry;
		carry = b >> 31;
	}

	if (carry)
		pdata[0x0] ^= 0x87;
}

static void _se_ll_init(se_ll_t *ll, u32 addr, u32 size)
{
	ll->num = 0;
	ll->addr = addr;
	ll->size = size;
}

static void _se_ll_set(se_ll_t *src, se_ll_t *dst)
{
	SE(SE_IN_LL_ADDR_REG) = (u32)src;
	SE(SE_OUT_LL_ADDR_REG) = (u32)dst;
}
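
// Block until the engine raises OP_DONE, then verify no error was flagged
// and the engine is back to idle. On T210B01 (Mariko) additionally drain
// the SE memory interface and the AHB write queue, so output written via
// AHB is actually in memory before the caller reads it.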
static int _se_wait()
{
	bool tegra_t210 = hw_get_chip_id() == GP_HIDREV_MAJOR_T210;

	// Wait for operation to be done.
	while (!(SE(SE_INT_STATUS_REG) & SE_INT_OP_DONE))
		;

	// Check for errors.
	if ((SE(SE_INT_STATUS_REG) & SE_INT_ERR_STAT) ||
		(SE(SE_STATUS_REG) & SE_STATUS_STATE_MASK) != SE_STATUS_STATE_IDLE ||
		(SE(SE_ERR_STATUS_REG) != 0))
	{
		return 0;
	}

	// T210B01: IRAM/TZRAM/DRAM AHB coherency WAR.
	if (!tegra_t210 && ll_dst_ptr)
	{
		u32 timeout = get_tmr_us() + 1000000;
		// Ensure data is out from SE.
		while (SE(SE_STATUS_REG) & SE_STATUS_MEM_IF_BUSY)
		{
			if (get_tmr_us() > timeout)
				return 0;
			usleep(1);
		}

		// Ensure data is out from AHB.
		if (ll_dst_ptr->addr >= DRAM_START)
		{
			timeout = get_tmr_us() + 200000;
			while (AHB_GIZMO(AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID) & MEM_WRQUE_SE_MST_ID)
			{
				if (get_tmr_us() > timeout)
					return 0;
				usleep(1);
			}
		}
	}

	return 1;
}

static int _se_execute_finalize()
{
	int res = _se_wait();

	// Invalidate data after OP is done.
	bpmp_mmu_maintenance(BPMP_MMU_MAINT_INVALID_WAY, false);

	ll_src_ptr = NULL;
	ll_dst_ptr = NULL;

	return res;
}
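
// Common execute path: describe src/dst with single-entry linked lists,
// clear stale error/interrupt status, clean the BPMP cache so the engine
// sees the source data, then kick the operation. One-shot callers block
// until completion; async callers must call the matching finalize later.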
static int _se_execute(u32 op, void *dst, u32 dst_size, const void *src, u32 src_size, bool is_oneshot)
{
	ll_src_ptr = NULL;
	ll_dst_ptr = NULL;

	if (src)
	{
		ll_src_ptr = &ll_src;
		_se_ll_init(ll_src_ptr, (u32)src, src_size);
	}

	if (dst)
	{
		ll_dst_ptr = &ll_dst;
		_se_ll_init(ll_dst_ptr, (u32)dst, dst_size);
	}

	_se_ll_set(ll_src_ptr, ll_dst_ptr);

	// Clear any pending status bits (write-to-clear).
	SE(SE_ERR_STATUS_REG) = SE(SE_ERR_STATUS_REG);
	SE(SE_INT_STATUS_REG) = SE(SE_INT_STATUS_REG);

	// Flush data before starting OP.
	bpmp_mmu_maintenance(BPMP_MMU_MAINT_CLEAN_WAY, false);

	SE(SE_OPERATION_REG) = op;

	if (is_oneshot)
		return _se_execute_finalize();

	return 1;
}

static int _se_execute_oneshot(u32 op, void *dst, u32 dst_size, const void *src, u32 src_size)
{
	return _se_execute(op, dst, dst_size, src, src_size, true);
}

static int _se_execute_one_block(u32 op, void *dst, u32 dst_size, const void *src, u32 src_size)
{
	if (!src || !dst)
		return 0;

	u32 block[SE_AES_BLOCK_SIZE / sizeof(u32)] = {0};

	SE(SE_CRYPTO_BLOCK_COUNT_REG) = 1 - 1;

	memcpy(block, src, src_size);
	int res = _se_execute_oneshot(op, block, SE_AES_BLOCK_SIZE, block, SE_AES_BLOCK_SIZE);
	memcpy(dst, block, dst_size);

	return res;
}

static void _se_aes_ctr_set(const void *ctr)
{
	u32 data[SE_AES_IV_SIZE / 4];
	memcpy(data, ctr, SE_AES_IV_SIZE);

	for (u32 i = 0; i < SE_CRYPTO_LINEAR_CTR_REG_COUNT; i++)
		SE(SE_CRYPTO_LINEAR_CTR_REG + (4 * i)) = data[i];
}
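
// Keyslot access control: revoke the selected read/update/use permissions
// of a RSA or AES keyslot, and optionally clear its per-key security bit
// to lock the slot down.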
void se_rsa_acc_ctrl(u32 rs, u32 flags)
{
	if (flags & SE_RSA_KEY_TBL_DIS_KEY_ACCESS_FLAG)
		SE(SE_RSA_KEYTABLE_ACCESS_REG + 4 * rs) =
			(((flags >> 4) & SE_RSA_KEY_TBL_DIS_KEYUSE_FLAG) | (flags & SE_RSA_KEY_TBL_DIS_KEY_READ_UPDATE_FLAG)) ^
			SE_RSA_KEY_TBL_DIS_KEY_READ_UPDATE_USE_FLAG;
	if (flags & SE_RSA_KEY_LOCK_FLAG)
		SE(SE_RSA_SECURITY_PERKEY_REG) &= ~BIT(rs);
}

void se_key_acc_ctrl(u32 ks, u32 flags)
{
	if (flags & SE_KEY_TBL_DIS_KEY_ACCESS_FLAG)
		SE(SE_CRYPTO_KEYTABLE_ACCESS_REG + 4 * ks) = ~flags;
	if (flags & SE_KEY_LOCK_FLAG)
		SE(SE_CRYPTO_SECURITY_PERKEY_REG) &= ~BIT(ks);
}

u32 se_key_acc_ctrl_get(u32 ks)
{
	return SE(SE_CRYPTO_KEYTABLE_ACCESS_REG + 4 * ks);
}
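
/*
 * Keyslot data helpers. Illustrative use (hypothetical key_buf/in/out
 * buffers): load a 128-bit key into slot 0 and encrypt one block with it:
 *
 *   se_aes_key_set(0, key_buf, SE_KEY_128_SIZE);
 *   se_aes_crypt_block_ecb(0, ENCRYPT, out, in);
 */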
void se_aes_key_set(u32 ks, const void *key, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / 4];
	memcpy(data, key, size);

	for (u32 i = 0; i < (size / 4); i++)
	{
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_PKT(i); // QUAD is automatically set by PKT.
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = data[i];
	}
}

void se_aes_iv_set(u32 ks, const void *iv)
{
	u32 data[SE_AES_IV_SIZE / 4];
	memcpy(data, iv, SE_AES_IV_SIZE);

	for (u32 i = 0; i < (SE_AES_IV_SIZE / 4); i++)
	{
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(ORIGINAL_IV) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = data[i];
	}
}

void se_aes_key_get(u32 ks, void *key, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / 4];

	for (u32 i = 0; i < (size / 4); i++)
	{
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_PKT(i); // QUAD is automatically set by PKT.
		data[i] = SE(SE_CRYPTO_KEYTABLE_DATA_REG);
	}

	memcpy(key, data, size);
}

void se_aes_key_clear(u32 ks)
{
	for (u32 i = 0; i < (SE_AES_MAX_KEY_SIZE / 4); i++)
	{
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_PKT(i); // QUAD is automatically set by PKT.
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = 0;
	}
}

void se_aes_iv_clear(u32 ks)
{
	for (u32 i = 0; i < (SE_AES_IV_SIZE / 4); i++)
	{
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(ORIGINAL_IV) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = 0;
	}
}

void se_aes_iv_updated_clear(u32 ks)
{
	for (u32 i = 0; i < (SE_AES_IV_SIZE / 4); i++)
	{
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(UPDATED_IV) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = 0;
	}
}
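
// Decrypt a wrapped 128-bit key with ks_src directly into keyslot ks_dst
// (DST_KEYTABLE), so the unwrapped key never passes through memory.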
int se_aes_unwrap_key(u32 ks_dst, u32 ks_src, const void *input)
{
	SE(SE_CONFIG_REG) = SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_KEYTABLE);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks_src) | SE_CRYPTO_CORE_SEL(CORE_DECRYPT);
	SE(SE_CRYPTO_BLOCK_COUNT_REG) = 1 - 1;
	SE(SE_CRYPTO_KEYTABLE_DST_REG) = SE_KEYTABLE_DST_KEY_INDEX(ks_dst) | SE_KEYTABLE_DST_WORD_QUAD(KEYS_0_3);

	return _se_execute_oneshot(SE_OP_START, NULL, 0, input, SE_KEY_128_SIZE);
}

int se_aes_crypt_hash(u32 ks, u32 enc, void *dst, u32 dst_size, const void *src, u32 src_size)
{
	if (enc)
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) |
			SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_XOR_POS(XOR_TOP) |
			SE_CRYPTO_HASH(HASH_ENABLE);
	}
	else
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVMEM) |
			SE_CRYPTO_CORE_SEL(CORE_DECRYPT) | SE_CRYPTO_XOR_POS(XOR_BOTTOM) |
			SE_CRYPTO_HASH(HASH_ENABLE);
	}
	SE(SE_CRYPTO_BLOCK_COUNT_REG) = (src_size >> 4) - 1;
	return _se_execute_oneshot(SE_OP_START, dst, dst_size, src, src_size);
}

int se_aes_crypt_ecb(u32 ks, u32 enc, void *dst, u32 dst_size, const void *src, u32 src_size)
{
	if (enc)
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
	}
	else
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_DECRYPT);
	}
	SE(SE_CRYPTO_BLOCK_COUNT_REG) = (src_size >> 4) - 1;
	return _se_execute_oneshot(SE_OP_START, dst, dst_size, src, src_size);
}

int se_aes_crypt_cbc(u32 ks, u32 enc, void *dst, u32 dst_size, const void *src, u32 src_size)
{
	if (enc)
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) |
			SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_XOR_POS(XOR_TOP);
	}
	else
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVMEM) |
			SE_CRYPTO_CORE_SEL(CORE_DECRYPT) | SE_CRYPTO_XOR_POS(XOR_BOTTOM);
	}
	SE(SE_CRYPTO_BLOCK_COUNT_REG) = (src_size >> 4) - 1;
	return _se_execute_oneshot(SE_OP_START, dst, dst_size, src, src_size);
}

int se_aes_crypt_block_ecb(u32 ks, u32 enc, void *dst, const void *src)
{
	return se_aes_crypt_ecb(ks, enc, dst, SE_AES_BLOCK_SIZE, src, SE_AES_BLOCK_SIZE);
}
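
// AES-CTR with the 128-bit linear counter. The 16-byte aligned bulk goes
// through in one operation; a trailing partial block (if any) is zero
// padded in a stack buffer and processed separately so dst cannot overflow.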
int se_aes_crypt_ctr(u32 ks, void *dst, u32 dst_size, const void *src, u32 src_size, void *ctr)
{
	SE(SE_SPARE_REG) = SE_ECO(SE_ERRATA_FIX_ENABLE);
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
		SE_CRYPTO_XOR_POS(XOR_BOTTOM) | SE_CRYPTO_INPUT_SEL(INPUT_LNR_CTR) |
		SE_CRYPTO_CTR_CNTN(1);
	_se_aes_ctr_set(ctr);

	u32 src_size_aligned = src_size & 0xFFFFFFF0;
	u32 src_size_delta = src_size & 0xF;

	if (src_size_aligned)
	{
		SE(SE_CRYPTO_BLOCK_COUNT_REG) = (src_size >> 4) - 1;
		if (!_se_execute_oneshot(SE_OP_START, dst, dst_size, src, src_size_aligned))
			return 0;
	}

	if (src_size - src_size_aligned && src_size_aligned < dst_size)
		return _se_execute_one_block(SE_OP_START, dst + src_size_aligned,
			MIN(src_size_delta, dst_size - src_size_aligned),
			src + src_size_aligned, src_size_delta);

	return 1;
}
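
// XTS-AES sector crypt with a software tweak chain: the tweak is the ECB
// encryption of the big-endian sector number under tweak_ks; each block is
// XORed with the tweak before and after ECB under crypt_ks, and the tweak
// is doubled in GF(2^128) between blocks.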
int se_aes_xts_crypt_sec(u32 tweak_ks, u32 crypt_ks, u32 enc, u64 sec, void *dst, void *src, u32 secsize)
{
	int res = 0;
	u32 tmp[SE_AES_BLOCK_SIZE / sizeof(u32)];
	u8 *tweak = (u8 *)tmp;
	u8 *pdst = (u8 *)dst;
	u8 *psrc = (u8 *)src;

	// Generate tweak.
	for (int i = 0xF; i >= 0; i--)
	{
		tweak[i] = sec & 0xFF;
		sec >>= 8;
	}
	if (!se_aes_crypt_block_ecb(tweak_ks, ENCRYPT, tweak, tweak))
		goto out;

	// We are assuming a 0x10-aligned sector size in this implementation.
	for (u32 i = 0; i < secsize / SE_AES_BLOCK_SIZE; i++)
	{
		for (u32 j = 0; j < SE_AES_BLOCK_SIZE; j++)
			pdst[j] = psrc[j] ^ tweak[j];
		if (!se_aes_crypt_block_ecb(crypt_ks, enc, pdst, pdst))
			goto out;
		for (u32 j = 0; j < SE_AES_BLOCK_SIZE; j++)
			pdst[j] = pdst[j] ^ tweak[j];
		_gf256_mul_x(tweak);
		psrc += SE_AES_BLOCK_SIZE;
		pdst += SE_AES_BLOCK_SIZE;
	}

	res = 1;

out:
	return res;
}
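
// NX variant that batches the hardware work: XOR the whole sector with the
// running tweak, run one ECB pass over it, then XOR again with a saved
// copy of the tweak chain. tweak_exp fast-forwards a saved tweak by 32 GF
// doublings per unit (one 512-byte sector) instead of regenerating it.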
int se_aes_xts_crypt_sec_nx(u32 tweak_ks, u32 crypt_ks, u32 enc, u64 sec, u8 *tweak, bool regen_tweak, u32 tweak_exp, void *dst, void *src, u32 sec_size)
{
	u32 *pdst = (u32 *)dst;
	u32 *psrc = (u32 *)src;
	u32 *ptweak = (u32 *)tweak;

	if (regen_tweak)
	{
		for (int i = 0xF; i >= 0; i--)
		{
			tweak[i] = sec & 0xFF;
			sec >>= 8;
		}
		if (!se_aes_crypt_block_ecb(tweak_ks, ENCRYPT, tweak, tweak))
			return 0;
	}

	// tweak_exp allows using a saved tweak to reduce _gf256_mul_x_le calls.
	for (u32 i = 0; i < (tweak_exp << 5); i++)
		_gf256_mul_x_le(tweak);

	u8 orig_tweak[SE_KEY_128_SIZE] __attribute__((aligned(4)));
	memcpy(orig_tweak, tweak, SE_KEY_128_SIZE);

	// We are assuming a 16-byte aligned sector size in this implementation.
	for (u32 i = 0; i < (sec_size >> 4); i++)
	{
		for (u32 j = 0; j < 4; j++)
			pdst[j] = psrc[j] ^ ptweak[j];

		_gf256_mul_x_le(tweak);
		psrc += 4;
		pdst += 4;
	}

	if (!se_aes_crypt_ecb(crypt_ks, enc, dst, sec_size, dst, sec_size))
		return 0;

	pdst = (u32 *)dst;
	ptweak = (u32 *)orig_tweak;
	for (u32 i = 0; i < (sec_size >> 4); i++)
	{
		for (u32 j = 0; j < 4; j++)
			pdst[j] = pdst[j] ^ ptweak[j];

		_gf256_mul_x_le(orig_tweak);
		pdst += 4;
	}

	return 1;
}

int se_aes_xts_crypt(u32 tweak_ks, u32 crypt_ks, u32 enc, u64 sec, void *dst, void *src, u32 secsize, u32 num_secs)
{
	u8 *pdst = (u8 *)dst;
	u8 *psrc = (u8 *)src;

	for (u32 i = 0; i < num_secs; i++)
		if (!se_aes_xts_crypt_sec(tweak_ks, crypt_ks, enc, sec + i, pdst + secsize * i, psrc + secsize * i, secsize))
			return 0;

	return 1;
}

static void se_calc_sha256_get_hash(void *hash, u32 *msg_left)
{
	u32 hash32[SE_SHA_256_SIZE / 4];

	// Backup message left.
	if (msg_left)
	{
		msg_left[0] = SE(SE_SHA_MSG_LEFT_0_REG);
		msg_left[1] = SE(SE_SHA_MSG_LEFT_1_REG);
	}

	// Copy output hash.
	for (u32 i = 0; i < (SE_SHA_256_SIZE / 4); i++)
		hash32[i] = byte_swap_32(SE(SE_HASH_RESULT_REG + (i * 4)));
	memcpy(hash, hash32, SE_SHA_256_SIZE);
}
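
/*
 * SHA256 with optional chunked operation: msg_left carries the hardware's
 * remaining-bits counters between calls, and the intermediate digest lives
 * in the caller's hash buffer. Illustrative two-chunk use (hypothetical
 * buf/sizes; each chunk must stay below the 16MB - 1 limit):
 *
 *   u32 left[2];
 *   se_calc_sha256(hash, left, buf,            0x400000, 0x800000, SHA_INIT_HASH, true);
 *   se_calc_sha256(hash, left, buf + 0x400000, 0x400000, 0x800000, SHA_CONTINUE,  true);
 */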
int se_calc_sha256(void *hash, u32 *msg_left, const void *src, u32 src_size, u64 total_size, u32 sha_cfg, bool is_oneshot)
{
	int res;
	u32 hash32[SE_SHA_256_SIZE / 4];

	//! TODO: src_size must be 512 bit aligned if continuing and not last block for SHA256.
	if (src_size > 0xFFFFFF || !hash) // Max 16MB - 1 chunks and aligned x4 hash buffer.
		return 0;

	// Src size of 0 is not supported, so return the SHA256 of the empty string.
	if (!src_size)
	{
		const u8 null_hash[SE_SHA_256_SIZE] = {
			0xE3, 0xB0, 0xC4, 0x42, 0x98, 0xFC, 0x1C, 0x14, 0x9A, 0xFB, 0xF4, 0xC8, 0x99, 0x6F, 0xB9, 0x24,
			0x27, 0xAE, 0x41, 0xE4, 0x64, 0x9B, 0x93, 0x4C, 0xA4, 0x95, 0x99, 0x1B, 0x78, 0x52, 0xB8, 0x55
		};
		memcpy(hash, null_hash, SE_SHA_256_SIZE);
		return 1;
	}

	// Setup config for SHA256.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_SHA256) | SE_CONFIG_ENC_ALG(ALG_SHA) | SE_CONFIG_DST(DST_HASHREG);
	SE(SE_SHA_CONFIG_REG) = sha_cfg;
	SE(SE_CRYPTO_BLOCK_COUNT_REG) = 1 - 1;

	// Set total size to current buffer size if empty.
	if (!total_size)
		total_size = src_size;

	// Set total size: BITS(total_size), up to 2 EB.
	SE(SE_SHA_MSG_LENGTH_0_REG) = (u32)(total_size << 3);
	SE(SE_SHA_MSG_LENGTH_1_REG) = (u32)(total_size >> 29);
	SE(SE_SHA_MSG_LENGTH_2_REG) = 0;
	SE(SE_SHA_MSG_LENGTH_3_REG) = 0;

	// Set size left to hash.
	SE(SE_SHA_MSG_LEFT_0_REG) = (u32)(total_size << 3);
	SE(SE_SHA_MSG_LEFT_1_REG) = (u32)(total_size >> 29);
	SE(SE_SHA_MSG_LEFT_2_REG) = 0;
	SE(SE_SHA_MSG_LEFT_3_REG) = 0;

	// If we hash in chunks, copy over the intermediate.
	if (sha_cfg == SHA_CONTINUE && msg_left)
	{
		// Restore message left to process.
		SE(SE_SHA_MSG_LEFT_0_REG) = msg_left[0];
		SE(SE_SHA_MSG_LEFT_1_REG) = msg_left[1];

		// Restore hash reg.
		memcpy(hash32, hash, SE_SHA_256_SIZE);
		for (u32 i = 0; i < (SE_SHA_256_SIZE / 4); i++)
			SE(SE_HASH_RESULT_REG + (i * 4)) = byte_swap_32(hash32[i]);
	}

	// Trigger the operation.
	res = _se_execute(SE_OP_START, NULL, 0, src, src_size, is_oneshot);

	if (is_oneshot)
		se_calc_sha256_get_hash(hash, msg_left);

	return res;
}

int se_calc_sha256_oneshot(void *hash, const void *src, u32 src_size)
{
	return se_calc_sha256(hash, NULL, src, src_size, 0, SHA_INIT_HASH, true);
}

int se_calc_sha256_finalize(void *hash, u32 *msg_left)
{
	int res = _se_execute_finalize();

	se_calc_sha256_get_hash(hash, msg_left);

	return res;
}

int se_gen_prng128(void *dst)
{
	// Setup config for X931 PRNG.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_HASH(HASH_DISABLE) | SE_CRYPTO_XOR_POS(XOR_BYPASS) | SE_CRYPTO_INPUT_SEL(INPUT_RANDOM);
	SE(SE_RNG_CONFIG_REG) = SE_RNG_CONFIG_SRC(SRC_ENTROPY) | SE_RNG_CONFIG_MODE(MODE_NORMAL);
	//SE(SE_RNG_SRC_CONFIG_REG) =
	//	SE_RNG_SRC_CONFIG_ENTR_SRC(RO_ENTR_ENABLE) | SE_RNG_SRC_CONFIG_ENTR_SRC_LOCK(RO_ENTR_LOCK_ENABLE);
	SE(SE_RNG_RESEED_INTERVAL_REG) = 1;

	SE(SE_CRYPTO_BLOCK_COUNT_REG) = (16 >> 4) - 1;

	// Trigger the operation.
	return _se_execute_oneshot(SE_OP_START, dst, 16, NULL, 0);
}
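
// Dump all AES keyslots via the context save operation: each slot is
// encrypted with a freshly seeded random SRK into memory (this works even
// for slots whose direct keytable read access is disabled), the SRK itself
// lands in the PMC secure scratch registers when the save ends, and the
// blob is then CBC-decrypted with that SRK to recover the plaintext keys.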
void se_get_aes_keys(u8 *buf, u8 *keys, u32 keysize)
{
	u8 *aligned_buf = (u8 *)ALIGN((u32)buf, 0x40);

	// Set Secure Random Key.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_DST(DST_SRK);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(0) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_INPUT_SEL(INPUT_RANDOM);
	SE(SE_RNG_CONFIG_REG) = SE_RNG_CONFIG_SRC(SRC_ENTROPY) | SE_RNG_CONFIG_MODE(MODE_FORCE_RESEED);
	SE(SE_CRYPTO_LAST_BLOCK) = 0;
	_se_execute_oneshot(SE_OP_START, NULL, 0, NULL, 0);

	// Save AES keys.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);

	for (u32 i = 0; i < SE_AES_KEYSLOT_COUNT; i++)
	{
		SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(AES_KEYTABLE) | SE_KEYTABLE_DST_KEY_INDEX(i) |
			SE_CONTEXT_AES_KEY_INDEX(0) | SE_CONTEXT_AES_WORD_QUAD(KEYS_0_3);

		SE(SE_CRYPTO_LAST_BLOCK) = 0;
		_se_execute_oneshot(SE_OP_CTX_SAVE, aligned_buf, SE_AES_BLOCK_SIZE, NULL, 0);
		memcpy(keys + i * keysize, aligned_buf, SE_AES_BLOCK_SIZE);

		if (keysize > SE_KEY_128_SIZE)
		{
			SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(AES_KEYTABLE) | SE_KEYTABLE_DST_KEY_INDEX(i) |
				SE_CONTEXT_AES_KEY_INDEX(0) | SE_CONTEXT_AES_WORD_QUAD(KEYS_4_7);

			SE(SE_CRYPTO_LAST_BLOCK) = 0;
			_se_execute_oneshot(SE_OP_CTX_SAVE, aligned_buf, SE_AES_BLOCK_SIZE, NULL, 0);
			memcpy(keys + i * keysize + SE_AES_BLOCK_SIZE, aligned_buf, SE_AES_BLOCK_SIZE);
		}
	}

	// Save SRK to PMC secure scratches.
	SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(SRK);
	SE(SE_CRYPTO_LAST_BLOCK) = 0;
	_se_execute_oneshot(SE_OP_CTX_SAVE, NULL, 0, NULL, 0);

	// End context save.
	SE(SE_CONFIG_REG) = 0;
	_se_execute_oneshot(SE_OP_CTX_SAVE, NULL, 0, NULL, 0);

	// Get SRK.
	u32 srk[4];
	srk[0] = PMC(APBDEV_PMC_SECURE_SCRATCH4);
	srk[1] = PMC(APBDEV_PMC_SECURE_SCRATCH5);
	srk[2] = PMC(APBDEV_PMC_SECURE_SCRATCH6);
	srk[3] = PMC(APBDEV_PMC_SECURE_SCRATCH7);

	// Decrypt context.
	se_aes_key_clear(3);
	se_aes_key_set(3, srk, SE_KEY_128_SIZE);
	se_aes_crypt_cbc(3, DECRYPT, keys, SE_AES_KEYSLOT_COUNT * keysize, keys, SE_AES_KEYSLOT_COUNT * keysize);
	se_aes_key_clear(3);
}
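
// AES-CMAC (RFC 4493): derive the subkey by encrypting a zero block and
// doubling it in GF(2^128) (twice if the message needs 10* padding),
// CBC-MAC all but the last block in hardware, then finish the tag in the
// hash registers with the subkey-masked final block.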
int se_aes_cmac_128(u32 ks, void *dst, const void *src, u32 src_size)
{
	int res = 0;

	u32 tmp1[SE_KEY_128_SIZE / sizeof(u32)] = {0};
	u32 tmp2[SE_AES_BLOCK_SIZE / sizeof(u32)] = {0};
	u8 *key = (u8 *)tmp1;
	u8 *last_block = (u8 *)tmp2;

	se_aes_iv_clear(ks);
	se_aes_iv_updated_clear(ks);

	// Generate subkey.
	if (!se_aes_crypt_hash(ks, ENCRYPT, key, SE_KEY_128_SIZE, key, SE_KEY_128_SIZE))
		goto out;

	_gf256_mul_x(key);
	if (src_size & 0xF)
		_gf256_mul_x(key);

	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_HASHREG);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_INPUT_SEL(INPUT_MEMORY) |
		SE_CRYPTO_XOR_POS(XOR_TOP) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) | SE_CRYPTO_HASH(HASH_ENABLE) |
		SE_CRYPTO_CORE_SEL(CORE_ENCRYPT);
	se_aes_iv_clear(ks);
	se_aes_iv_updated_clear(ks);

	u32 num_blocks = (src_size + 0xf) >> 4;
	if (num_blocks > 1)
	{
		SE(SE_CRYPTO_BLOCK_COUNT_REG) = num_blocks - 2;
		if (!_se_execute_oneshot(SE_OP_START, NULL, 0, src, src_size))
			goto out;
		SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_IV_SEL(IV_UPDATED);
	}

	if (src_size & 0xf)
	{
		memcpy(last_block, src + (src_size & ~0xf), src_size & 0xf);
		last_block[src_size & 0xf] = 0x80;
	}
	else if (src_size >= SE_AES_BLOCK_SIZE)
	{
		memcpy(last_block, src + src_size - SE_AES_BLOCK_SIZE, SE_AES_BLOCK_SIZE);
	}

	for (u32 i = 0; i < SE_KEY_128_SIZE; i++)
		last_block[i] ^= key[i];

	SE(SE_CRYPTO_BLOCK_COUNT_REG) = 0;
	res = _se_execute_oneshot(SE_OP_START, NULL, 0, last_block, SE_AES_BLOCK_SIZE);

	u32 *dst32 = (u32 *)dst;
	for (u32 i = 0; i < (SE_KEY_128_SIZE / 4); i++)
		dst32[i] = SE(SE_HASH_RESULT_REG + (i * 4));

out:
	return res;
}