Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/lib/crypto/powerpc/aes.h
121833 views
1
/* SPDX-License-Identifier: GPL-2.0-only */
2
/*
3
* Copyright (c) 2015 Markus Stockhausen <[email protected]>
4
* Copyright (C) 2015 International Business Machines Inc.
5
* Copyright 2026 Google LLC
6
*/
7
#include <asm/simd.h>
8
#include <asm/switch_to.h>
9
#include <linux/cpufeature.h>
10
#include <linux/jump_label.h>
11
#include <linux/preempt.h>
12
#include <linux/uaccess.h>
13
14
#ifdef CONFIG_SPE

/*
 * AES primitives implemented with SPE instructions (not visible in this
 * file; presumably in assembly — confirm against the aes-spe sources).
 * EXPORT_SYMBOL_GPL makes them callable from GPL-compatible modules.
 */
EXPORT_SYMBOL_GPL(ppc_expand_key_128);
EXPORT_SYMBOL_GPL(ppc_expand_key_192);
EXPORT_SYMBOL_GPL(ppc_expand_key_256);
EXPORT_SYMBOL_GPL(ppc_generate_decrypt_key);
EXPORT_SYMBOL_GPL(ppc_encrypt_ecb);
EXPORT_SYMBOL_GPL(ppc_decrypt_ecb);
EXPORT_SYMBOL_GPL(ppc_encrypt_cbc);
EXPORT_SYMBOL_GPL(ppc_decrypt_cbc);
EXPORT_SYMBOL_GPL(ppc_crypt_ctr);
EXPORT_SYMBOL_GPL(ppc_encrypt_xts);
EXPORT_SYMBOL_GPL(ppc_decrypt_xts);

/* Single-block encrypt/decrypt; used by aes_encrypt_arch()/aes_decrypt_arch(). */
void ppc_encrypt_aes(u8 *out, const u8 *in, const u32 *key_enc, u32 rounds);
void ppc_decrypt_aes(u8 *out, const u8 *in, const u32 *key_dec, u32 rounds);
/*
 * Enter a region where the kernel may use the SPE unit.
 * Must be paired with spe_end().
 */
static void spe_begin(void)
{
	/*
	 * Disable preemption and save the user's SPE registers if required.
	 * Preemption must be off before enable_kernel_spe(), since the SPE
	 * register state is per-CPU once the kernel starts using it.
	 */
	preempt_disable();
	enable_kernel_spe();
}
38
/*
 * Leave a region entered with spe_begin(): give up kernel use of the SPE
 * unit, then reenable preemption.
 */
static void spe_end(void)
{
	disable_kernel_spe();
	/* reenable preemption */
	preempt_enable();
}
static void aes_preparekey_arch(union aes_enckey_arch *k,
46
union aes_invkey_arch *inv_k,
47
const u8 *in_key, int key_len, int nrounds)
48
{
49
if (key_len == AES_KEYSIZE_128)
50
ppc_expand_key_128(k->spe_enc_key, in_key);
51
else if (key_len == AES_KEYSIZE_192)
52
ppc_expand_key_192(k->spe_enc_key, in_key);
53
else
54
ppc_expand_key_256(k->spe_enc_key, in_key);
55
56
if (inv_k)
57
ppc_generate_decrypt_key(inv_k->spe_dec_key, k->spe_enc_key,
58
key_len);
59
}
60
61
static void aes_encrypt_arch(const struct aes_enckey *key,
62
u8 out[AES_BLOCK_SIZE],
63
const u8 in[AES_BLOCK_SIZE])
64
{
65
spe_begin();
66
ppc_encrypt_aes(out, in, key->k.spe_enc_key, key->nrounds / 2 - 1);
67
spe_end();
68
}
69
70
static void aes_decrypt_arch(const struct aes_key *key,
71
u8 out[AES_BLOCK_SIZE],
72
const u8 in[AES_BLOCK_SIZE])
73
{
74
spe_begin();
75
ppc_decrypt_aes(out, in, key->inv_k.spe_dec_key, key->nrounds / 2 - 1);
76
spe_end();
77
}
78
79
#else /* CONFIG_SPE */

/*
 * Set at init time (see aes_mod_init_arch()) when the CPU supports the
 * POWER8 vector crypto instructions; checked on every operation to pick
 * between the VSX and generic code paths.
 */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_vec_crypto);

/*
 * POWER8 (P8) VSX AES primitives, defined elsewhere.  EXPORT_SYMBOL_GPL
 * makes them callable from GPL-compatible modules.
 */
EXPORT_SYMBOL_GPL(aes_p8_set_encrypt_key);
EXPORT_SYMBOL_GPL(aes_p8_set_decrypt_key);
EXPORT_SYMBOL_GPL(aes_p8_encrypt);
EXPORT_SYMBOL_GPL(aes_p8_decrypt);
EXPORT_SYMBOL_GPL(aes_p8_cbc_encrypt);
EXPORT_SYMBOL_GPL(aes_p8_ctr32_encrypt_blocks);
EXPORT_SYMBOL_GPL(aes_p8_xts_encrypt);
EXPORT_SYMBOL_GPL(aes_p8_xts_decrypt);
static inline bool is_vsx_format(const struct p8_aes_key *key)
93
{
94
return key->nrounds != 0;
95
}
96
97
/*
98
* Convert a round key from VSX to generic format by reflecting all 16 bytes (if
99
* little endian) or reflecting the bytes in each 4-byte word (if big endian),
100
* and (if apply_inv_mix=true) applying InvMixColumn to each column.
101
*
102
* It would be nice if the VSX and generic key formats would be compatible. But
103
* that's very difficult to do, with the assembly code having been borrowed from
104
* OpenSSL and also targeted to POWER8 rather than POWER9.
105
*
106
* Fortunately, this conversion should only be needed in extremely rare cases,
107
* possibly not at all in practice. It's just included for full correctness.
108
*/
109
static void rndkey_from_vsx(u32 out[4], const u32 in[4], bool apply_inv_mix)
110
{
111
const bool be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
112
u32 k0 = swab32(in[0]);
113
u32 k1 = swab32(in[1]);
114
u32 k2 = swab32(in[2]);
115
u32 k3 = swab32(in[3]);
116
117
if (apply_inv_mix) {
118
k0 = inv_mix_columns(k0);
119
k1 = inv_mix_columns(k1);
120
k2 = inv_mix_columns(k2);
121
k3 = inv_mix_columns(k3);
122
}
123
out[0] = be ? k0 : k3;
124
out[1] = be ? k1 : k2;
125
out[2] = be ? k2 : k1;
126
out[3] = be ? k3 : k0;
127
}
128
129
/*
 * Expand @in_key (@key_len bytes) into the encryption key schedule @k and,
 * if @inv_k is non-NULL, the decryption key schedule too.  Uses the VSX
 * key-setup routines when the vector crypto unit is usable in the current
 * context; otherwise falls back to the generic expansion and marks the
 * schedule as generic-format by zeroing nrounds (see is_vsx_format()).
 */
static void aes_preparekey_arch(union aes_enckey_arch *k,
				union aes_invkey_arch *inv_k,
				const u8 *in_key, int key_len, int nrounds)
{
	const int keybits = 8 * key_len;
	int ret;

	if (static_branch_likely(&have_vec_crypto) && likely(may_use_simd())) {
		/*
		 * Kernel use of VSX requires preemption and page faults to be
		 * disabled for the duration; enable_kernel_vsx() then grants
		 * access to the vector registers.
		 */
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		ret = aes_p8_set_encrypt_key(in_key, keybits, &k->p8);
		/*
		 * aes_p8_set_encrypt_key() should never fail here, since the
		 * key length was already validated.
		 */
		WARN_ON_ONCE(ret);
		if (inv_k) {
			ret = aes_p8_set_decrypt_key(in_key, keybits,
						     &inv_k->p8);
			/* ... and likewise for aes_p8_set_decrypt_key(). */
			WARN_ON_ONCE(ret);
		}
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else {
		aes_expandkey_generic(k->rndkeys,
				      inv_k ? inv_k->inv_rndkeys : NULL,
				      in_key, key_len);
		/* Mark the key as using the generic format. */
		k->p8.nrounds = 0;
		if (inv_k)
			inv_k->p8.nrounds = 0;
	}
}
/*
 * Encrypt one 16-byte block.  Normally dispatches to the VSX code; if the
 * key is VSX-format but VSX cannot be used in this context, the round keys
 * are converted to the generic format on the fly.  Keys prepared without
 * VSX are already generic-format and go straight to the generic code.
 */
static void aes_encrypt_arch(const struct aes_enckey *key,
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	if (static_branch_likely(&have_vec_crypto) &&
	    likely(is_vsx_format(&key->k.p8) && may_use_simd())) {
		/* Kernel VSX use: no preemption or page faults allowed. */
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		aes_p8_encrypt(in, out, &key->k.p8);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else if (unlikely(is_vsx_format(&key->k.p8))) {
		/*
		 * This handles (the hopefully extremely rare) case where a key
		 * was prepared using the VSX optimized format, then encryption
		 * is done in a context that cannot use VSX instructions.
		 */
		u32 rndkeys[AES_MAX_KEYLENGTH_U32];

		/* Convert all nrounds + 1 round keys (4 words each). */
		for (int i = 0; i < 4 * (key->nrounds + 1); i += 4)
			rndkey_from_vsx(&rndkeys[i],
					&key->k.p8.rndkeys[i], false);
		aes_encrypt_generic(rndkeys, key->nrounds, out, in);
	} else {
		aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
	}
}
/*
 * Decrypt one 16-byte block.  Normally dispatches to the VSX code; if the
 * key is VSX-format but VSX cannot be used in this context, the decryption
 * round keys are converted to the generic format on the fly.  Keys prepared
 * without VSX are already generic-format and use the generic code directly.
 */
static void aes_decrypt_arch(const struct aes_key *key, u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	if (static_branch_likely(&have_vec_crypto) &&
	    likely(is_vsx_format(&key->inv_k.p8) && may_use_simd())) {
		/* Kernel VSX use: no preemption or page faults allowed. */
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		aes_p8_decrypt(in, out, &key->inv_k.p8);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else if (unlikely(is_vsx_format(&key->inv_k.p8))) {
		/*
		 * This handles (the hopefully extremely rare) case where a key
		 * was prepared using the VSX optimized format, then decryption
		 * is done in a context that cannot use VSX instructions.
		 */
		u32 inv_rndkeys[AES_MAX_KEYLENGTH_U32];
		int i;

		/*
		 * Only the middle round keys get InvMixColumns applied
		 * (apply_inv_mix=true); the first and last do not.
		 */
		rndkey_from_vsx(&inv_rndkeys[0],
				&key->inv_k.p8.rndkeys[0], false);
		for (i = 4; i < 4 * key->nrounds; i += 4) {
			rndkey_from_vsx(&inv_rndkeys[i],
					&key->inv_k.p8.rndkeys[i], true);
		}
		rndkey_from_vsx(&inv_rndkeys[i],
				&key->inv_k.p8.rndkeys[i], false);
		aes_decrypt_generic(inv_rndkeys, key->nrounds, out, in);
	} else {
		aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds,
				    out, in);
	}
}
#define aes_mod_init_arch aes_mod_init_arch
233
static void aes_mod_init_arch(void)
234
{
235
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
236
(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
237
static_branch_enable(&have_vec_crypto);
238
}
239
240
#endif /* !CONFIG_SPE */
241
242