GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/include/asm/book3s/64/kup.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
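
/*
 * A note on the encoding: the AMR (and IAMR) hold two access-control bits
 * per protection key. Judging by the names above, the read-block mask
 * repeats 0b01 per key and the write-block mask 0b10, with one key's bit
 * pair left clear; OR'ing the two (AMR_KUAP_BLOCKED) blocks both reads
 * and writes for every covered key.
 */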

#ifdef __ASSEMBLY__
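
/*
 * The BEGIN/END_MMU_FTR_SECTION_NESTED pairs below delimit code that is
 * patched at boot based on MMU feature bits: an _IFSET section is nopped
 * out unless the named MMU_FTR_* bit is set, an _IFCLR section unless it
 * is clear. The trailing number (67, 68, ...) is only a local label that
 * matches each BEGIN with its END so that sections can nest.
 */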
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If the kuap feature is not enabled, only do the
	 * mtspr if the AMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If the kuep feature is not enabled, only do the
	 * mtspr if the IAMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() below */
#endif
.endm
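
/*
 * Kernel-return counterpart to kuap_user_restore above: only the AMR
 * matters here, and it is expected to be unchanged most of the time,
 * so compare first and skip the mtspr/isync when possible.
 */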
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see __kuap_kernel_restore().
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm
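
/*
 * kuap_check_amr is a debug-only assertion: tdne traps if the live AMR
 * (\gpr1) differs from the fully blocked value (\gpr2), and
 * EMIT_WARN_ENTRY records the trap site in the bug table with
 * BUGFLAG_WARNING | BUGFLAG_ONCE so it is reported as a one-time WARN
 * rather than a fatal trap.
 */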
#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm
#endif

/*
 * if (pkey) {
 *
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED -> IAMR
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 * }
 */
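
/*
 * In the macro below, \use_cr and \msr_pr_cr name condition-register
 * fields supplied by the interrupt-entry caller; .ifnb ("if non-blank")
 * assembles the MSR_PR test only when a \msr_pr_cr argument was actually
 * passed, so the same macro serves entry paths with and without a
 * came-from-userspace check.
 */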
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * If both pkey and kuap are disabled, there is nothing to do.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_KUAP, 68)

	/*
	 * If pkey is disabled and we are entering from userspace,
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing the AMR outside the kernel,
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled, or pkey is disabled but we are entering from
	 * the kernel.
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * Update the kernel AMR with AMR_KUAP_BLOCKED only
	 * if the KUAP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an
	 * interrupt.
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69)

	/*
	 * If entering from the kernel we don't need to save the IAMR.
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * Update the kernel IAMR with AMR_KUEP_BLOCKED only
	 * if the KUEP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>
#include <linux/sched.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
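/*
 * uaccess_flush_key is a static branch enabled on CPUs that need the L1D
 * cache flushed after user accesses as a vulnerability mitigation; see
 * the do_uaccess_flush() calls further down.
 */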

#ifdef CONFIG_PPC_PKEY

extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * A kernel thread using kthread_use_mm() should inherit the AMR value of
 * the address space it operates on. But the AMR value is thread-specific,
 * and we inherit the address space, not the thread's access restrictions.
 * Because of this, ignore the thread AMR value when accessing userspace
 * from a kernel thread and fall back to the defaults.
 */
static __always_inline u64 current_thread_amr(void)
{
	if (current->thread.regs)
		return current->thread.regs->amr;
	return default_amr;
}

static __always_inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return default_iamr;
}
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}

static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
	if (likely(regs->amr == amr))
		return;

	isync();
	mtspr(SPRN_AMR, regs->amr);
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 *
	 * No need to restore IAMR when returning to kernel space.
	 */
}

static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
	unsigned long amr = mfspr(SPRN_AMR);

	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
	return amr;
}
#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked

/* __kuap_lock() not required, book3s/64 does that in ASM */

/*
 * We support individually allowing read or write, but we don't support
 * nesting because that would require an expensive read/modify/write of
 * the AMR.
 */

static __always_inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!mmu_has_feature(MMU_FTR_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static __always_inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT).
	 *
	 * We do have an exception table entry, but accessing userspace
	 * nonetheless faulted. This could be because we didn't unlock the
	 * AMR, or because access is denied by a userspace key value that
	 * blocks access. We are only interested in catching the case of
	 * accessing without unlocking the AMR, hence the check for
	 * BLOCK_WRITE/READ against the AMR.
	 */
	if (is_write)
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}
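
/*
 * The fault handler uses the result above to tell a genuine KUAP
 * violation (the kernel touched userspace without unlocking) apart from
 * an access legitimately denied by a user-chosen pkey.
 */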

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}
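
/*
 * Note that each direction sets the complement of what is being allowed:
 * a read-only window leaves only the write-block bits set, a write-only
 * window only the read-block bits, and read/write sets neither (beyond
 * any per-thread pkey bits OR'd in above).
 */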

#else /* CONFIG_PPC_KUAP */

static __always_inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static __always_inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

static __always_inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static __always_inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static __always_inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
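
/*
 * Typical pairing (a sketch; real callers go through the generic uaccess
 * helpers, e.g. user_access_begin()/user_access_end(), rather than
 * open-coding this):
 *
 *	allow_user_access(to, from, size, KUAP_READ_WRITE);
 *	... copy to/from userspace ...
 *	prevent_user_access(KUAP_READ_WRITE);
 *
 * And when the previous state must be saved and restored around a region:
 *
 *	unsigned long flags = prevent_user_access_return();
 *	... no user accesses allowed ...
 *	restore_user_access(flags);
 */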

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */