Path: blob/master/arch/powerpc/include/asm/book3s/64/kup.h
26519 views
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

/*
 * AMR/IAMR values used to block kernel access to userspace.
 *
 * The AMR holds a 2-bit field per protection key; these masks set one of
 * the two bits in (almost) every key field so that all user keys deny
 * read (0x54...) or write (0xa8...) access.
 * NOTE(review): the top field differs (0x54 vs 0x55 pattern) — presumably
 * to leave a kernel-reserved key unblocked; confirm against the Power ISA
 * AMR layout and the pkey reservation in this platform's mmu code.
 */
#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

#ifdef __ASSEMBLY__

/*
 * Restore the user AMR/IAMR saved in pt_regs on the stack when returning
 * to userspace. \gpr1 and \gpr2 are scratch registers. Skipped entirely
 * when the CPU has no pkey support.
 */
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If kuap feature is not enabled, do the mtspr
	 * only if AMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If kuep feature is not enabled, do the mtspr
	 * only if IAMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm

/*
 * Restore the AMR saved in pt_regs when returning to kernel space.
 * Only does the (expensive) mtspr when the saved value differs from the
 * current AMR; IAMR is never restored on the kernel-return path.
 */
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_restore_amr()
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
/*
 * Debug-only sanity check: warn (once) if the AMR is not in the fully
 * blocked state at a point where user access should be locked out.
 * Compiled away unless CONFIG_PPC_KUAP_DEBUG is set.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm
#endif

/*
 * Interrupt-entry logic implemented by kuap_save_amr_and_lock below:
 *
 * if (pkey) {
 *
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED ->IAMR
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *
 * }
 */
/*
 * \gpr1, \gpr2: scratch GPRs.
 * \use_cr:     CR field this macro may clobber for its own compares.
 * \msr_pr_cr:  optional CR field already holding the MSR_PR (came-from-
 *              userspace) test result; when blank, the from-user /
 *              from-kernel branches are omitted at assembly time (.ifnb).
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * if both pkey and kuap is disabled, nothing to do
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_KUAP, 68)

	/*
	 * if pkey is disabled and we are entering from userspace
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing AMR outside the kernel
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled or pkey is disabled but entering from kernel
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * update kernel AMR with AMR_KUAP_BLOCKED only
	 * if KUAP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69)

	/*
	 * if entering from kernel we don't need save IAMR
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * update kernel IAMR with AMR_KUEP_BLOCKED only
	 * if KUEP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>
#include <linux/sched.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>

/* usage of kthread_use_mm() should inherit the
 * AMR value of the operating address space. But, the AMR value is
 * thread-specific and we inherit the address space and not thread
 * access restrictions. Because of this ignore AMR value when accessing
 * userspace via kernel thread.
 */
static __always_inline u64 current_thread_amr(void)
{
	/* Kernel threads have no user regs; fall back to the boot default. */
	if (current->thread.regs)
		return current->thread.regs->amr;
	return default_amr;
}

/* Same policy as current_thread_amr(), for the execute-permission IAMR. */
static __always_inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return default_iamr;
}
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

/*
 * C counterpart of the asm kuap_user_restore macro: write the AMR/IAMR
 * values saved in @regs back to the SPRs before returning to userspace.
 *
 * When KUAP/KUEP is compiled in but the MMU feature is absent, the SPR is
 * only written if its current value differs, to skip the expensive mtspr.
 */
static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}


	if (restore_amr || restore_iamr) {
		/* Single leading isync covers both SPR writes (CSI before mtspr). */
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}

/*
 * Restore the AMR saved in @regs when returning to kernel space.
 * @amr is the current (locked) AMR value, so the mtspr can be skipped in
 * the common case where nothing changed.
 */
static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
	if (likely(regs->amr == amr))
		return;

	isync();
	mtspr(SPRN_AMR, regs->amr);
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 *
	 * No need to restore IAMR when returning to kernel space.
	 */
}

/*
 * Read the current AMR and, in debug builds, warn if it is not fully
 * locked (caller asserts user access should be blocked at this point).
 * Returns the AMR value for a later __kuap_kernel_restore().
 */
static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
	unsigned long amr = mfspr(SPRN_AMR);

	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
	return amr;
}
#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked

/* __kuap_lock() not required, book3s/64 does that in ASM */

/*
 * We support individually allowing read or write, but we don't support nesting
 * because that would require an expensive read/modify write of the AMR.
 */

static __always_inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!mmu_has_feature(MMU_FTR_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static __always_inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

/*
 * Called from the fault path: decide whether a fault at @address was
 * caused by KUAP (access attempted while the AMR still blocked it).
 */
static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT)
	 */
	/*
	 * We do have exception table entry, but accessing the
	 * userspace results in fault. This could be because we
	 * didn't unlock the AMR or access is denied by userspace
	 * using a key value that blocks access. We are only interested
	 * in catching the use case of accessing without unlocking
	 * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
	 */
	if (is_write) {
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
	}
	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}

/*
 * Open a user-access window in direction @dir (KUAP_READ / KUAP_WRITE /
 * KUAP_READ_WRITE). With pkeys enabled, the thread's own AMR is kept so
 * user-selected key restrictions still apply. @to/@from/@size are unused
 * here (kept for the generic allow_user_access() interface).
 */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}

#else /* CONFIG_PPC_KUAP */

/* Stubs for !CONFIG_PPC_KUAP: see get_kuap() comment above for why the
 * BLOCKED value is still reported. */
static __always_inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static __always_inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

/*
 * Close the user-access window; also performs the L1D uaccess flush
 * when the mitigation static key is enabled.
 */
static __always_inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

/*
 * As prevent_user_access(), but returns the previous AMR state so the
 * caller can hand it back to restore_user_access() later.
 */
static __always_inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

/*
 * Re-establish a saved user-access state. Flushes only when the window
 * is being re-closed (flags == AMR_KUAP_BLOCKED).
 */
static __always_inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */