1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
3 #define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
5 #include <linux/const.h>
/*
 * AMR (Authority Mask Register) bit encodings used for Kernel Userspace
 * Access Prevention (KUAP) on radix.  Bit 62 blocks reads, bit 63 blocks
 * writes (IBM big-endian bit numbering: the two most-significant bits of
 * the 64-bit value).  AMR_KUAP_BLOCKED sets both, i.e. all user access
 * is prevented.  AMR_KUAP_SHIFT lets asm code materialise the mask with
 * li + sldi (the 2-bit mask shifted up by 62).
 */
8 #define AMR_KUAP_BLOCK_READ UL(0x4000000000000000)
9 #define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000)
10 #define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
11 #define AMR_KUAP_SHIFT 62
/*
 * Assembly helper: restore the AMR from the value saved in the stack
 * pt_regs (STACK_REGS_KUAP slot), on the interrupt-return path.  Only
 * emitted when the MMU has radix KUAP (alt-feature section 67).
 * NOTE(review): the mtspr writing \gpr2 back to SPRN_AMR and the
 * closing .endm are in lines elided from this view — confirm against
 * the full file.
 */
15 .macro kuap_restore_amr gpr1, gpr2
16 #ifdef CONFIG_PPC_KUAP
17 BEGIN_MMU_FTR_SECTION_NESTED(67)
/* Reload the AMR value that was saved into pt_regs on entry. */
19 ld \gpr2, STACK_REGS_KUAP(r1)
24 /* No isync required, see kuap_restore_amr() */
26 END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
/*
 * Assembly helper (CONFIG_PPC_KUAP_DEBUG only): sanity-check that the
 * AMR is in the fully-blocked state.  Builds AMR_KUAP_BLOCKED in \gpr2
 * via li + sldi (the constant doesn't fit an immediate), then traps
 * with a warn-once BUG entry if \gpr1 differs.
 * NOTE(review): the mfspr that loads the live AMR into \gpr1, and the
 * closing .endm, are in lines elided from this view — confirm.
 */
30 .macro kuap_check_amr gpr1, gpr2
31 #ifdef CONFIG_PPC_KUAP_DEBUG
32 BEGIN_MMU_FTR_SECTION_NESTED(67)
/* Materialise AMR_KUAP_BLOCKED: load the 2-bit mask, shift to bit 62. */
34 li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
35 sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
/* Trap (warn once, don't panic) if the AMR is not fully blocked. */
36 999: tdne \gpr1, \gpr2
37 EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
38 END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
/*
 * Assembly helper for interrupt entry: save the current AMR into the
 * stack pt_regs, then lock user access by switching the AMR to the
 * fully-blocked value.  The cmpd into \use_cr compares the saved AMR
 * against AMR_KUAP_BLOCKED — presumably so the (elided) code can skip
 * the mtspr when the AMR is already blocked; TODO confirm against the
 * full file.  \msr_pr_cr is unused in the visible lines — its use
 * (likely an MSR_PR check) is also in elided code.
 */
42 .macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
43 #ifdef CONFIG_PPC_KUAP
44 BEGIN_MMU_FTR_SECTION_NESTED(67)
/* Save the entry-time AMR (in \gpr1) for restoration on exit. */
49 std \gpr1, STACK_REGS_KUAP(r1)
/* Build AMR_KUAP_BLOCKED in \gpr2 and compare with the saved value. */
50 li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
51 sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
52 cmpd \use_cr, \gpr1, \gpr2
54 // We don't isync here because we very recently entered via rfid
58 END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
62 #else /* !__ASSEMBLY__ */
64 DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
66 #ifdef CONFIG_PPC_KUAP
69 #include <asm/ptrace.h>
/*
 * Restore the AMR saved in pt_regs when returning from an interrupt.
 * @regs: interrupted context; regs->kuap holds the AMR to restore.
 * @amr:  the current live AMR value (read by the caller) — the mtspr
 *        is skipped when it already matches, since mtspr AMR is slow.
 * Only acts when the MMU supports radix KUAP.
 */
71 static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
73 if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
75 mtspr(SPRN_AMR, regs->kuap);
77 * No isync required here because we are about to RFI back to
78 * previous context before any user accesses would be made,
/*
 * Read the current AMR, and on CONFIG_PPC_KUAP_DEBUG builds warn once
 * if it is not in the fully-blocked state (mirrors kuap_check_amr()).
 * Returns the AMR value read; the return statements (including the
 * non-radix-KUAP fallback) are in lines elided from this view —
 * NOTE(review): confirm the fallback return value in the full file.
 */
84 static inline unsigned long kuap_get_and_check_amr(void)
86 if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
87 unsigned long amr = mfspr(SPRN_AMR);
88 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
89 WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
/*
 * Debug-only sanity check: warn once if the AMR is not fully blocked.
 * Compiles away entirely unless CONFIG_PPC_KUAP_DEBUG is enabled and
 * the MMU supports radix KUAP.
 */
95 static inline void kuap_check_amr(void)
97 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
98 WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
102 * We support individually allowing read or write, but we don't support nesting
103 * because that would require an expensive read/modify write of the AMR.
/*
 * Return the current KUAP state (the live AMR value on radix KUAP).
 * On MMUs without radix KUAP this deliberately returns AMR_KUAP_BLOCKED
 * rather than 0 — see the comment below for why.
 */
106 static inline unsigned long get_kuap(void)
109 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
110 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
111 * cause restore_user_access to do a flush.
113 * This has no effect in terms of actually blocking things on hash,
114 * so it doesn't break anything.
116 if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
117 return AMR_KUAP_BLOCKED;
119 return mfspr(SPRN_AMR);
/*
 * Write a new KUAP state into the AMR.  No-op (early return, elided
 * from this view) on MMUs without radix KUAP.  The surrounding isync
 * barriers required by the ISA are also in elided lines.
 */
122 static inline void set_kuap(unsigned long value)
124 if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
128 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
129 * before and after the move to AMR. See table 6 on page 1134.
132 mtspr(SPRN_AMR, value);
/*
 * Decide whether a page fault at @address was caused by KUAP blocking
 * the access: true (and a WARN splat) when radix KUAP is active and the
 * saved AMR in regs->kuap has the relevant block bit set for the access
 * direction.  @address is unused in the visible body.  The return-type
 * line of this definition is elided from this view.
 */
137 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
139 return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
140 (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
141 "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
143 #else /* CONFIG_PPC_KUAP */
/*
 * Stubs for kernels built without CONFIG_PPC_KUAP.  Most are no-ops,
 * but get_kuap() still returns AMR_KUAP_BLOCKED so that the
 * prevent_user_access_return()/restore_user_access() flush logic
 * behaves consistently (see the comment in the real get_kuap()).
 */
144 static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
/* Body (elided from this view) presumably returns a constant — confirm. */
146 static inline unsigned long kuap_get_and_check_amr(void)
151 static inline void kuap_check_amr(void) { }
153 static inline unsigned long get_kuap(void)
155 return AMR_KUAP_BLOCKED;
158 static inline void set_kuap(unsigned long value) { }
159 #endif /* !CONFIG_PPC_KUAP */
/*
 * Open a user-access window for the given direction.  @dir must be a
 * compile-time constant so the whole function folds to a single
 * set_kuap() call: allowing READ keeps writes blocked, allowing WRITE
 * keeps reads blocked.  The READ_WRITE arm (and any else/BUILD_BUG
 * fallback) is in lines elided from this view.  @to/@from/@size are
 * unused in the visible body.
 */
161 static __always_inline void allow_user_access(void __user *to, const void __user *from,
162 unsigned long size, unsigned long dir)
164 // This is written so we can resolve to a single case at build time
165 BUILD_BUG_ON(!__builtin_constant_p(dir));
166 if (dir == KUAP_READ)
167 set_kuap(AMR_KUAP_BLOCK_WRITE);
168 else if (dir == KUAP_WRITE)
169 set_kuap(AMR_KUAP_BLOCK_READ);
170 else if (dir == KUAP_READ_WRITE)
/*
 * Close the user-access window: block both reads and writes via the
 * AMR.  When the uaccess-flush static key is enabled, a flush follows
 * (the call itself is in a line elided from this view).  @to/@from/
 * @size/@dir are unused in the visible body.
 */
176 static inline void prevent_user_access(void __user *to, const void __user *from,
177 unsigned long size, unsigned long dir)
179 set_kuap(AMR_KUAP_BLOCKED);
180 if (static_branch_unlikely(&uaccess_flush_key))
/*
 * Like prevent_user_access(), but first snapshots the current KUAP
 * state so the caller can hand it back to restore_user_access() later.
 * The `return flags;` and the flush call are in elided lines.
 */
184 static inline unsigned long prevent_user_access_return(void)
186 unsigned long flags = get_kuap();
188 set_kuap(AMR_KUAP_BLOCKED);
189 if (static_branch_unlikely(&uaccess_flush_key))
/*
 * Re-establish a KUAP state previously captured by
 * prevent_user_access_return().  When the uaccess-flush key is enabled
 * and the restored state is fully blocked (i.e. we are closing, not
 * reopening, the window), a flush is performed — the set_kuap(flags)
 * call and the flush call are in lines elided from this view.
 */
195 static inline void restore_user_access(unsigned long flags)
198 if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
201 #endif /* __ASSEMBLY__ */
203 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */