OSDN Git Service

97c2394e7deaf9545c83448df2b448e29c5d4e6c
[tomoyo/tomoyo-test1.git] / arch / powerpc / include / asm / book3s / 64 / kup-radix.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
3 #define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
4
5 #include <linux/const.h>
6 #include <asm/reg.h>
7
8 #define AMR_KUAP_BLOCK_READ     UL(0x4000000000000000)
9 #define AMR_KUAP_BLOCK_WRITE    UL(0x8000000000000000)
10 #define AMR_KUAP_BLOCKED        (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
11 #define AMR_KUAP_SHIFT          62
12
13 #ifdef __ASSEMBLY__
14
/*
 * Assembly mirror of the C kuap_restore_amr(): restore the AMR from the
 * value saved in the pt_regs area on the kernel stack, skipping the
 * (expensive) mtspr when the current AMR already matches the saved one.
 * Patched out entirely unless the MMU reports MMU_FTR_RADIX_KUAP.
 * Clobbers \gpr1 and \gpr2.
 */
.macro kuap_restore_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	ld	\gpr2, STACK_REGS_KUAP(r1)
	cmpd	\gpr1, \gpr2
	beq	998f			/* already correct, skip the mtspr */
	isync
	mtspr	SPRN_AMR, \gpr2
	/* No isync required, see kuap_restore_amr() */
998:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
29
/*
 * Debug-only (CONFIG_PPC_KUAP_DEBUG) sanity check: WARN once if the AMR
 * is not in the fully blocked state.  Patched out unless the MMU reports
 * MMU_FTR_RADIX_KUAP.  Clobbers \gpr1 and \gpr2.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Materialize AMR_KUAP_BLOCKED: too wide for a single li immediate */
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
	/* Trap (reported as a one-shot WARN) when the AMR differs */
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
41
/*
 * Interrupt-entry sequence: save the current AMR into the pt_regs area
 * on the stack, then lock user access by setting the AMR to the fully
 * blocked value.  The mtspr is skipped when the AMR is already blocked.
 * Clobbers \gpr1 and \gpr2; \use_cr names the CR field for the compare.
 *
 * NOTE(review): when \msr_pr_cr is supplied the whole sequence is
 * skipped on a taken "bne" in that CR field — presumably it holds the
 * result of testing MSR_PR so user-mode entries branch out; confirm
 * against the callers in the exception entry code.
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	bne	\msr_pr_cr, 99f
	.endif
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_KUAP(r1)
	/* Materialize AMR_KUAP_BLOCKED: too wide for a single li immediate */
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 99f		/* already blocked, skip the mtspr */
	// We don't isync here because we very recently entered via rfid
	mtspr	SPRN_AMR, \gpr2
	isync
99:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
61
62 #else /* !__ASSEMBLY__ */
63
64 DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
65
66 #ifdef CONFIG_PPC_KUAP
67
68 #include <asm/mmu.h>
69 #include <asm/ptrace.h>
70
71 static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
72 {
73         if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
74                 isync();
75                 mtspr(SPRN_AMR, regs->kuap);
76                 /*
77                  * No isync required here because we are about to RFI back to
78                  * previous context before any user accesses would be made,
79                  * which is a CSI.
80                  */
81         }
82 }
83
84 static inline unsigned long kuap_get_and_check_amr(void)
85 {
86         if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
87                 unsigned long amr = mfspr(SPRN_AMR);
88                 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
89                         WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
90                 return amr;
91         }
92         return 0;
93 }
94
95 static inline void kuap_check_amr(void)
96 {
97         if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
98                 WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
99 }
100
101 /*
102  * We support individually allowing read or write, but we don't support nesting
103  * because that would require an expensive read/modify write of the AMR.
104  */
105
106 static inline unsigned long get_kuap(void)
107 {
108         /*
109          * We return AMR_KUAP_BLOCKED when we don't support KUAP because
110          * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
111          * cause restore_user_access to do a flush.
112          *
113          * This has no effect in terms of actually blocking things on hash,
114          * so it doesn't break anything.
115          */
116         if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
117                 return AMR_KUAP_BLOCKED;
118
119         return mfspr(SPRN_AMR);
120 }
121
122 static inline void set_kuap(unsigned long value)
123 {
124         if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
125                 return;
126
127         /*
128          * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
129          * before and after the move to AMR. See table 6 on page 1134.
130          */
131         isync();
132         mtspr(SPRN_AMR, value);
133         isync();
134 }
135
136 static inline bool
137 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
138 {
139         return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
140                     (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
141                     "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
142 }
#else /* CONFIG_PPC_KUAP */
/* Without KUAP, AMR save/restore and the debug check are all no-ops. */
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0UL;
}

static inline void kuap_check_amr(void) { }

/*
 * Still report AMR_KUAP_BLOCKED (not 0) so that restore_user_access()
 * sees a "blocked" state returned from prevent_user_access_return()
 * and performs the uaccess flush.
 */
static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }
#endif /* !CONFIG_PPC_KUAP */
160
161 static __always_inline void allow_user_access(void __user *to, const void __user *from,
162                                               unsigned long size, unsigned long dir)
163 {
164         // This is written so we can resolve to a single case at build time
165         BUILD_BUG_ON(!__builtin_constant_p(dir));
166         if (dir == KUAP_READ)
167                 set_kuap(AMR_KUAP_BLOCK_WRITE);
168         else if (dir == KUAP_WRITE)
169                 set_kuap(AMR_KUAP_BLOCK_READ);
170         else if (dir == KUAP_READ_WRITE)
171                 set_kuap(0);
172         else
173                 BUILD_BUG();
174 }
175
/*
 * Close the user access window: block both reads and writes via the
 * AMR, then run the uaccess flush mitigation when its static key is
 * enabled.  @to/@from/@size/@dir are unused here.
 */
static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}
183
/*
 * Same as prevent_user_access(), but returns the previous KUAP (AMR)
 * state so the caller can hand it to restore_user_access() later.
 */
static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}
194
/*
 * Restore a KUAP state previously returned by prevent_user_access_return().
 * The flush is only needed when restoring the fully blocked state, i.e.
 * when this restore closes (rather than re-opens) a user access window.
 */
static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
201 #endif /* __ASSEMBLY__ */
202
203 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */