1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
3 #define _ASM_POWERPC_BOOK3S_32_KUP_H
6 #include <asm/book3s/32/mmu-hash.h>
12 #include <linux/jump_label.h>
14 extern struct static_key_false disable_kuap_key;
15 extern struct static_key_false disable_kuep_key;
/*
 * kuap_is_disabled() - true when Kernel Userspace Access Protection is
 * either compiled out (CONFIG_PPC_KUAP unset) or runtime-disabled via
 * the disable_kuap_key static key (e.g. "nosmap"-style boot parameter —
 * exact trigger set elsewhere; confirm against the key's definition).
 */
17 static __always_inline bool kuap_is_disabled(void)
/* static_branch_unlikely(): near-zero cost when KUAP is enabled (the common case) */
19 return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
/*
 * kuep_is_disabled() - true when Kernel Userspace Execution Prevention is
 * compiled out (CONFIG_PPC_KUEP unset) or runtime-disabled via the
 * disable_kuep_key static key.  Mirrors kuap_is_disabled() above.
 */
22 static __always_inline bool kuep_is_disabled(void)
24 return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
/*
 * kuep_lock() - engage KUEP: set the No-Execute (SR_NX) bit in the user
 * segment registers so the kernel cannot execute user-space code.
 *
 * The new segment value is derived from segment register 0 (mfsr(0));
 * update_user_segments() (from mmu-hash.h) presumably applies it to all
 * user segments — confirm against its definition.
 */
27 static inline void kuep_lock(void)
/* No-op when KUEP is compiled out or runtime-disabled */
29 if (kuep_is_disabled())
32 update_user_segments(mfsr(0) | SR_NX);
34 * This isync() shouldn't be necessary as the kernel is not expected to
35 * run any instruction in userspace soon after the update of segments,
36 * but hash based cores (at least G3) seem to exhibit a random
37 * behaviour when the 'isync' is not there. 603 cores don't have this
38 * behaviour so don't do the 'isync' as it saves several CPU cycles.
40 if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
41 isync(); /* Context sync required after mtsr() */
/*
 * kuep_unlock() - disengage KUEP: clear SR_NX in the user segment
 * registers, re-allowing the kernel to execute from user addresses.
 * Exact inverse of kuep_lock() above.
 */
44 static inline void kuep_unlock(void)
46 if (kuep_is_disabled())
49 update_user_segments(mfsr(0) & ~SR_NX);
51 * This isync() shouldn't be necessary as a 'rfi' will soon be executed
52 * to return to userspace, but hash based cores (at least G3) seem to
53 * exhibit a random behaviour when the 'isync' is not there. 603 cores
54 * don't have this behaviour so don't do the 'isync' as it saves several
* CPU cycles.
57 if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
58 isync(); /* Context sync required after mtsr() */
61 #ifdef CONFIG_PPC_KUAP
63 #include <linux/sched.h>
/*
 * Sentinel values stored in thread.kuap / regs->kuap alongside real user
 * addresses (see kuap_lock()/kuap_unlock() below, which treat any other
 * value as the address of the single unlocked segment):
 *   KUAP_NONE - no user segment is currently unlocked
 *   KUAP_ALL  - every user segment is unlocked
 * Both are impossible as unlocked-segment addresses, so they are safe
 * in-band sentinels.
 */
65 #define KUAP_NONE (~0UL)
66 #define KUAP_ALL (~1UL)
/*
 * kuap_lock_one() - re-protect the single user segment covering @addr by
 * setting the SR_KS (supervisor key) bit in its segment register, so
 * kernel accesses to that segment fault again.  Segments are selected by
 * the top address bits (see the 0xf0000000 mask in bad_kuap_fault()).
 */
68 static inline void kuap_lock_one(unsigned long addr)
70 mtsr(mfsr(addr) | SR_KS, addr);
71 isync(); /* Context sync required after mtsr() */
/*
 * kuap_unlock_one() - open a kernel-access window on the single user
 * segment covering @addr by clearing SR_KS in its segment register.
 * Inverse of kuap_lock_one() above.
 */
74 static inline void kuap_unlock_one(unsigned long addr)
76 mtsr(mfsr(addr) & ~SR_KS, addr);
77 isync(); /* Context sync required after mtsr() */
/*
 * kuap_lock_all() - re-protect every user segment at once: derive the
 * new value from segment register 0 with SR_KS set and apply it via
 * update_user_segments().  Used for the KUAP_ALL state.
 */
80 static inline void kuap_lock_all(void)
82 update_user_segments(mfsr(0) | SR_KS);
83 isync(); /* Context sync required after mtsr() */
/*
 * kuap_unlock_all() - open kernel access to every user segment by
 * clearing SR_KS across them.  Inverse of kuap_lock_all() above.
 */
86 static inline void kuap_unlock_all(void)
88 update_user_segments(mfsr(0) & ~SR_KS);
89 isync(); /* Context sync required after mtsr() */
92 void kuap_lock_all_ool(void);
93 void kuap_unlock_all_ool(void);
/*
 * kuap_lock() - re-protect according to saved state @addr: a real
 * address means one segment was unlocked, KUAP_ALL means all were.
 * @ool selects the out-of-line variant (kuap_lock_all_ool()) for the
 * all-segments path — presumably to keep inline callers small; the
 * else-branches are not visible in this view, confirm against the
 * full file.
 */
95 static inline void kuap_lock(unsigned long addr, bool ool)
/* Common case: exactly one segment to re-lock */
97 if (likely(addr != KUAP_ALL))
/*
 * kuap_unlock() - re-open kernel access according to saved state @addr:
 * a real address reopens that one segment, KUAP_ALL reopens all of them
 * (out of line via kuap_unlock_all_ool() when @ool is set).
 */
105 static inline void kuap_unlock(unsigned long addr, bool ool)
/* Common case: exactly one segment to unlock */
107 if (likely(addr != KUAP_ALL))
108 kuap_unlock_one(addr);
112 kuap_unlock_all_ool();
/*
 * kuap_save_and_lock() - on interrupt/exception entry, stash the current
 * thread's KUAP state (presumably into regs->kuap — the store is not
 * visible in this view, confirm) and close any open user-access window.
 * Paired with kuap_kernel_restore() on exit.
 */
115 static inline void kuap_save_and_lock(struct pt_regs *regs)
117 unsigned long kuap = current->thread.kuap;
119 if (kuap_is_disabled())
/* KUAP_NONE: nothing was unlocked, so nothing to re-lock */
123 if (unlikely(kuap == KUAP_NONE))
/* Mark everything locked before actually locking (kuap holds the old state) */
126 current->thread.kuap = KUAP_NONE;
127 kuap_lock(kuap, false);
/*
 * kuap_user_restore() - hook for restoring KUAP state when returning to
 * userspace.  Body not visible in this view — presumably empty on 32s
 * since userspace needs no unlocked window; confirm against the full file.
 */
130 static inline void kuap_user_restore(struct pt_regs *regs)
/*
 * kuap_kernel_restore() - on interrupt/exception exit back to the kernel,
 * restore the KUAP state saved by kuap_save_and_lock(): put the saved
 * value back in thread.kuap and re-open the matching access window.
 * The @kuap argument is unused here; regs->kuap carries the saved state.
 */
134 static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
136 if (kuap_is_disabled())
139 current->thread.kuap = regs->kuap;
141 kuap_unlock(regs->kuap, false);
/*
 * kuap_get_and_assert_locked() - fetch the current thread's KUAP state
 * and, under CONFIG_PPC_KUAP_DEBUG, warn if a user-access window is
 * still open (state != KUAP_NONE) at a point where everything should be
 * locked.  Return value/paths are partly outside this view — presumably
 * returns the fetched state; confirm against the full file.
 */
144 static inline unsigned long kuap_get_and_assert_locked(void)
146 unsigned long kuap = current->thread.kuap;
148 if (kuap_is_disabled())
/* Debug-only check: warn once if an access window was left open */
151 WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
/*
 * kuap_assert_locked() - assertion-only variant of
 * kuap_get_and_assert_locked(); the fetched state is discarded.
 */
156 static inline void kuap_assert_locked(void)
158 kuap_get_and_assert_locked();
/*
 * allow_user_access() - open a user-access window before a copy to/from
 * userspace.  Only the write direction needs unlocking here (the early
 * return on !(dir & KUAP_WRITE) shows reads need no window — presumably
 * the Ks key only blocks supervisor writes on 32s; confirm).  Records
 * the unlocked segment's address in thread.kuap so it can be re-locked
 * (and recognised by bad_kuap_fault()) later.  @from and @size are
 * unused on this platform.
 */
161 static __always_inline void allow_user_access(void __user *to, const void __user *from,
162 u32 size, unsigned long dir)
164 if (kuap_is_disabled())
/* dir must be a compile-time constant so the checks fold away */
167 BUILD_BUG_ON(!__builtin_constant_p(dir));
169 if (!(dir & KUAP_WRITE))
/* Remember which segment is open, then open it */
172 current->thread.kuap = (__force u32)to;
173 kuap_unlock_one((__force u32)to);
/*
 * prevent_user_access() - close the user-access window opened by
 * allow_user_access().  Reads thread.kuap for the state to re-lock,
 * resets it to KUAP_NONE, then locks via kuap_lock() (out-of-line
 * variant for the KUAP_ALL case, per the 'true' ool argument).
 */
176 static __always_inline void prevent_user_access(unsigned long dir)
178 u32 kuap = current->thread.kuap;
180 if (kuap_is_disabled())
/* dir must be a compile-time constant, mirroring allow_user_access() */
183 BUILD_BUG_ON(!__builtin_constant_p(dir));
/* Non-write accesses never opened a window, so there is nothing to close */
185 if (!(dir & KUAP_WRITE))
188 current->thread.kuap = KUAP_NONE;
189 kuap_lock(kuap, true);
/*
 * prevent_user_access_return() - close any open user-access window and
 * hand back the previous state so restore_user_access() can reinstate
 * it.  Return paths are partly outside this view — presumably returns
 * @flags (KUAP_NONE when disabled); confirm against the full file.
 */
192 static inline unsigned long prevent_user_access_return(void)
194 unsigned long flags = current->thread.kuap;
196 if (kuap_is_disabled())
/* Only re-lock if a window was actually open */
199 if (flags != KUAP_NONE) {
200 current->thread.kuap = KUAP_NONE;
201 kuap_lock(flags, true);
/*
 * restore_user_access() - reinstate the user-access window described by
 * @flags, as previously returned by prevent_user_access_return().
 * KUAP_NONE means no window was open, so nothing to do.
 */
207 static inline void restore_user_access(unsigned long flags)
209 if (kuap_is_disabled())
212 if (flags != KUAP_NONE) {
213 current->thread.kuap = flags;
214 kuap_unlock(flags, true);
/*
 * bad_kuap_fault() - called from the fault handler to decide whether a
 * kernel write to userspace is a genuine KUAP violation.  Reads the
 * saved state from regs->kuap:
 *   - reads, or state KUAP_ALL (all segments open): not a violation;
 *   - state KUAP_NONE (no window open): a write fault here is genuine;
 *   - otherwise, a write that missed the single unlocked segment widens
 *     the window to KUAP_ALL so the access can be retried.
 * Return statements are not visible in this view — presumably returns
 * true only for the KUAP_NONE write case; confirm against the full file.
 * (Leading 'static inline bool' line is outside this view.)
 */
219 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
221 unsigned long kuap = regs->kuap;
223 if (kuap_is_disabled())
226 if (!is_write || kuap == KUAP_ALL)
228 if (kuap == KUAP_NONE)
231 /* If faulting address doesn't match unlocked segment, unlock all */
/* 0xf0000000: top 4 address bits select the 256MB segment */
232 if ((kuap ^ address) & 0xf0000000)
233 regs->kuap = KUAP_ALL;
238 #endif /* CONFIG_PPC_KUAP */
240 #endif /* __ASSEMBLY__ */
242 #endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */