// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/kfence.h>
#include <linux/entry-common.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"
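
/*
 * Report an unrecoverable fault taken in kernel mode: log the faulting
 * virtual address and kill the current task.
 */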
static void die_kernel_fault(const char *msg, unsigned long addr,
			     struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg,
		 addr);

	bust_spinlocks(0);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}
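
/*
 * Fault taken in kernel context: try an exception-table fixup first,
 * otherwise report the failure and die.
 */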
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	const char *msg;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (addr < PAGE_SIZE)
		msg = "NULL pointer dereference";
	else {
		if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, addr, regs);
}
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to userspace
		 * (which will retry the fault, or kill us if we got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}
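
/*
 * The access was not covered by a valid mapping: drop the mmap lock and
 * send SIGSEGV to user tasks, or fall back to no_context() for the kernel.
 */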
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}
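
/*
 * A kernel-space address faulted: synchronize this task's top-level page
 * table entry with the reference page table (init_mm.pgd) so that kernel
 * mappings created after this task's page table was built become visible.
 */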
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud_k;
	p4d_t *p4d_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}
	if (pud_leaf(*pud_k))
		goto flush_tlb;

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	if (pmd_leaf(*pmd_k))
		goto flush_tlb;

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
flush_tlb:
	local_flush_tlb_page(addr);
}
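
/*
 * Check whether the faulting access is permitted by the VMA's protection
 * flags, based on the exception cause (instruction fetch, load or store).
 */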
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC)) {
			return true;
		}
		break;
	case EXC_LOAD_PAGE_FAULT:
		/* Write implies read */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE))) {
			return true;
		}
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE)) {
			return true;
		}
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}
/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
void handle_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;
	if (kprobe_page_fault(regs, cause))
		return;
	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}
#ifdef CONFIG_64BIT
	/*
	 * Modules in 64bit kernels lie in their own virtual region which is not
	 * in the vmalloc region, but dealing with page faults in this region
	 * or the vmalloc region amounts to doing the same thing: checking that
	 * the mapping exists in init_mm.pgd and updating the page table, so
	 * just use vmalloc_fault.
	 */
	if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
		vmalloc_fault(regs, code, addr);
		return;
	}
#endif
	/* Enable interrupts if they were enabled in the parent context. */
	if (!regs_irqs_disabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		tsk->thread.bad_cause = cause;
		no_context(regs, addr);
		return;
	}
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
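
	/*
	 * On RISC-V, the SUM bit in the status register must be set for the
	 * kernel to access user memory. A kernel-mode fault on a user address
	 * with SUM clear therefore means an access outside the uaccess
	 * helpers, which is a bug rather than a normal fault.
	 */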
	if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
		if (fixup_exception(regs))
			return;

		die_kernel_fault("access to user memory without uaccess routines", addr, regs);
	}
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
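
	/*
	 * Translate the RISC-V exception cause into generic fault flags:
	 * store faults become write faults, instruction faults become
	 * execute faults.
	 */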
	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
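
	/*
	 * Look up the VMA covering the faulting address under the mmap read
	 * lock, expanding the stack if the address lies just below a
	 * VM_GROWSDOWN mapping.
	 */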
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);
	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		tsk->thread.bad_cause = cause;
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}