// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}
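
/*
 * VM_READ_BIT, VM_WRITE_BIT and VM_EXEC_BIT below mirror the bit positions
 * of VM_READ, VM_WRITE and VM_EXEC in <linux/mm.h>; the #if check inside
 * ia64_do_page_fault() breaks the build if they ever drift apart, so the
 * ISR-derived access mask can be compared directly against vma->vm_flags.
 */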
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;
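
	/*
	 * The ISR X bit flags an execute access and the W bit a write access;
	 * build a mask with the same bit layout as vma->vm_flags so the two
	 * can be compared directly further down.
	 */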
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * This is to handle the kprobes on user space access instructions
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma )
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but could be that the last vm area is the
	 * register backing store that needs to expand upwards, in
	 * this case vma will be null, but prev_vma will be non-null
	 */
	if (( !vma && prev_vma ) || (address < vma->vm_start) )
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;
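
	/*
	 * Decode fatal fault results: VM_FAULT_OOM is sent to the
	 * out_of_memory path, VM_FAULT_SIGSEGV and VM_FAULT_SIGBUS are
	 * reported through bad_area.
	 */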
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;
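
	/*
	 * The address is outside every VMA.  It may still be a legitimate
	 * stack access: either the next VMA grows downward (VM_GROWSDOWN)
	 * over this address, or the previous VMA is the register backing
	 * store growing upward (VM_GROWSUP) to exactly this address.
	 */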
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;
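
	/*
	 * No usable mapping could be found or created.  Speculative loads and
	 * lfetch.fault just get the "ed" bit set, user tasks receive a signal,
	 * and kernel-mode faults fall through to no_context below.
	 */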
  bad_area:
	mmap_read_unlock(mm);
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}
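
	/*
	 * no_context: the fault happened in kernel mode (or with no mm).  Try
	 * the benign explanations first (speculative access, stale region-5
	 * translation, exception-table fixup) before declaring an oops.
	 */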
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale.  If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;
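
	/*
	 * If the faulting instruction has an exception-table fixup (as the
	 * uaccess helpers do), ia64_done_with_exception() redirects execution
	 * to the fixup code and we are done.
	 */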
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;
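
	/*
	 * out_of_memory: drop mmap_lock and let the core OOM code decide what
	 * to do; kernel-mode faults are handled like any other unresolved
	 * kernel fault via no_context.
	 */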
  out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}