// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef  __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

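/* Defined in arch/alpha/kernel/traps.c.  */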
extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);

/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif
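/* The ASN tags TLB entries with an address space, so a context switch
   need not flush the whole TLB; when the hardware ASN space wraps, the
   version number kept in the upper bits forces a fresh allocation.  */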

void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;

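	/* Propagate the new ASN and page-table base into the hardware
	   PCB; the PALcode picks these up on the next context switch.  */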
	pcb = &current_thread_info()->pcb;
	pcb->asn = mmc & HARDWARE_ASN_MASK;
	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(pcb);
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers.  */
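/* pt_regs holds r0-r8 at indices 0-8 with r19-r28 right behind them
   (hence (r)-10), while r16-r18 sit at the tail of the frame ((r)+10)
   and r9-r15 live in the save block just below `regs', so (r)-16
   yields a negative index into that block.  */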
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+10 : (r)-10])

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	const struct exception_table_entry *fixup;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int __user *)regs->pc);
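		/* Bits <25:21> are the Ra field; 0x1f is register 31.
		   Each set bit in the 0x30f00001400 mask below is the
		   opcode (insn bits <31:26>) of one of the loads listed:
		   0x0a/0x0c for ldbu/ldwu, 0x20-0x23 for the FP loads,
		   0x28-0x29 for ldl/ldq.  */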
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || faulthandler_disabled())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
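	/* lock_mm_and_find_vma() takes the mmap lock for reading, looks
	   up the vma containing `address' (growing the stack if need
	   be), and on failure returns NULL with the lock already
	   dropped.  */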
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
	si_code = SEGV_ACCERR;
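	/* The mapping exists, so any failure from here on is a
	   permission problem: SEGV_ACCERR rather than SEGV_MAPERR.
	   Per the table above, cause < 0 is an instruction fetch,
	   0 a load, and 1 a store.  */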
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	}

	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */
	fault = handle_mm_fault(vma, address, flags, regs);

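	/* The result is a VM_FAULT_* bitmask, picked apart below.  Since
	   `regs' was passed in, the core code has also accounted the
	   major/minor fault perf events for us.  */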
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

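	/* A retry means the core code dropped the mmap lock, e.g. to
	   wait on page I/O; go around again with FAULT_FLAG_TRIED set
	   so it knows this is the second attempt.  */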
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);

	return;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
 bad_area:
	mmap_read_unlock(mm);

 bad_area_nosemaphore:
	if (user_mode(regs))
		goto do_sigsegv;

 no_context:
	/* Are we prepared to handle this fault as an exception?  */
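	/* fixup_exception() is handed the dpf_reg() accessor so it can
	   zero the value register and stuff -EFAULT into the error
	   register of the interrupted sequence before resuming at the
	   fixup address.  */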
	if ((fixup = search_exception_tables(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	   terminate things with extreme prejudice.  */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	make_task_dead(SIGKILL);

	/* We ran out of memory, or some other thing happened to us that
	   made us unable to handle the page fault gracefully.  */
 out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	mmap_read_unlock(mm);
	/* Send a sigbus, regardless of whether we were in kernel
	   or user mode.  */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address);
	if (!user_mode(regs))
		goto no_context;
	return;

 do_sigsegv:
	force_sig_fault(SIGSEGV, si_code, (void __user *) address);
	return;

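/* A vmalloc mapping is created in init's page table (swapper_pg_dir)
   and may not yet be present in the current task's top-level page
   table; copy the pgd entry over lazily instead of oopsing.  */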
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
	if (user_mode(regs))
		goto do_sigsegv;
	else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  */
		long index = pgd_index(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + index;
		pgd_k = swapper_pg_dir + index;
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}