diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
select HAS_IOPORT
select HAVE_ARCH_AUDITSYSCALL
select HAVE_MOD_ARCH_SPECIFIC
+ select LOCK_MM_AND_FIND_VMA
select MODULES_USE_ELF_RELA
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/* Ok, we have a good vm_area for this memory access, so
we can handle it. */
- good_area:
si_code = SEGV_ACCERR;
if (cause < 0) {
if (!(vma->vm_flags & VM_EXEC))
bad_area:
mmap_read_unlock(mm);
+ bad_area_nosemaphore:
if (user_mode(regs))
goto do_sigsegv;
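
Every conversion below follows the shape of this alpha hunk: the open-coded sequence of mmap_read_lock(), find_vma(), and the VM_GROWSDOWN/expand_stack() stack-growth check collapses into a single lock_mm_and_find_vma() call. A simplified sketch of the helper, reconstructed from the code these hunks delete (the real version lives in mm/memory.c behind CONFIG_LOCK_MM_AND_FIND_VMA and additionally upgrades to the mmap write lock before expanding the stack; get_mmap_lock_carefully() is sketched after the nios2 hunk below):

struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
					    unsigned long addr,
					    struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	/*
	 * Take the mmap read lock, refusing to sleep on kernel-mode
	 * faults that have no exception fixup.
	 */
	if (!get_mmap_lock_carefully(mm, regs))
		return NULL;

	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;	/* success: lock still held */

	/* Unmapped address: only a growable stack VMA can save us. */
	if (!vma || !(vma->vm_flags & VM_GROWSDOWN) ||
	    expand_stack(vma, addr)) {
		/*
		 * The failure path drops the lock, which is why the
		 * callers' error labels become bad_area_nosemaphore.
		 */
		mmap_read_unlock(mm);
		return NULL;
	}
	return vma;
}
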
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
select HAVE_PERF_EVENTS
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
+ select LOCK_MM_AND_FIND_VMA
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (unlikely(address < vma->vm_start)) {
- if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
- goto bad_area;
- }
+ goto bad_area_nosemaphore;
/*
* vm_area is good, now check permissions for this memory access
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
/*
* Major/minor page fault accounting
* (in case of retry we only land here once)
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
+ select LOCK_MM_AND_FIND_VMA
select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES
select OF
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
BUG();
}
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
/*
* Something tried to access memory that isn't in our memory map.
* Fix it, but check if it's kernel or user first.
*/
- mmap_read_unlock(mm);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
if (is_write(regs))
flags |= FAULT_FLAG_WRITE;
retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, addr);
+ vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
- bad_area(regs, mm, code, addr);
- return;
- }
- if (likely(vma->vm_start <= addr))
- goto good_area;
- if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- bad_area(regs, mm, code, addr);
- return;
- }
- if (unlikely(expand_stack(vma, addr))) {
- bad_area(regs, mm, code, addr);
+ bad_area_nosemaphore(regs, mm, code, addr);
return;
}
* Ok, we have a good vm_area for this memory access, so
* we can handle it.
*/
-good_area:
code = SEGV_ACCERR;
if (unlikely(access_error(regs, vma))) {
- bad_area(regs, mm, code, addr);
+ mmap_read_unlock(mm);
+ bad_area_nosemaphore(regs, mm, code, addr);
return;
}
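
The csky hunk makes the new locking convention explicit. bad_area_nosemaphore() must not touch the mmap lock: when lock_mm_and_find_vma() fails it has already dropped (or never taken) the lock, while later failures such as access_error() still hold it and must unlock first. Schematically (a sketch of the convention, not any one arch verbatim; access_error() stands in for each arch's permission check):

	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		/* Lookup failed: the helper already dropped mmap_lock. */
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	/* Lookup succeeded: we hold the read lock until handling is done. */
	if (unlikely(access_error(regs, vma))) {
		mmap_read_unlock(mm);
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}
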
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
select GENERIC_SMP_IDLE_THREAD
select STACKTRACE_SUPPORT
select GENERIC_CLOCKEVENTS_BROADCAST
+ select LOCK_MM_AND_FIND_VMA
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
select ARCH_WANT_LD_ORPHAN_WARN
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (unlikely(!vma))
+ goto bad_area_nosemaphore;
- if (vma->vm_start <= address)
- goto good_area;
-
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
-
- if (expand_stack(vma, address))
- goto bad_area;
-
-good_area:
/* Address space is OK. Now check access rights. */
si_code = SEGV_ACCERR;
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
if (user_mode(regs)) {
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
+ select LOCK_MM_AND_FIND_VMA
select MMU_GATHER_MERGE_VMAS if MMU
select MODULES_USE_ELF_RELA if MODULES
select NEED_PER_CPU_EMBED_FIRST_CHUNK
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (!expand_stack(vma, address))
- goto good_area;
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (unlikely(!vma))
+ goto bad_area_nosemaphore;
+ goto good_area;
+
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
do_sigsegv(regs, write, address, si_code);
return;
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
select IRQ_DOMAIN
+ select LOCK_MM_AND_FIND_VMA
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (!mmap_read_trylock(mm)) {
- if (!user_mode(regs) && !search_exception_tables(regs->ea))
- goto bad_area_nosemaphore;
retry:
- mmap_read_lock(mm);
- }
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
code = SEGV_ACCERR;
switch (cause) {
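
nios2 is the one conversion that deletes more than lookup boilerplate: its trylock-then-check-exception-tables dance avoided sleeping on mmap_lock for kernel faults with no fixup entry, and that guard now lives inside the common helper. A sketch of how the centralized check presumably looks (names and structure follow the common mm/memory.c helper; treat the details as illustrative):

static bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
{
	/* Common case: the lock is uncontended. */
	if (likely(mmap_read_trylock(mm)))
		return true;

	/*
	 * Contended kernel-mode fault: only block on mmap_lock if the
	 * faulting instruction has an exception fixup, mirroring the
	 * regs->ea check the nios2 hunk above removes.
	 */
	if (regs && !user_mode(regs) &&
	    !search_exception_tables(instruction_pointer(regs)))
		return false;

	return !mmap_read_lock_killable(mm);
}
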
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_FORCED_THREADING
+ select LOCK_MM_AND_FIND_VMA
select MODULES_USE_ELF_RELA
select NEED_SG_DMA_LENGTH
select NO_DMA if !MMU && !DMA_COHERENT
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
}
retry:
- mmap_read_lock(mm);
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma)) {
- bad_area(regs, error_code, address);
- return;
- }
- if (likely(vma->vm_start <= address))
- goto good_area;
- if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- bad_area(regs, error_code, address);
- return;
- }
- if (unlikely(expand_stack(vma, address))) {
- bad_area(regs, error_code, address);
+ bad_area_nosemaphore(regs, error_code, address);
return;
}
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address);
return;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
select DMA_DIRECT_REMAP
select GENERIC_ATOMIC64
select HAVE_UID16
+ select LOCK_MM_AND_FIND_VMA
select OLD_SIGACTION
select ZONE_DMA
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
if (pagefault_disabled() || !mm)
goto no_context;
+ if (!from_user && address >= PAGE_OFFSET)
+ goto no_context;
+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
-
- if (!from_user && address >= PAGE_OFFSET)
- goto bad_area;
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
code = SEGV_MAPERR;
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
-good_area:
+ goto bad_area_nosemaphore;
code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
return;
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_DOMAIN
+ select LOCK_MM_AND_FIND_VMA
select MODULES_USE_ELF_RELA
select PERF_USE_VMALLOC
select TRACE_IRQFLAGS_SUPPORT
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
-
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
code = SEGV_ACCERR;
if (is_write) {
*/
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
if (user_mode(regs)) {
force_sig_fault(SIGSEGV, code, (void *) address);
return;