jmp_buf *fault_catcher;
struct task_struct *prev_sched;
unsigned long temp_stack;
- jmp_buf *exec_buf;
struct arch_thread arch;
jmp_buf switch_buf;
int mm_count;
.fault_addr = NULL, \
.prev_sched = NULL, \
.temp_stack = 0, \
- .exec_buf = NULL, \
.arch = INIT_ARCH_THREAD, \
.request = { 0 } \
}
extern int os_getpgrp(void);
extern void init_new_thread_signals(void);
-extern int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr);
extern int os_map_memory(void *virt, int fd, unsigned long long off,
unsigned long len, int r, int w, int x);
#endif
}
EXPORT_SYMBOL(start_thread);
-
-void __noreturn ret_from_kernel_execve(struct pt_regs *unused)
-{
- UML_LONGJMP(current->thread.exec_buf, 1);
-}
arg = current->thread.request.u.thread.arg;
/*
- * The return value is 1 if the kernel thread execs a process,
- * 0 if it just exits
+ * callback returns only if the kernel thread execs a process
*/
- n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
- if (n == 1)
- userspace(&current->thread.regs.regs);
- else
- do_exit(0);
+ n = fn(arg);
+ userspace(&current->thread.regs.regs);
}
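
For orientation, new_thread_handler() in arch/um/kernel/process.c reads roughly as below after this change. This is an illustrative sketch, not part of the patch; the request.u.thread.proc field used to fetch the callback is assumed from the surrounding UML code.

/*
 * Sketch: with GENERIC_KERNEL_EXECVE the callback returns only once it
 * has exec'ed a userspace image, so we can enter userspace directly.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;	/* assumed field name */
	arg = current->thread.request.u.thread.arg;

	n = fn(arg);		/* returns only if the thread exec'ed a process */
	userspace(&current->thread.regs.regs);
}
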
/* Called magically, see new_thread_handler above */
signal(SIGWINCH, SIG_IGN);
signal(SIGTERM, SIG_DFL);
}
-
-int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
-{
- jmp_buf buf;
- int n;
-
- *jmp_ptr = &buf;
- n = UML_SETJMP(&buf);
- if (n != 0)
- return n;
- (*fn)(arg);
- return 0;
-}
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS || UPROBES)
# define __ARCH_WANT_SYS_UTIME
# define __ARCH_WANT_SYS_WAITPID
# define __ARCH_WANT_SYS_EXECVE
-# define __ARCH_WANT_KERNEL_EXECVE
/*
* "Conditional" syscalls
CFI_ENDPROC
END(ret_from_fork)
-ENTRY(ret_from_kernel_execve)
- movl %eax, %esp
- movl $0,PT_EAX(%esp)
+ENTRY(ret_from_kernel_thread)
+ CFI_STARTPROC
+ pushl_cfi %eax
+ call schedule_tail
GET_THREAD_INFO(%ebp)
+ popl_cfi %eax
+ pushl_cfi $0x0202 # Reset kernel eflags
+ popfl_cfi
+ movl PT_EBP(%esp),%eax
+ call *PT_EBX(%esp)
+ movl $0,PT_EAX(%esp)
jmp syscall_exit
-END(ret_from_kernel_execve)
+ CFI_ENDPROC
+ENDPROC(ret_from_kernel_thread)
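
In C-like terms, the new 32-bit ret_from_kernel_thread above behaves roughly as follows. This is an illustrative sketch only (the function name and signature are invented for the example); fn and arg are the values the fork path stashed in the child's PT_EBX and PT_EBP register slots.

/* Illustrative C analogue of the assembly above; not real kernel code. */
static void ret_from_kernel_thread_sketch(struct pt_regs *regs,
					  struct task_struct *prev)
{
	int (*fn)(void *) = (int (*)(void *))regs->bx;	/* from PT_EBX */
	void *arg = (void *)regs->bp;			/* from PT_EBP */

	schedule_tail(prev);	/* finish the context switch (prev arrives in %eax) */
	fn(arg);		/* returns only after a successful exec */
	regs->ax = 0;		/* the exec'ed process sees a zero return value */
	/* the real code then falls through via jmp syscall_exit */
}

The design point is the same as in the UML comment earlier in this patch: the callback returns only if it has exec'ed a process, so the old do_exit() tail in this trampoline is no longer needed.
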
/*
* Interrupt exit functions should be protected against kprobes
*/
.popsection
-ENTRY(ret_from_kernel_thread)
- CFI_STARTPROC
- pushl_cfi %eax
- call schedule_tail
- GET_THREAD_INFO(%ebp)
- popl_cfi %eax
- pushl_cfi $0x0202 # Reset kernel eflags
- popfl_cfi
- movl PT_EBP(%esp),%eax
- call *PT_EBX(%esp)
- call do_exit
- ud2 # padding for call trace
- CFI_ENDPROC
-ENDPROC(ret_from_kernel_thread)
-
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
entrypoint expects, so fix it up before using the normal path. */
jmp ret_from_sys_call # go to the SYSRET fastpath
1:
- subq $REST_SKIP, %rsp # move the stack pointer back
+ subq $REST_SKIP, %rsp # leave space for volatiles
CFI_ADJUST_CFA_OFFSET REST_SKIP
movq %rbp, %rdi
call *%rbx
- # exit
- mov %eax, %edi
- call do_exit
- ud2 # padding for call trace
-
+ movl $0, RAX(%rsp)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
CFI_ENDPROC
END(ret_from_fork)
jmp 2b
.previous
-ENTRY(ret_from_kernel_execve)
- movq %rdi, %rsp
- movl $0, RAX(%rsp)
- // RESTORE_REST
- movq 0*8(%rsp), %r15
- movq 1*8(%rsp), %r14
- movq 2*8(%rsp), %r13
- movq 3*8(%rsp), %r12
- movq 4*8(%rsp), %rbp
- movq 5*8(%rsp), %rbx
- addq $(6*8), %rsp
- jmp int_ret_from_sys_call
-END(ret_from_kernel_execve)
-
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
def_bool y
select GENERIC_FIND_FIRST_BIT
select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
config 64BIT
bool "64-bit kernel" if SUBARCH = "x86"