select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING if X86_64
+ select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
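
For context (not part of this patch): selecting HAVE_COPY_THREAD_TLS tells the generic fork code that x86 now provides copy_thread_tls() natively and takes the TLS pointer as a plain argument. Architectures that have not opted in keep a compatibility shim roughly like the sketch below (paraphrased from the generic header of that series; exact placement and wording are illustrative only):

#ifndef CONFIG_HAVE_COPY_THREAD_TLS
/* Illustrative fallback: the tls argument is dropped here and the
 * architecture is expected to dig it back out of the child's pt_regs. */
static inline int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
				  unsigned long arg, struct task_struct *p,
				  unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif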
release_vm86_irqs(dead_task);
}
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
struct task_struct *tsk;
*/
if (clone_flags & CLONE_SETTLS)
err = do_set_thread_area(p, -1,
- (struct user_desc __user *)childregs->si, 0);
+ (struct user_desc __user *)tls, 0);
if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
return get_desc_base(&t->thread.tls_array[tls]);
}
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
{
int err;
struct pt_regs *childregs;
#ifdef CONFIG_IA32_EMULATION
if (is_ia32_task())
err = do_set_thread_area(p, -1,
- (struct user_desc __user *)childregs->si, 0);
+ (struct user_desc __user *)tls, 0);
else
#endif
- err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+ err = do_arch_prctl(p, ARCH_SET_FS, tls);
if (err)
goto out;
}
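
With both the 32-bit and 64-bit variants converted, the TLS value reaches copy_thread_tls() as an explicit C argument instead of being fished back out of childregs->si (32-bit and ia32 emulation) or childregs->r8 (64-bit). On the caller side the flow is roughly the following (a simplified, illustrative sketch of the generic path; the real copy_process() does far more work than shown):

/* Illustrative sketch only: the clone() syscall's tls argument is passed
 * through the generic fork path and handed to the architecture hook
 * unchanged, so no pt_regs tricks are needed. */
static int sketch_copy_process(unsigned long clone_flags,
			       unsigned long stack_start,
			       unsigned long stack_size,
			       struct task_struct *p,
			       unsigned long tls)
{
	/* ... generic task_struct setup elided ... */
	return copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
}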