Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 12 May 2017 17:41:45 +0000 (10:41 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 12 May 2017 17:41:45 +0000 (10:41 -0700)
Pull stackprotector fixlet from Ingo Molnar:
 "A single fix/enhancement to increase stackprotector canary randomness
  on 64-bit kernels with very little cost"

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  stackprotector: Increase the per-task stack canary's random range from 32 bits to 64 bits on 64-bit platforms

1  2 
kernel/fork.c

diff --combined kernel/fork.c
@@@ -87,7 -87,6 +87,7 @@@
  #include <linux/compiler.h>
  #include <linux/sysctl.h>
  #include <linux/kcov.h>
 +#include <linux/livepatch.h>
  
  #include <asm/pgtable.h>
  #include <asm/pgalloc.h>
@@@ -179,24 -178,6 +179,24 @@@ void __weak arch_release_thread_stack(u
   */
  #define NR_CACHED_STACKS 2
  static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
 +
 +static int free_vm_stack_cache(unsigned int cpu)
 +{
 +      struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
 +      int i;
 +
 +      for (i = 0; i < NR_CACHED_STACKS; i++) {
 +              struct vm_struct *vm_stack = cached_vm_stacks[i];
 +
 +              if (!vm_stack)
 +                      continue;
 +
 +              vfree(vm_stack->addr);
 +              cached_vm_stacks[i] = NULL;
 +      }
 +
 +      return 0;
 +}
  #endif
  
  static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
  
        stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
                                     VMALLOC_START, VMALLOC_END,
 -                                   THREADINFO_GFP | __GFP_HIGHMEM,
 +                                   THREADINFO_GFP,
                                     PAGE_KERNEL,
                                     0, node, __builtin_return_address(0));
  
@@@ -485,11 -466,6 +485,11 @@@ void __init fork_init(void
        for (i = 0; i < UCOUNT_COUNTS; i++) {
                init_user_ns.ucount_max[i] = max_threads/2;
        }
 +
 +#ifdef CONFIG_VMAP_STACK
 +      cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
 +                        NULL, free_vm_stack_cache);
 +#endif
  }
  
  int __weak arch_dup_task_struct(struct task_struct *dst,
@@@ -560,7 -536,7 +560,7 @@@ static struct task_struct *dup_task_str
        set_task_stack_end_magic(tsk);
  
  #ifdef CONFIG_CC_STACKPROTECTOR
-       tsk->stack_canary = get_random_int();
+       tsk->stack_canary = get_random_long();
  #endif
  
        /*
@@@ -1337,7 -1313,7 +1337,7 @@@ void __cleanup_sighand(struct sighand_s
        if (atomic_dec_and_test(&sighand->count)) {
                signalfd_cleanup(sighand);
                /*
 -               * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
 +               * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
                 * without an RCU grace period, see __lock_task_sighand().
                 */
                kmem_cache_free(sighand_cachep, sighand);
@@@ -1704,12 -1680,9 +1704,12 @@@ static __latent_entropy struct task_str
                goto bad_fork_cleanup_perf;
        /* copy all the process information */
        shm_init_task(p);
 -      retval = copy_semundo(clone_flags, p);
 +      retval = security_task_alloc(p, clone_flags);
        if (retval)
                goto bad_fork_cleanup_audit;
 +      retval = copy_semundo(clone_flags, p);
 +      if (retval)
 +              goto bad_fork_cleanup_security;
        retval = copy_files(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_semundo;
                p->parent_exec_id = current->self_exec_id;
        }
  
 +      klp_copy_process(p);
 +
        spin_lock(&current->sighand->siglock);
  
        /*
@@@ -1933,8 -1904,6 +1933,8 @@@ bad_fork_cleanup_files
        exit_files(p); /* blocking */
  bad_fork_cleanup_semundo:
        exit_sem(p);
 +bad_fork_cleanup_security:
 +      security_task_free(p);
  bad_fork_cleanup_audit:
        audit_free(p);
  bad_fork_cleanup_perf:
@@@ -2176,7 -2145,7 +2176,7 @@@ void __init proc_caches_init(void
  {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
 -                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
 +                      SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
                        SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,