sched/core, sched/x86: Kill thread_info::saved_preempt_count
author	Peter Zijlstra <peterz@infradead.org>
Mon, 28 Sep 2015 16:11:18 +0000 (18:11 +0200)
committer	Ingo Molnar <mingo@kernel.org>
Tue, 6 Oct 2015 15:08:18 +0000 (17:08 +0200)
With the introduction of the context switch preempt_count invariant,
and the demise of PREEMPT_ACTIVE, it's pointless to save/restore the
per-cpu preemption count; it must always be 2.
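
A minimal sketch of that invariant, as asserted by the companion
sched/core patch that creates it in finish_task_switch() (paraphrased,
not a verbatim quote of this series):

	/*
	 * The previous task left us with a preempt_count of 2:
	 *
	 *	schedule()
	 *	  preempt_disable();			// 1
	 *	  __schedule()
	 *	    raw_spin_lock_irq(&rq->lock)	// 2
	 *
	 * so the count the incoming task observes is always the same and
	 * there is nothing to save or restore per task.
	 */
	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
		      "corrupted preempt_count: %s/%d/0x%x\n",
		      current->comm, current->pid, preempt_count()))
		preempt_count_set(FORK_PREEMPT_COUNT);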

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/preempt.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c

diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 01e700d..01bcde8 100644
@@ -30,12 +30,9 @@ static __always_inline void preempt_count_set(int pc)
 /*
  * must be macros to avoid header recursion hell
  */
-#define init_task_preempt_count(p) do { \
-       task_thread_info(p)->saved_preempt_count = FORK_PREEMPT_COUNT; \
-} while (0)
+#define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-       task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
        per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
 } while (0)
 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 8afdc3e..809877e 100644
@@ -57,7 +57,6 @@ struct thread_info {
        __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
-       int                     saved_preempt_count;
        mm_segment_t            addr_limit;
        void __user             *sysenter_return;
        unsigned int            sig_on_uaccess_error:1;
@@ -69,7 +68,6 @@ struct thread_info {
        .task           = &tsk,                 \
        .flags          = 0,                    \
        .cpu            = 0,                    \
-       .saved_preempt_count = INIT_PREEMPT_COUNT,      \
        .addr_limit     = KERNEL_DS,            \
 }
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 737527b..9f95091 100644
@@ -280,14 +280,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                set_iopl_mask(next->iopl);
 
        /*
-        * If it were not for PREEMPT_ACTIVE we could guarantee that the
-        * preempt_count of all tasks was equal here and this would not be
-        * needed.
-        */
-       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
-       /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b35921a..d7f1d5c 100644
@@ -401,14 +401,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        this_cpu_write(current_task, next_p);
 
-       /*
-        * If it were not for PREEMPT_ACTIVE we could guarantee that the
-        * preempt_count of all tasks was equal here and this would not be
-        * needed.
-        */
-       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
        /* Reload esp0 and ss1.  This changes current_thread_info(). */
        load_sp0(tss, next);