sched/membarrier: Skip IPIs when mm->mm_users == 1
Author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thu, 19 Sep 2019 17:37:04 +0000 (13:37 -0400)
Committer: Ingo Molnar <mingo@kernel.org>
Wed, 25 Sep 2019 15:42:31 +0000 (17:42 +0200)
If there is only a single mm_user for the mm, the private expedited
membarrier command can skip the IPIs, because only a single thread
is using the mm.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190919173705.2181-7-mathieu.desnoyers@efficios.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/membarrier.c

index 070cf43..fced54a 100644 (file)
@@ -145,20 +145,21 @@ static int membarrier_private_expedited(int flags)
        int cpu;
        bool fallback = false;
        cpumask_var_t tmpmask;
+       struct mm_struct *mm = current->mm;
 
        if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
                        return -EINVAL;
-               if (!(atomic_read(&current->mm->membarrier_state) &
+               if (!(atomic_read(&mm->membarrier_state) &
                      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
                        return -EPERM;
        } else {
-               if (!(atomic_read(&current->mm->membarrier_state) &
+               if (!(atomic_read(&mm->membarrier_state) &
                      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
                        return -EPERM;
        }
 
-       if (num_online_cpus() == 1)
+       if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)
                return 0;
 
        /*
@@ -194,7 +195,7 @@ static int membarrier_private_expedited(int flags)
                        continue;
                rcu_read_lock();
                p = rcu_dereference(cpu_rq(cpu)->curr);
-               if (p && p->mm == current->mm) {
+               if (p && p->mm == mm) {
                        if (!fallback)
                                __cpumask_set_cpu(cpu, tmpmask);
                        else