mips/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()
author	Will Deacon <will.deacon@arm.com>
	Fri, 22 Feb 2019 13:37:21 +0000 (13:37 +0000)
committer	Will Deacon <will.deacon@arm.com>
	Mon, 8 Apr 2019 11:00:28 +0000 (12:00 +0100)
The mmiowb() macro is horribly difficult to use and drivers will continue
to work most of the time if they omit a call when it is required.
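
As background, the usage the old API demanded looks like this minimal
sketch (dev_lock, dev_kick() and DEV_CMD are hypothetical names for
illustration, not taken from this patch):

	static DEFINE_SPINLOCK(dev_lock);

	static void dev_kick(void __iomem *regs, u32 cmd)
	{
		spin_lock(&dev_lock);
		writel(cmd, regs + DEV_CMD);	/* posted MMIO write */
		mmiowb();			/* easy to forget */
		spin_unlock(&dev_lock);
	}

Without the mmiowb(), writes from two CPUs taking dev_lock in turn may
reach the device out of lock-handover order on weakly-ordered I/O
interconnects.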

Rather than rely on driver authors getting this right, push mmiowb() into
arch_spin_unlock() for mips. If this is deemed to be a performance issue,
a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide
the barrier in cases where no I/O writes were performed inside the
critical section.
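
For reference, the ARCH_HAS_MMIOWB approach works roughly as follows
(a simplified sketch of the v5.1-era <asm-generic/mmiowb.h>; treat the
details as approximate): the I/O accessors record a per-CPU "pending"
flag, and the unlock path only pays for the barrier when an MMIO write
actually happened inside the critical section.

	struct mmiowb_state {
		u16	nest_count;	/* spinlock nesting depth */
		u16	mmiowb_pending;	/* MMIO write seen under a lock? */
	};

	/* called from the I/O accessors, e.g. writel() */
	static inline void mmiowb_set_pending(void)
	{
		struct mmiowb_state *ms = __mmiowb_state();
		ms->mmiowb_pending = ms->nest_count;
	}

	/* called from the arch spin_unlock() path */
	static inline void mmiowb_spin_unlock(void)
	{
		struct mmiowb_state *ms = __mmiowb_state();

		if (unlikely(ms->mmiowb_pending)) {
			ms->mmiowb_pending = 0;
			mmiowb();	/* barrier only when needed */
		}
		ms->nest_count--;
	}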

Acked-by: Paul Burton <paul.burton@mips.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/mips/include/asm/Kbuild
arch/mips/include/asm/io.h
arch/mips/include/asm/mmiowb.h [new file with mode: 0644]
arch/mips/include/asm/spinlock.h

diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index bf39c22..87b86cd 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += irq_work.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += msi.h
 generic-y += parport.h
 generic-y += percpu.h
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 845fbbc..29997e4 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
 #define iobarrier_w() wmb()
 #define iobarrier_sync() iob()
 
-/* Some callers use this older API instead.  */
-#define mmiowb() iobarrier_w()
-
 /*
  *     virt_to_phys    -       map virtual addresses to physical
  *     @address: address to remap
diff --git a/arch/mips/include/asm/mmiowb.h b/arch/mips/include/asm/mmiowb.h
new file mode 100644
index 0000000..a40824e
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb()       iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_MMIOWB_H */
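
(Note the ordering in the new header: mmiowb() must be defined before
<asm-generic/mmiowb.h> is included, because the generic helpers expand
to calls to the architecture's mmiowb().)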
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ee81297..8a88eb2 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -11,6 +11,21 @@
 
 #include <asm/processor.h>
 #include <asm/qrwlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define        queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+       /* This could be optimised with ARCH_HAS_MMIOWB */
+       mmiowb();
+       smp_store_release(&lock->locked, 0);
+}
+
 #include <asm/qspinlock.h>
 
 #endif /* _ASM_SPINLOCK_H */
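
With this in place, the hypothetical driver from the first sketch can
drop its explicit barrier; the unlock path now provides it:

	spin_lock(&dev_lock);
	writel(cmd, regs + DEV_CMD);	/* posted MMIO write */
	spin_unlock(&dev_lock);		/* queued_spin_unlock() issues mmiowb() */

Note that mmiowb() is executed before the smp_store_release() that
hands over the lock, so the I/O write cannot be reordered past the
unlock.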