mm/slub: simplify __cmpxchg_double_slab() and slab_[un]lock()
Author:     Vlastimil Babka <vbabka@suse.cz>
AuthorDate: Tue, 23 Aug 2022 17:04:00 +0000 (19:04 +0200)
Commit:     Vlastimil Babka <vbabka@suse.cz>
CommitDate: Fri, 16 Sep 2022 22:18:35 +0000 (00:18 +0200)
The PREEMPT_RT-specific disabling of irqs in __cmpxchg_double_slab()
(through slab_[un]lock()) is unnecessary: bit_spin_lock() disables
preemption, and that is sufficient on PREEMPT_RT, where no
allocation/free operation is performed in hardirq context, so nothing
can interrupt the current operation.
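
For reference, bit_spin_lock() (include/linux/bit_spinlock.h) takes the
lock with preemption disabled; a simplified sketch of its SMP path, with
the config guards and sparse annotation trimmed:

	static inline void bit_spin_lock(int bitnum, unsigned long *addr)
	{
		/* The lock is always held with preemption disabled. */
		preempt_disable();
		while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
			/* Contended: busy-wait with preemption enabled. */
			preempt_enable();
			do {
				cpu_relax();
			} while (test_bit(bitnum, addr));
			preempt_disable();
		}
	}

Because preemption stays disabled for as long as the bit lock is held,
no other task on the CPU can enter an allocation/free path and race with
the holder on PREEMPT_RT.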

That means we no longer need the slab_[un]lock() wrappers, so delete
them and rename the current __slab_[un]lock() to slab_[un]lock().
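
As the diff below shows, the fallback path in __cmpxchg_double_slab()
then shrinks from

	/* init to 0 to prevent spurious warnings */
	unsigned long flags = 0;

	slab_lock(slab, &flags);
	...
	slab_unlock(slab, &flags);

to simply

	slab_lock(slab);
	...
	slab_unlock(slab);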

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mm/slub.c

index e0759b5..d58f195 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -446,7 +446,7 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void __slab_lock(struct slab *slab)
+static __always_inline void slab_lock(struct slab *slab)
 {
        struct page *page = slab_page(slab);
 
@@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
        bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void __slab_unlock(struct slab *slab)
+static __always_inline void slab_unlock(struct slab *slab)
 {
        struct page *page = slab_page(slab);
 
@@ -462,24 +462,12 @@ static __always_inline void __slab_unlock(struct slab *slab)
        __bit_spin_unlock(PG_locked, &page->flags);
 }
 
-static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
-{
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               local_irq_save(*flags);
-       __slab_lock(slab);
-}
-
-static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
-{
-       __slab_unlock(slab);
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               local_irq_restore(*flags);
-}
-
 /*
  * Interrupts must be disabled (for the fallback code to work right), typically
- * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
- * so we disable interrupts as part of slab_[un]lock().
+ * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
+ * part of bit_spin_lock(), is sufficient because the policy is not to allow any
+ * allocation/free operation in hardirq context. Therefore nothing can
+ * interrupt the operation.
  */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
                void *freelist_old, unsigned long counters_old,
@@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
        } else
 #endif
        {
-               /* init to 0 to prevent spurious warnings */
-               unsigned long flags = 0;
-
-               slab_lock(slab, &flags);
+               slab_lock(slab);
                if (slab->freelist == freelist_old &&
                                        slab->counters == counters_old) {
                        slab->freelist = freelist_new;
                        slab->counters = counters_new;
-                       slab_unlock(slab, &flags);
+                       slab_unlock(slab);
                        return true;
                }
-               slab_unlock(slab, &flags);
+               slab_unlock(slab);
        }
 
        cpu_relax();
@@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
                unsigned long flags;
 
                local_irq_save(flags);
-               __slab_lock(slab);
+               slab_lock(slab);
                if (slab->freelist == freelist_old &&
                                        slab->counters == counters_old) {
                        slab->freelist = freelist_new;
                        slab->counters = counters_new;
-                       __slab_unlock(slab);
+                       slab_unlock(slab);
                        local_irq_restore(flags);
                        return true;
                }
-               __slab_unlock(slab);
+               slab_unlock(slab);
                local_irq_restore(flags);
        }