From: Vlastimil Babka
Date: Tue, 26 Oct 2021 11:39:14 +0000 (+0200)
Subject: mm/slub: Convert __slab_lock() and __slab_unlock() to struct slab
X-Git-Tag: v6.1-rc5~2189^2~22
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0393895b091227e8a77dfd5e6a6ab61bd11b8df0;p=platform%2Fkernel%2Flinux-starfive.git

mm/slub: Convert __slab_lock() and __slab_unlock() to struct slab

These functions operate on the PG_locked page flag, but make them accept
struct slab to encapsulate this implementation detail.

Signed-off-by: Vlastimil Babka
Reviewed-by: Roman Gushchin
---

diff --git a/mm/slub.c b/mm/slub.c
index acf2608..14550e7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -440,14 +440,18 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void __slab_lock(struct page *page)
+static __always_inline void __slab_lock(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
+
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void __slab_unlock(struct page *page)
+static __always_inline void __slab_unlock(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
+
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
@@ -456,12 +460,12 @@ static __always_inline void slab_lock(struct page *page, unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_save(*flags);
-	__slab_lock(page);
+	__slab_lock(page_slab(page));
 }
 
 static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
 {
-	__slab_unlock(page);
+	__slab_unlock(page_slab(page));
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_restore(*flags);
 }
 
@@ -530,16 +534,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		unsigned long flags;
 
 		local_irq_save(flags);
-		__slab_lock(page);
+		__slab_lock(page_slab(page));
 		if (page->freelist == freelist_old &&
 		    page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
-			__slab_unlock(page);
+			__slab_unlock(page_slab(page));
 			local_irq_restore(flags);
 			return true;
 		}
-		__slab_unlock(page);
+		__slab_unlock(page_slab(page));
 		local_irq_restore(flags);
 	}
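
As background for the locking the patch touches: __slab_lock() takes a bit
spinlock by atomically setting the PG_locked bit in the flags word of the
slab's page, and __slab_unlock() releases it by clearing that bit. Below is
a minimal userspace sketch of the same bit-spinlock idea, written with C11
atomics instead of the kernel's bit_spin_lock()/__bit_spin_unlock() helpers;
struct fake_slab, FAKE_PG_LOCKED and the fake_slab_lock()/fake_slab_unlock()
names are hypothetical stand-ins for illustration, not kernel API.

/*
 * Minimal sketch of a bit spinlock, assuming C11 atomics in userspace.
 * One bit of the flags word serves as the lock, mirroring how
 * __slab_lock() uses the PG_locked bit of page->flags.
 */
#include <stdatomic.h>
#include <stdio.h>

#define FAKE_PG_LOCKED	0		/* hypothetical stand-in for PG_locked */

struct fake_slab {			/* hypothetical stand-in for struct slab */
	_Atomic unsigned long flags;	/* the lock bit lives in this word */
	void *freelist;
};

static void fake_slab_lock(struct fake_slab *slab)
{
	unsigned long mask = 1UL << FAKE_PG_LOCKED;

	/* Test-and-set: spin until the bit was previously clear. */
	while (atomic_fetch_or_explicit(&slab->flags, mask,
					memory_order_acquire) & mask)
		;	/* another thread holds the lock; keep spinning */
}

static void fake_slab_unlock(struct fake_slab *slab)
{
	unsigned long mask = 1UL << FAKE_PG_LOCKED;

	/* Clear the lock bit; release ordering publishes our updates. */
	atomic_fetch_and_explicit(&slab->flags, ~mask, memory_order_release);
}

int main(void)
{
	struct fake_slab s = { .flags = 0, .freelist = (void *)0 };

	fake_slab_lock(&s);
	printf("flags while locked: 0x%lx\n", (unsigned long)s.flags);
	fake_slab_unlock(&s);
	printf("flags after unlock: 0x%lx\n", (unsigned long)s.flags);
	return 0;
}

Because the lock is just one bit of the flags word, which page flag carries
it is an implementation detail, which is what lets the patch hide it behind
the struct slab interface.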