mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled
author	Vlastimil Babka <vbabka@suse.cz>
Wed, 10 Nov 2021 13:12:45 +0000 (14:12 +0100)
committer	Vlastimil Babka <vbabka@suse.cz>
Thu, 6 Jan 2022 11:26:53 +0000 (12:26 +0100)
The fields 'next' and 'slabs' are only used when CONFIG_SLUB_CPU_PARTIAL
is enabled. We can guard their definitions with an #ifdef to prevent
accidental use when the option is disabled.
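
As an illustration of the pattern (a toy, compilable userspace model;
the names here are made up and are not the kernel's):

  /* Toy model: union members guarded by a config option. */
  struct toy_slab {
          union {
                  struct { void *prev, *next; } list; /* stand-in for list_head */
  #ifdef CONFIG_SLUB_CPU_PARTIAL
                  struct {
                          struct toy_slab *next;  /* next slab on a percpu partial list */
                          int slabs;              /* nr of slabs left on that list */
                  };
  #endif
          };
  };

  /*
   * With CONFIG_SLUB_CPU_PARTIAL undefined, an access such as
   * 's->slabs' fails to build: 'struct toy_slab' has no member named
   * 'slabs'. That is exactly the accidental use the compiler should
   * catch.
   */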

Currently, show_slab_objects() and slabs_cpu_partial_show() contain
code that accesses the slabs field through the wrappers
slub_percpu_partial() and slub_percpu_partial_read_once(). With
CONFIG_SLUB_CPU_PARTIAL=n this code is effectively dead, but once the
field's definition is hidden we have to hide all of this code behind
#ifdef as well to prevent a compile error.
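
For reference, the wrappers behave roughly like this (paraphrased from
include/linux/slub_def.h; see the tree for the exact definitions):

  #ifdef CONFIG_SLUB_CPU_PARTIAL
  #define slub_percpu_partial(c)                  ((c)->partial)
  #define slub_percpu_partial_read_once(c)        READ_ONCE(slub_percpu_partial(c))
  #else
  #define slub_percpu_partial(c)                  NULL
  #define slub_percpu_partial_read_once(c)        NULL
  #endif

With CONFIG_SLUB_CPU_PARTIAL=n the wrappers evaluate to NULL, so the
'if (slab)' branches below can never execute, but the compiler still
parses them, and slab->slabs now names a field that no longer exists.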

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
mm/slab.h
mm/slub.c

index 495008f..f14e723 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,10 +25,12 @@ struct slab {
        union {
                struct list_head slab_list;
                struct rcu_head rcu_head;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
                struct {
                        struct slab *next;
                        int slabs;      /* Nr of slabs left */
                };
+#endif
        };
        struct kmem_cache *slab_cache;
        /* Double-word boundary */
index d08ba10..2614740 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5258,6 +5258,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        total += x;
                        nodes[node] += x;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
                        slab = slub_percpu_partial_read_once(c);
                        if (slab) {
                                node = slab_nid(slab);
@@ -5270,6 +5271,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                total += x;
                                nodes[node] += x;
                        }
+#endif
                }
        }
 
@@ -5469,9 +5471,10 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 {
        int objects = 0;
        int slabs = 0;
-       int cpu;
+       int cpu __maybe_unused;
        int len = 0;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
        for_each_online_cpu(cpu) {
                struct slab *slab;
 
@@ -5480,12 +5483,13 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
                if (slab)
                        slabs += slab->slabs;
        }
+#endif
 
        /* Approximate half-full slabs, see slub_set_cpu_partial() */
        objects = (slabs * oo_objects(s->oo)) / 2;
        len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
        for_each_online_cpu(cpu) {
                struct slab *slab;