/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of the partial list */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of the partial list */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};
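
/*
 * For reference, mm/slub.c bumps these counters with a small helper along
 * these lines (simplified sketch; the exact body varies by kernel version):
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 * The per-cpu sums are exported through /sys/kernel/slab/<cache>/ when
 * CONFIG_SLUB_STATS is enabled.
 */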

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */
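
/*
 * Illustrative sketch (condensed from the mm/slub.c allocation fastpath,
 * not a literal copy) of why freelist and tid must sit together: both
 * words are swapped in one atomic operation, and a changed tid reveals
 * that another context touched this cpu slab between the read and the
 * update:
 *
 *	object = c->freelist;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;
 *
 * A failed cmpxchg means another context won the race and the fastpath
 * retries.
 */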

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
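
/*
 * Typical usage (illustrative, assuming the cpu slab's local lock is held
 * where the list is modified): pop the head slab and advance the list.
 *
 *	slab = slub_percpu_partial(c);		// current head of the list
 *	slub_set_percpu_partial(c, slab);	// advance head to slab->next
 *
 * slub_percpu_partial_read_once() is for lockless peeking, e.g. to decide
 * whether flushing a remote cpu is worthwhile at all.
 */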

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
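
/*
 * mm/slub.c packs the page order into the high bits and the object count
 * into the low bits of the single word, roughly as below (OO_SHIFT and
 * OO_MASK are internal details of that file):
 *
 *	order   = x.x >> OO_SHIFT;
 *	objects = x.x & OO_MASK;
 *
 * Packing both into one word is what lets oo be read or replaced
 * atomically without locking.
 */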

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* The size of an object including metadata */
	unsigned int object_size;	/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x) {
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
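
/*
 * Worked example (illustrative): with cache->size == 128, a stray pointer
 * 50 bytes past the start of the third object rounds down to that object:
 *
 *	void *p = slab_address(slab) + 2 * 128 + 50;
 *	void *obj = nearest_obj(cache, slab, p);	// slab_address(slab) + 256
 *
 * With redzone debugging active, fixup_red_left() shifts the result past
 * the left red zone so the caller gets the usable object start.
 */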

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
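
/*
 * Worked example (illustrative): with cache->size == 128, an object
 * starting 384 bytes into the slab maps to index 384 / 128 == 3:
 *
 *	unsigned int idx = obj_to_index(cache, slab, obj);
 *
 * reciprocal_divide() replaces the division with a multiply-and-shift
 * precomputed in cache->reciprocal_size, keeping a hardware divide off
 * this hot path. KFENCE objects are not laid out in the slab, hence the
 * fixed index 0 above.
 */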

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */