/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
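
/*
 * Illustrative sketch, not part of this header: under CONFIG_SLUB_STATS,
 * mm/slub.c bumps these counters with a small percpu helper along these
 * lines, and each item is then exported as its own file under
 * /sys/kernel/slab/<cache>/:
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 *	stat(s, ALLOC_FASTPATH);	// e.g. after a lockless allocation
 */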

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
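
/*
 * Illustrative sketch, not part of this header: the lockless fastpath in
 * mm/slub.c reads freelist and tid as a pair and commits with a double
 * cmpxchg, so a transaction that was preempted or migrated in between
 * sees a changed tid, fails, and retries. Roughly:
 *
 *	c = raw_cpu_ptr(s->cpu_slab);
 *	tid = READ_ONCE(c->tid);
 *	object = c->freelist;
 *	if (object &&
 *	    this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				    object, tid,
 *				    get_freepointer(s, object), next_tid(tid)))
 *		return object;	// fastpath: no locks, no irq-disabling
 */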

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
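
/*
 * Illustrative sketch, not part of this header: code that peeks at another
 * CPU's partial list without pinning that CPU (e.g. the sysfs object
 * counting in mm/slub.c) goes through the _read_once() form:
 *
 *	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 *	struct page *page = slub_percpu_partial_read_once(c);
 *
 *	if (page)
 *		x = page->pobjects;	// approximate; the list may change
 */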

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
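
/*
 * Illustrative sketch, not part of this header: mm/slub.c packs the page
 * order into the high bits and the object count into the low bits, and
 * decodes the pair along these lines:
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 */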

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	/* For propagation, maximum size of a stored attr */
	unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
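
/*
 * Illustrative sketch, not part of this header: a cache backed by this
 * structure is created and used through the generic slab API; struct foo
 * and the chosen flags below are made up for the example:
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (foo_cache) {
 *		struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *		kmem_cache_free(foo_cache, p);
 *		kmem_cache_destroy(foo_cache);
 *	}
 */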

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)

#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
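
/*
 * Illustrative sketch, not part of this header: these wrappers let callers
 * tune the per-cpu partial limit without #ifdefs; the sysfs store method in
 * mm/slub.c does roughly:
 *
 *	err = kstrtouint(buf, 10, &objects);
 *	if (err)
 *		return err;
 *	slub_set_cpu_partial(s, objects);
 *	flush_all(s);	// flush_all() is internal to mm/slub.c
 */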

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif
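
/*
 * Illustrative sketch, not part of this header: cache teardown in
 * mm/slab_common.c hides the sysfs directory first and drops the kobject
 * reference afterwards (possibly deferred for SLAB_TYPESAFE_BY_RCU caches),
 * roughly:
 *
 *	sysfs_slab_unlink(s);	// remove /sys/kernel/slab/<cache> now
 *	sysfs_slab_release(s);	// put the kobject, freeing it eventually
 */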

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
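
/*
 * Illustrative sketch, not part of this header: nearest_obj() maps an
 * arbitrary address within a slab page back to the start of the enclosing
 * (or nearest) object, which is how error reports name the object, e.g.:
 *
 *	void *obj = nearest_obj(cache, page, bad_addr);
 *
 *	object_err(cache, page, (u8 *)obj, "example reason");
 */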

#endif /* _LINUX_SLUB_DEF_H */