#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
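
/*
 * For example (illustrative; struct foo is a placeholder type):
 *
 *      struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 * Here sizeof(*f) is a compile-time constant, so the inline kmalloc()
 * below folds the kmalloc_index() lookup at build time and calls
 * kmem_cache_alloc_trace() on the matching general cache, while a
 * runtime size such as kmalloc(len, GFP_KERNEL) falls back to
 * __kmalloc().
 */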

#include <linux/init.h>
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int size;
        u32 reciprocal_buffer_size;
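        /*
         * Sketch of why the reciprocal is cached (illustrative): hot
         * paths can turn an object's byte offset within a slab into an
         * object index without an integer division, roughly
         *
         *      idx = reciprocal_divide(offset, cachep->reciprocal_buffer_size);
         *
         * (see linux/reciprocal_div.h).
         */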
/* 2) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;

        /* constructor func */
        void (*ctor)(void *obj);

/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. size contains the total
         * object size including these internal fields, while obj_offset
         * below contains the offset to the start of the user object.
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
        struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
        /*
         * We put array[] at the end of kmem_cache, because we want to size
         * this array to nr_cpu_ids slots instead of NR_CPUS
         * (see kmem_cache_init())
         * We still use [NR_CPUS] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of cpus.
         *
         * We also need to guarantee that the list is able to accommodate a
         * pointer for each node since "nodelists" uses the remainder of
         * available pointers.
         */
        struct kmem_cache_node **node;
        struct array_cache *array[NR_CPUS + MAX_NUMNODES];
        /*
         * Do not add fields after array[]
         */
};
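
/*
 * Typical lifecycle of a cache described by the struct above
 * (illustrative sketch; the "foo" names are placeholders):
 *
 *      static void foo_ctor(void *obj)
 *      {
 *              memset(obj, 0, sizeof(struct foo));
 *      }
 *
 *      cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                 SLAB_HWCACHE_ALIGN, foo_ctor);
 *      obj = kmem_cache_alloc(cachep, GFP_KERNEL);
 *      kmem_cache_free(cachep, obj);
 *      kmem_cache_destroy(cachep);
 *
 * The ctor, flags, size and alignment land in the fields above, and
 * the per-cpu array caches plus per-node lists service each alloc/free.
 */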

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
        return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *cachep;
        void *ret;

        if (__builtin_constant_p(size)) {
                int i;

                if (!size)
                        return ZERO_SIZE_PTR;

                i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = kmalloc_dma_caches[i];
                else
#endif
                        cachep = kmalloc_caches[i];

                ret = kmem_cache_alloc_trace(cachep, flags, size);

                return ret;
        }
        return __kmalloc(size, flags);
}
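
/*
 * Illustrative expansion: for a constant request such as
 *
 *      p = kmalloc(128, GFP_KERNEL);
 *
 * kmalloc_index(128) is evaluated at compile time and the whole call
 * reduces to kmem_cache_alloc_trace(kmalloc_caches[7], GFP_KERNEL, 128)
 * (index 7 assuming the usual power-of-two size classes).
 */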

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                                         gfp_t flags,
                                         int nodeid,
                                         size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                            gfp_t flags,
                            int nodeid,
                            size_t size)
{
        return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        struct kmem_cache *cachep;

        if (__builtin_constant_p(size)) {
                int i;

                if (!size)
                        return ZERO_SIZE_PTR;

                i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = kmalloc_dma_caches[i];
                else
#endif
                        cachep = kmalloc_caches[i];

                return kmem_cache_alloc_node_trace(cachep, flags, node, size);
        }
        return __kmalloc_node(size, flags, node);
}
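
/*
 * Illustrative NUMA-aware use: allocate from the memory node closest
 * to a device rather than to the calling CPU:
 *
 *      buf = kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
 *
 * The constant-size fast path mirrors kmalloc(); only the final call
 * targets the chosen node via kmem_cache_alloc_node_trace().
 */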

#endif  /* CONFIG_NUMA */

#endif  /* _LINUX_SLAB_DEF_H */