#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

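/*
 * For example (an illustrative sketch, not part of the original header):
 * a call with a compile-time constant size such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * lets the inline kmalloc() below pick the matching general cache
 * directly, while a runtime-sized request falls back to __kmalloc().
 */
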
#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>

/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

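/*
 * Illustrative sketch: an arch that does non-coherent DMA into kmalloc
 * buffers typically defines the override in its asm/cache.h, along the
 * lines of
 *
 *	#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 *
 * which makes every kmalloc cache here at least cache-line aligned.
 */
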
#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

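/*
 * Hypothetical sketch: an arch that faults on misaligned word accesses
 * could set, in its asm/cache.h,
 *
 *	#define ARCH_SLAB_MINALIGN	__alignof__(unsigned long)
 *
 * so that all caches, not just the kmalloc ones, honour that minimum.
 */
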
/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};

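/*
 * Sketch of the sizing trick described above (assumed to mirror
 * kmem_cache_init(), not copied from it): a dynamically created cache
 * only needs nr_node_ids trailing slots, so its effective size is
 *
 *	offsetof(struct kmem_cache, nodelists) +
 *			nr_node_ids * sizeof(struct kmem_list3 *)
 *
 * rather than the full [MAX_NUMNODES] array reserved for cache_cache.
 */
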
/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

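/*
 * Illustrative sketch: malloc_sizes[] is an ascending table built from
 * the CACHE(x) entries in linux/kmalloc_sizes.h, conceptually
 *
 *	{ .cs_size = 32, ... }, { .cs_size = 64, ... }, ...
 *
 * The inline kmalloc() below expands the same CACHE(x) list to turn a
 * constant size into an index i, then picks malloc_sizes[i].cs_cachep
 * (or cs_dmacachep for GFP_DMA requests).
 */
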
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
				    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}

static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(size, cachep, flags);

		return ret;
	}
	return __kmalloc(size, flags);
}

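/*
 * A sketch of what the constant-size path above reduces to
 * (illustrative, not actual compiler output): for kmalloc(16, GFP_KERNEL)
 * the CACHE(x) walk fixes i at compile time, the dead branches fold
 * away, and the whole call becomes roughly
 *
 *	kmem_cache_alloc_trace(16, malloc_sizes[i].cs_cachep, GFP_KERNEL);
 *
 * Only a non-constant size pays for the out-of-line __kmalloc() call.
 */
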
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
					 struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
			    struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}

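/*
 * Usage sketch (a hypothetical caller, not part of this header):
 * allocating near a device's NUMA node so the buffer is local to it:
 *
 *	buf = kmalloc_node(512, GFP_KERNEL, dev_to_node(dev));
 *
 * A constant size resolves through the same malloc_sizes[] walk as
 * kmalloc(); anything else falls back to __kmalloc_node().
 */
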
#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */