#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <trace/events/kmem.h>
+#include "internal.h"
+
/*
* Lock order:
- * 1. slub_lock (Global Semaphore)
+ * 1. slab_mutex (Global Mutex)
* 2. node->list_lock
* 3. slab_lock(page) (Only on some arches and for debugging)
*
- * slub_lock
+ * slab_mutex
*
- * The role of the slub_lock is to protect the list of all the slabs
+ * The role of the slab_mutex is to protect the list of all the slabs
* and to synchronize major metadata changes to slab cache structures.
*
* The slab_lock is only used for debugging and on arches that do not
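The hierarchy above is a nesting rule: slab_mutex may be held while taking a node's list_lock, and list_lock while taking slab_lock(page), never the other way around. A minimal sketch of the permitted nesting, assuming the usual slub.c surroundings (slab_caches, get_node(), partial slabs linked through page->lru); illustrative only, not part of the patch:

/* Illustrative nesting only -- not code from this patch. */
static void walk_partial_slabs(int node)
{
        struct kmem_cache *s;
        struct page *page;
        unsigned long flags;

        mutex_lock(&slab_mutex);                /* 1: global list of caches */
        list_for_each_entry(s, &slab_caches, list) {
                struct kmem_cache_node *n = get_node(s, node);

                if (!n)
                        continue;
                spin_lock_irqsave(&n->list_lock, flags);   /* 2: per-node lists */
                list_for_each_entry(page, &n->partial, lru) {
                        slab_lock(page);                   /* 3: per-page bit lock */
                        /* ... inspect page->freelist, page->inuse ... */
                        slab_unlock(page);
                }
                spin_unlock_irqrestore(&n->list_lock, flags);
        }
        mutex_unlock(&slab_mutex);
}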
static struct notifier_block slab_notifier;
#endif
-static enum {
- DOWN, /* No slab functionality available */
- PARTIAL, /* Kmem_cache_node works */
- UP, /* Everything works but does not show up in sysfs */
- SYSFS /* Sysfs up */
-} slab_state = DOWN;
-
-/* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
-static LIST_HEAD(slab_caches);
-
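With the state enum and the cache-list declarations gone from slub.c, the shared "slab.h" included above is assumed to provide roughly the following; a sketch for orientation, not the literal mm/slab.h contents:

/* Assumed declarations from the shared mm/slab.h (sketch only). */
enum slab_state {
        DOWN,           /* No slab functionality yet */
        PARTIAL,        /* SLUB: kmem_cache_node available */
        UP,             /* Slab caches usable, sysfs not yet set up */
        FULL            /* Everything is working */
};

extern enum slab_state slab_state;
extern struct mutex slab_mutex;         /* protects slab_caches */
extern struct list_head slab_caches;    /* all registered caches */
extern struct kmem_cache *kmem_cache;   /* cache for struct kmem_cache itself */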
/*
* Tracking user of a slab.
*/
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
kfree(s->name);
- kfree(s);
+ kmem_cache_free(kmem_cache, s);
}
#endif
* Core slab cache functions
*******************************************************************/
-int slab_is_available(void)
-{
- return slab_state >= UP;
-}
-
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
return s->node[node];
print_trailer(s, page, object);
}
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
{
va_list args;
char buf[100];
return 0;
}
-static noinline int free_debug_processing(struct kmem_cache *s,
- struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+ struct kmem_cache *s, struct page *page, void *object,
+ unsigned long addr, unsigned long *flags)
{
- unsigned long flags;
- int rc = 0;
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
- local_irq_save(flags);
+ spin_lock_irqsave(&n->list_lock, *flags);
slab_lock(page);
if (!check_slab(s, page))
set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0);
init_object(s, object, SLUB_RED_INACTIVE);
- rc = 1;
out:
slab_unlock(page);
- local_irq_restore(flags);
- return rc;
+ /*
+ * Keep node_lock to preserve integrity
+ * until the object is actually freed
+ */
+ return n;
fail:
+ slab_unlock(page);
+ spin_unlock_irqrestore(&n->list_lock, *flags);
slab_fix(s, "Object at 0x%p not freed", object);
- goto out;
+ return NULL;
}
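On the success path the function now returns the node with its list_lock still held, so the caller in the free slow path is responsible for dropping it once the object is really back on a freelist; on failure the locks have already been released and NULL is returned. A rough sketch of the calling pattern, with the cmpxchg and partial-list handling of the real __slab_free() elided:

/* Sketch of a caller; the real __slab_free() logic is elided. */
static void debug_aware_free(struct kmem_cache *s, struct page *page,
                             void *x, unsigned long addr)
{
        struct kmem_cache_node *n = NULL;
        unsigned long flags;

        if (kmem_cache_debug(s)) {
                n = free_debug_processing(s, page, x, addr, &flags);
                if (!n)
                        return; /* checks failed, locks already dropped */
        }

        /* ... put the object back on page->freelist ... */

        if (n)
                spin_unlock_irqrestore(&n->list_lock, flags);
}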
static int __init setup_slub_debug(char *str)
static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; }
-static inline int free_debug_processing(struct kmem_cache *s,
- struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+ struct kmem_cache *s, struct page *page, void *object,
+ unsigned long addr, unsigned long *flags) { return NULL; }
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
stat(s, ORDER_FALLBACK);
}
- if (flags & __GFP_WAIT)
- local_irq_disable();
-
- if (!page)
- return NULL;
-
- if (kmemcheck_enabled
+ if (kmemcheck_enabled && page
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);
kmemcheck_mark_unallocated_pages(page, pages);
}
+ if (flags & __GFP_WAIT)
+ local_irq_disable();
+ if (!page)
+ return NULL;
+
page->objects = oo_objects(oo);
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab = s;
__SetPageSlab(page);
+ if (page->pfmemalloc)
+ SetPageSlabPfmemalloc(page);
start = page_address(page);
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);
+ __ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
reset_page_mapcount(page);
if (current->reclaim_state)
local_irq_save(flags);
unfreeze_partials(s);
local_irq_restore(flags);
+ oldpage = NULL;
pobjects = 0;
pages = 0;
stat(s, CPU_PARTIAL_DRAIN);
return freelist;
}
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
+{
+ if (unlikely(PageSlabPfmemalloc(page)))
+ return gfp_pfmemalloc_allowed(gfpflags);
+
+ return true;
+}
+
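pfmemalloc_match() only makes sense together with the page-flag handling elsewhere in this file: allocate_slab() marks slabs that were backed by the memory reserves, the allocation paths refuse to hand objects from such a slab to callers that are not themselves entitled to the reserves, and __free_slab() clears the marker again. A condensed, illustrative view of the three sites (fragments, not a literal excerpt):

/* allocate_slab(): remember that the page was served from the reserves */
if (page->pfmemalloc)
        SetPageSlabPfmemalloc(page);

/* allocation slow path: a pfmemalloc slab is only usable if the current
 * allocation may itself dip into the reserves */
if (unlikely(!pfmemalloc_match(page, gfpflags))) {
        deactivate_slab(s, page, c->freelist);
        c->page = NULL;
        c->freelist = NULL;
        goto new_slab;
}

/* __free_slab(): drop the marker before the page is returned */
__ClearPageSlabPfmemalloc(page);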
/*
* Check the page->freelist of a page and either transfer the freelist to the
* per cpu freelist or deactivate the page.
goto new_slab;
}
+ /*
+ * By rights, we should be searching for a slab page that was
+ * PFMEMALLOC but right now, we are losing the pfmemalloc
+ * information when the page leaves the per-cpu allocator
+ */
+ if (unlikely(!pfmemalloc_match(page, gfpflags))) {
+ deactivate_slab(s, page, c->freelist);
+ c->page = NULL;
+ c->freelist = NULL;
+ goto new_slab;
+ }
+
/* must check again c->freelist in case of cpu migration or IRQ */
freelist = c->freelist;
if (freelist)
}
page = c->page;
- if (likely(!kmem_cache_debug(s)))
+ if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
goto load_freelist;
/* Only entered in the debug case */
- if (!alloc_debug_processing(s, page, freelist, addr))
+ if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, page, get_freepointer(s, freelist));
object = c->freelist;
page = c->page;
if (unlikely(!object || !node_match(page, node)))
-
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
stat(s, FREE_SLOWPATH);
- if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+ if (kmem_cache_debug(s) &&
+ !(n = free_debug_processing(s, page, x, addr, &flags)))
return;
do {
page = virt_to_head_page(x);
+ if (kmem_cache_debug(s) && page->slab != s) {
+ pr_err("kmem_cache_free: Wrong slab cache. %s but object"
+ " is from %s\n", page->slab->name, s->name);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
slab_free(s, page, x, _RET_IP_);
trace_kmem_cache_free(_RET_IP_, x);
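The added check catches objects handed back to a cache other than the one they were allocated from. A hypothetical misuse it is meant to flag when debugging is enabled (cache_a and cache_b are made-up names for the example):

/* Hypothetical misuse; cache_a and cache_b are illustrative only. */
static void example_wrong_cache_free(struct kmem_cache *cache_a,
                                     struct kmem_cache *cache_b)
{
        void *obj = kmem_cache_alloc(cache_a, GFP_KERNEL);

        if (obj)
                kmem_cache_free(cache_b, obj);  /* flagged; the bogus free is skipped */
}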
sizeof(long), GFP_ATOMIC);
if (!map)
return;
- slab_err(s, page, "%s", text);
+ slab_err(s, page, text, s->name);
slab_lock(page);
get_map(s, page, map);
discard_slab(s, page);
} else {
list_slab_objects(s, page,
- "Objects remaining on kmem_cache_close()");
+ "Objects remaining in %s on kmem_cache_close()");
}
}
}
int node;
flush_all(s);
- free_percpu(s->cpu_slab);
/* Attempt to free all objects */
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
if (n->nr_partial || slabs_node(s, node))
return 1;
}
+ free_percpu(s->cpu_slab);
free_kmem_cache_nodes(s);
return 0;
}
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
-{
- down_write(&slub_lock);
- s->refcount--;
- if (!s->refcount) {
- list_del(&s->list);
- up_write(&slub_lock);
- if (kmem_cache_close(s)) {
- printk(KERN_ERR "SLUB %s: %s called for cache that "
- "still has objects.\n", s->name, __func__);
- dump_stack();
- }
- if (s->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
- sysfs_slab_remove(s);
- } else
- up_write(&slub_lock);
+int __kmem_cache_shutdown(struct kmem_cache *s)
+{
+ return kmem_cache_close(s);
+}
+
+void __kmem_cache_destroy(struct kmem_cache *s)
+{
+ sysfs_slab_remove(s);
}
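The allocator-specific kmem_cache_destroy() is split into these two hooks so that the refcounting, list manipulation and slab_mutex handling can live in allocator-independent code. A sketch of the generic caller they are written for, mirroring the removed function above; the real mm/slab_common.c implementation may differ in detail:

/* Sketch of the expected generic caller; not the literal common code. */
void kmem_cache_destroy(struct kmem_cache *s)
{
        mutex_lock(&slab_mutex);
        s->refcount--;
        if (!s->refcount) {
                list_del(&s->list);
                mutex_unlock(&slab_mutex);
                if (__kmem_cache_shutdown(s)) {
                        printk(KERN_ERR "SLUB %s: %s called for cache that "
                                "still has objects.\n", s->name, __func__);
                        dump_stack();
                }
                if (s->flags & SLAB_DESTROY_BY_RCU)
                        rcu_barrier();
                __kmem_cache_destroy(s);
        } else
                mutex_unlock(&slab_mutex);
}
EXPORT_SYMBOL(kmem_cache_destroy);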
-EXPORT_SYMBOL(kmem_cache_destroy);
/********************************************************************
* Kmalloc subsystem
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
EXPORT_SYMBOL(kmalloc_caches);
-static struct kmem_cache *kmem_cache;
-
#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
#endif
/*
* This function is called with IRQs disabled during early-boot on
- * single CPU so there's no need to take slub_lock here.
+ * single CPU so there's no need to take slab_mutex here.
*/
if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
flags, NULL))
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
kmemleak_free(x);
- put_page(page);
+ __free_pages(page, compound_order(page));
return;
}
slab_free(page->slab, page, object, _RET_IP_);
{
struct kmem_cache *s;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list)
kmem_cache_shrink(s);
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
return 0;
}
if (offline_node < 0)
return;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
n = get_node(s, offline_node);
if (n) {
kmem_cache_free(kmem_cache_node, n);
}
}
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
static int slab_mem_going_online_callback(void *arg)
* allocate a kmem_cache_node structure in order to bring the node
* online.
*/
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
/*
* XXX: kmem_cache_alloc_node will fallback to other nodes
s->node[nid] = n;
}
out:
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
return ret;
}
struct kmem_cache *s;
char *n;
- down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
s->refcount++;
if (sysfs_slab_alias(s, name)) {
s->refcount--;
- goto err;
+ return NULL;
}
- up_write(&slub_lock);
return s;
}
n = kstrdup(name, GFP_KERNEL);
if (!n)
- goto err;
+ return NULL;
- s = kmalloc(kmem_size, GFP_KERNEL);
+ s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
if (s) {
if (kmem_cache_open(s, n,
size, align, flags, ctor)) {
- list_add(&s->list, &slab_caches);
- up_write(&slub_lock);
- if (sysfs_slab_add(s)) {
- down_write(&slub_lock);
- list_del(&s->list);
- kfree(n);
- kfree(s);
- goto err;
- }
- return s;
+ int r;
+
+ mutex_unlock(&slab_mutex);
+ r = sysfs_slab_add(s);
+ mutex_lock(&slab_mutex);
+
+ if (!r)
+ return s;
+
+ kmem_cache_close(s);
}
- kfree(s);
+ kmem_cache_free(kmem_cache, s);
}
kfree(n);
-err:
- up_write(&slub_lock);
- return s;
+ return NULL;
}
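With the down_write()/up_write() pairs gone, this function relies on being entered with slab_mutex already held; it only drops and re-takes the mutex around sysfs_slab_add(), which may sleep and take sysfs locks. A sketch of the generic caller assumed here, with the allocator entry point named __kmem_cache_create() by analogy with the __kmem_cache_shutdown()/__kmem_cache_destroy() hooks above; illustrative only, the real mm/slab_common.c differs in detail (sanity checks, list handling, error reporting):

/* Sketch of the assumed generic caller; not the literal common code. */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                size_t align, unsigned long flags, void (*ctor)(void *))
{
        struct kmem_cache *s;

        get_online_cpus();
        mutex_lock(&slab_mutex);
        s = __kmem_cache_create(name, size, align, flags, ctor);
        /* common code is also expected to put new caches on slab_caches,
         * since the list_add() was removed from the allocator above */
        mutex_unlock(&slab_mutex);
        put_online_cpus();

        return s;
}
EXPORT_SYMBOL(kmem_cache_create);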
#ifdef CONFIG_SMP
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
local_irq_save(flags);
__flush_cpu_slab(s, cpu);
local_irq_restore(flags);
}
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
break;
default:
break;
struct kmem_cache *s = to_slab(kobj);
kfree(s->name);
- kfree(s);
+ kmem_cache_free(kmem_cache, s);
}
static const struct sysfs_ops slab_sysfs_ops = {
const char *name;
int unmergeable;
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/* Defer until later */
return 0;
static void sysfs_slab_remove(struct kmem_cache *s)
{
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/*
* Sysfs has not been setup yet so no need to remove the
* cache from sysfs.
{
struct saved_alias *al;
- if (slab_state == SYSFS) {
+ if (slab_state == FULL) {
/*
* If we have a leftover link then remove it.
*/
struct kmem_cache *s;
int err;
- down_write(&slub_lock);
+ mutex_lock(&slab_mutex);
slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
if (!slab_kset) {
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
printk(KERN_ERR "Cannot register slab subsystem.\n");
return -ENOSYS;
}
- slab_state = SYSFS;
+ slab_state = FULL;
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
kfree(al);
}
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
resiliency_test();
return 0;
}
{
loff_t n = *pos;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
if (!n)
print_slabinfo_header(m);
static void s_stop(struct seq_file *m, void *p)
{
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
static int s_show(struct seq_file *m, void *p)