slab: clean up kmem_cache_create_memcg() / kmem_cache_destroy() memcg handling [NOTE(review): the subject line originally attached here — "hugetlb: ensure hugepage access is denied if hugepages are not supported" — belongs to an unrelated commit and does not match this mm/slab_common.c diff; verify the correct subject against the upstream git history]
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / slab_common.c
index 0b7bb39..f149e67 100644 (file)
@@ -56,7 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        continue;
                }
 
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
                /*
                 * For simplicity, we won't check this in the list of memcg
                 * caches. We have control over memcg naming, and if there
@@ -171,13 +171,26 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
                        struct kmem_cache *parent_cache)
 {
        struct kmem_cache *s = NULL;
-       int err = 0;
+       int err;
 
        get_online_cpus();
        mutex_lock(&slab_mutex);
 
-       if (!kmem_cache_sanity_check(memcg, name, size) == 0)
-               goto out_locked;
+       err = kmem_cache_sanity_check(memcg, name, size);
+       if (err)
+               goto out_unlock;
+
+       if (memcg) {
+               /*
+                * Since per-memcg caches are created asynchronously on first
+                * allocation (see memcg_kmem_get_cache()), several threads can
+                * try to create the same cache, but only one of them may
+                * succeed. Therefore if we get here and see the cache has
+                * already been created, we silently return NULL.
+                */
+               if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg)))
+                       goto out_unlock;
+       }
 
        /*
         * Some allocators will constraint the set of valid flags to a subset
@@ -189,44 +202,47 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
 
        s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
        if (s)
-               goto out_locked;
+               goto out_unlock;
 
+       err = -ENOMEM;
        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
-       if (s) {
-               s->object_size = s->size = size;
-               s->align = calculate_alignment(flags, align, size);
-               s->ctor = ctor;
+       if (!s)
+               goto out_unlock;
 
-               if (memcg_register_cache(memcg, s, parent_cache)) {
-                       kmem_cache_free(kmem_cache, s);
-                       err = -ENOMEM;
-                       goto out_locked;
-               }
+       s->object_size = s->size = size;
+       s->align = calculate_alignment(flags, align, size);
+       s->ctor = ctor;
 
-               s->name = kstrdup(name, GFP_KERNEL);
-               if (!s->name) {
-                       kmem_cache_free(kmem_cache, s);
-                       err = -ENOMEM;
-                       goto out_locked;
-               }
+       s->name = kstrdup(name, GFP_KERNEL);
+       if (!s->name)
+               goto out_free_cache;
 
-               err = __kmem_cache_create(s, flags);
-               if (!err) {
-                       s->refcount = 1;
-                       list_add(&s->list, &slab_caches);
-                       memcg_cache_list_add(memcg, s);
-               } else {
-                       kfree(s->name);
-                       kmem_cache_free(kmem_cache, s);
-               }
-       } else
-               err = -ENOMEM;
+       err = memcg_alloc_cache_params(memcg, s, parent_cache);
+       if (err)
+               goto out_free_cache;
+
+       err = __kmem_cache_create(s, flags);
+       if (err)
+               goto out_free_cache;
+
+       s->refcount = 1;
+       list_add(&s->list, &slab_caches);
+       memcg_register_cache(s);
 
-out_locked:
+out_unlock:
        mutex_unlock(&slab_mutex);
        put_online_cpus();
 
        if (err) {
+               /*
+                * There is no point in flooding logs with warnings or
+                * especially crashing the system if we fail to create a cache
+                * for a memcg. In this case we will be accounting the memcg
+                * allocation to the root cgroup until we succeed to create its
+                * own cache, but it isn't that critical.
+                */
+               if (!memcg)
+                       return NULL;
 
                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
@@ -236,11 +252,15 @@ out_locked:
                                name, err);
                        dump_stack();
                }
-
                return NULL;
        }
-
        return s;
+
+out_free_cache:
+       memcg_free_cache_params(s);
+       kfree(s->name);
+       kmem_cache_free(kmem_cache, s);
+       goto out_unlock;
 }
 
 struct kmem_cache *
@@ -263,11 +283,12 @@ void kmem_cache_destroy(struct kmem_cache *s)
                list_del(&s->list);
 
                if (!__kmem_cache_shutdown(s)) {
+                       memcg_unregister_cache(s);
                        mutex_unlock(&slab_mutex);
                        if (s->flags & SLAB_DESTROY_BY_RCU)
                                rcu_barrier();
 
-                       memcg_release_cache(s);
+                       memcg_free_cache_params(s);
                        kfree(s->name);
                        kmem_cache_free(kmem_cache, s);
                } else {