memcg: do not create memsw files if swap accounting is disabled
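Swap accounting can be compiled in (CONFIG_MEMCG_SWAP) yet disabled at boot, in which case the memsw.* control files are meaningless. This change moves them out of the always-registered mem_cgroup_files[] table into a separate memsw_cgroup_files[] array and registers that array from mem_cgroup_init() via memsw_file_init() only when really_do_swap_account is set, so the files are created only when swap accounting is actually active. The diff below also extends mem_cgroup_print_oom_info() to dump per-memcg statistics for the whole hierarchy under OOM, converts its printk() calls to pr_info()/pr_cont(), moves mem_cgroup_lru_names[] earlier in the file so the OOM path can use it, and switches two swap-cache lookups from &swapper_space to swap_address_space().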
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fbb60b1..00ce03e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -120,6 +120,14 @@ static const char * const mem_cgroup_events_names[] = {
        "pgmajfault",
 };
 
+static const char * const mem_cgroup_lru_names[] = {
+       "inactive_anon",
+       "active_anon",
+       "inactive_file",
+       "active_file",
+       "unevictable",
+};
+
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
  * it will be incremented by the number of pages. This counter is used for
@@ -1524,8 +1532,9 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
        spin_unlock_irqrestore(&memcg->move_lock, *flags);
 }
 
+#define K(x) ((x) << (PAGE_SHIFT-10))
 /**
- * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
+ * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
  * @memcg: The memory cgroup that went over limit
  * @p: Task that is going to be killed
  *
@@ -1543,8 +1552,10 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
         */
        static char memcg_name[PATH_MAX];
        int ret;
+       struct mem_cgroup *iter;
+       unsigned int i;
 
-       if (!memcg || !p)
+       if (!p)
                return;
 
        rcu_read_lock();
@@ -1563,7 +1574,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
        }
        rcu_read_unlock();
 
-       printk(KERN_INFO "Task in %s killed", memcg_name);
+       pr_info("Task in %s killed", memcg_name);
 
        rcu_read_lock();
        ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
@@ -1576,22 +1587,45 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
        /*
         * Continues from above, so we don't need a KERN_ level
         */
-       printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
+       pr_cont(" as a result of limit of %s\n", memcg_name);
 done:
 
-       printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
+       pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
                res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
                res_counter_read_u64(&memcg->res, RES_FAILCNT));
-       printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
-               "failcnt %llu\n",
+       pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
                res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
                res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
-       printk(KERN_INFO "kmem: usage %llukB, limit %llukB, failcnt %llu\n",
+       pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
                res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
                res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
+
+       for_each_mem_cgroup_tree(iter, memcg) {
+               pr_info("Memory cgroup stats");
+
+               rcu_read_lock();
+               ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
+               if (!ret)
+                       pr_cont(" for %s", memcg_name);
+               rcu_read_unlock();
+               pr_cont(":");
+
+               for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+                       if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
+                               continue;
+                       pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
+                               K(mem_cgroup_read_stat(iter, i)));
+               }
+
+               for (i = 0; i < NR_LRU_LISTS; i++)
+                       pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
+                               K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
+
+               pr_cont("\n");
+       }
 }
 
 /*
@@ -4391,8 +4425,8 @@ void mem_cgroup_print_bad_page(struct page *page)
 
        pc = lookup_page_cgroup_used(page);
        if (pc) {
-               printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
-                      pc, pc->flags, pc->mem_cgroup);
+               pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
+                        pc, pc->flags, pc->mem_cgroup);
        }
 }
 #endif
@@ -5214,14 +5248,6 @@ static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
 }
 #endif /* CONFIG_NUMA */
 
-static const char * const mem_cgroup_lru_names[] = {
-       "inactive_anon",
-       "active_anon",
-       "inactive_file",
-       "active_file",
-       "unevictable",
-};
-
 static inline void mem_cgroup_lru_names_not_uptodate(void)
 {
        BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
@@ -5797,33 +5823,6 @@ static struct cftype mem_cgroup_files[] = {
                .read_seq_string = memcg_numa_stat_show,
        },
 #endif
-#ifdef CONFIG_MEMCG_SWAP
-       {
-               .name = "memsw.usage_in_bytes",
-               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
-               .read = mem_cgroup_read,
-               .register_event = mem_cgroup_usage_register_event,
-               .unregister_event = mem_cgroup_usage_unregister_event,
-       },
-       {
-               .name = "memsw.max_usage_in_bytes",
-               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
-               .trigger = mem_cgroup_reset,
-               .read = mem_cgroup_read,
-       },
-       {
-               .name = "memsw.limit_in_bytes",
-               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
-               .write_string = mem_cgroup_write,
-               .read = mem_cgroup_read,
-       },
-       {
-               .name = "memsw.failcnt",
-               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
-               .trigger = mem_cgroup_reset,
-               .read = mem_cgroup_read,
-       },
-#endif
 #ifdef CONFIG_MEMCG_KMEM
        {
                .name = "kmem.limit_in_bytes",
@@ -5858,6 +5857,36 @@ static struct cftype mem_cgroup_files[] = {
        { },    /* terminate */
 };
 
+#ifdef CONFIG_MEMCG_SWAP
+static struct cftype memsw_cgroup_files[] = {
+       {
+               .name = "memsw.usage_in_bytes",
+               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
+               .read = mem_cgroup_read,
+               .register_event = mem_cgroup_usage_register_event,
+               .unregister_event = mem_cgroup_usage_unregister_event,
+       },
+       {
+               .name = "memsw.max_usage_in_bytes",
+               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
+               .trigger = mem_cgroup_reset,
+               .read = mem_cgroup_read,
+       },
+       {
+               .name = "memsw.limit_in_bytes",
+               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
+               .write_string = mem_cgroup_write,
+               .read = mem_cgroup_read,
+       },
+       {
+               .name = "memsw.failcnt",
+               .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
+               .trigger = mem_cgroup_reset,
+               .read = mem_cgroup_read,
+       },
+       { },    /* terminate */
+};
+#endif
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
        struct mem_cgroup_per_node *pn;
@@ -6281,7 +6310,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
         * Because lookup_swap_cache() updates some statistics counter,
         * we call find_get_page() with swapper_space directly.
         */
-       page = find_get_page(&swapper_space, ent.val);
+       page = find_get_page(swap_address_space(ent), ent.val);
        if (do_swap_account)
                entry->val = ent.val;
 
@@ -6322,7 +6351,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
                swp_entry_t swap = radix_to_swp_entry(page);
                if (do_swap_account)
                        *entry = swap;
-               page = find_get_page(&swapper_space, swap.val);
+               page = find_get_page(swap_address_space(swap), swap.val);
        }
 #endif
        return page;
@@ -6757,19 +6786,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
        .use_id = 1,
 };
 
-/*
- * The rest of init is performed during ->css_alloc() for root css which
- * happens before initcalls.  hotcpu_notifier() can't be done together as
- * it would introduce circular locking by adding cgroup_lock -> cpu hotplug
- * dependency.  Do it from a subsys_initcall().
- */
-static int __init mem_cgroup_init(void)
-{
-       hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
-       return 0;
-}
-subsys_initcall(mem_cgroup_init);
-
 #ifdef CONFIG_MEMCG_SWAP
 static int __init enable_swap_account(char *s)
 {
@@ -6782,4 +6798,28 @@ static int __init enable_swap_account(char *s)
 }
 __setup("swapaccount=", enable_swap_account);
 
+static void __init memsw_file_init(void)
+{
+       if (really_do_swap_account)
+               WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys,
+                                       memsw_cgroup_files));
+}
+#else
+static void __init memsw_file_init(void)
+{
+}
 #endif
+
+/*
+ * The rest of init is performed during ->css_alloc() for root css which
+ * happens before initcalls.  hotcpu_notifier() can't be done together as
+ * it would introduce circular locking by adding cgroup_lock -> cpu hotplug
+ * dependency.  Do it from a subsys_initcall().
+ */
+static int __init mem_cgroup_init(void)
+{
+       hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+       memsw_file_init();
+       return 0;
+}
+subsys_initcall(mem_cgroup_init);
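
The key idea above — keep the optional memsw.* files in their own sentinel-terminated cftype array and attach it from an initcall only when swap accounting is actually enabled — can be modelled outside the kernel. The sketch below is a minimal userspace analogue in plain C (the ctl_file struct, add_files() and the flag handling are hypothetical stand-ins, not kernel API): a base table of files is always registered, while the optional table is added only when a runtime flag is set, mirroring memsw_file_init() guarding cgroup_add_cftypes() with really_do_swap_account.

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's sentinel-terminated struct cftype arrays. */
struct ctl_file {
        const char *name;       /* NULL name terminates the array, like the { } entry */
};

/* Files that always exist, analogous to mem_cgroup_files[]. */
static const struct ctl_file base_files[] = {
        { "memory.usage_in_bytes" },
        { "memory.limit_in_bytes" },
        { "memory.failcnt" },
        { NULL },       /* terminate */
};

/* Files that only make sense with swap accounting, analogous to memsw_cgroup_files[]. */
static const struct ctl_file memsw_files[] = {
        { "memory.memsw.usage_in_bytes" },
        { "memory.memsw.limit_in_bytes" },
        { "memory.memsw.failcnt" },
        { NULL },       /* terminate */
};

/* Stand-in for cgroup_add_cftypes(): just report what would be created. */
static void add_files(const struct ctl_file *files)
{
        for (; files->name; files++)
                printf("creating %s\n", files->name);
}

/* Mirrors memsw_file_init(): register the optional table only when the flag is set. */
static void memsw_file_init(bool really_do_swap_account)
{
        if (really_do_swap_account)
                add_files(memsw_files);
}

int main(void)
{
        bool really_do_swap_account = false;     /* e.g. booted with swapaccount=0 */

        add_files(base_files);                   /* base files are always created */
        memsw_file_init(really_do_swap_account); /* memsw.* files are skipped here */
        return 0;
}

With the flag left false, only the memory.* names are printed; flipping it to true adds the memsw.* names as well, which is the behaviour the patch gives the real control files.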