scs: Add page accounting for shadow call stack allocations
author    Sami Tolvanen <samitolvanen@google.com>
          Mon, 27 Apr 2020 16:00:08 +0000 (09:00 -0700)
committer Will Deacon <will@kernel.org>
          Fri, 15 May 2020 15:35:49 +0000 (16:35 +0100)
This change adds accounting for the memory allocated for shadow call stacks, exposing the totals through /proc/meminfo, the per-node meminfo output in sysfs, show_free_areas(), and /proc/vmstat.
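
With CONFIG_SHADOW_CALL_STACK enabled, the new counter appears next to
KernelStack in /proc/meminfo and in the per-node meminfo output; a sample
(the values below are illustrative only, the format follows the seq_printf
strings added by this patch):

  KernelStack:        2032 kB
  ShadowCallStack:     252 kB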

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
drivers/base/node.c
fs/proc/meminfo.c
include/linux/mmzone.h
kernel/scs.c
mm/page_alloc.c
mm/vmstat.c

diff --git a/drivers/base/node.c b/drivers/base/node.c
index 10d7e818e118d32e9ca37429b10805099c467add..50b8c0d43859877621c17c532a51c31f02d31723 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
                       "Node %d AnonPages:      %8lu kB\n"
                       "Node %d Shmem:          %8lu kB\n"
                       "Node %d KernelStack:    %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+                      "Node %d ShadowCallStack:%8lu kB\n"
+#endif
                       "Node %d PageTables:     %8lu kB\n"
                       "Node %d NFS_Unstable:   %8lu kB\n"
                       "Node %d Bounce:         %8lu kB\n"
@@ -438,6 +441,9 @@ static ssize_t node_read_meminfo(struct device *dev,
                       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
                       nid, K(i.sharedram),
                       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+                      nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_KB),
+#endif
                       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
                       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
                       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8c1f1bb1a5ce3fd1d9e757a87805fb3df638206c..09cd51c8d23de4e90a2d0c56758b95e52fed1b1a 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -103,6 +103,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        show_val_kb(m, "SUnreclaim:     ", sunreclaim);
        seq_printf(m, "KernelStack:    %8lu kB\n",
                   global_zone_page_state(NR_KERNEL_STACK_KB));
+#ifdef CONFIG_SHADOW_CALL_STACK
+       seq_printf(m, "ShadowCallStack:%8lu kB\n",
+                  global_zone_page_state(NR_KERNEL_SCS_KB));
+#endif
        show_val_kb(m, "PageTables:     ",
                    global_zone_page_state(NR_PAGETABLE));
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1b9de7d220fb7856b71fc54510bdabbfc947d096..acffc3bc61785f420a47591b88cfd034b2bd036c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -156,6 +156,9 @@ enum zone_stat_item {
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_PAGETABLE,           /* used for pagetables */
        NR_KERNEL_STACK_KB,     /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+       NR_KERNEL_SCS_KB,       /* measured in KiB */
+#endif
        /* Second 128 byte cacheline */
        NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
diff --git a/kernel/scs.c b/kernel/scs.c
index 38f8f31c94519427b94fc5c72b85e95282241297..6d2f983ac54ef2c66d78cf30ec02c53d5e9aad25 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -6,8 +6,10 @@
  */
 
 #include <linux/kasan.h>
+#include <linux/mm.h>
 #include <linux/scs.h>
 #include <linux/slab.h>
+#include <linux/vmstat.h>
 #include <asm/scs.h>
 
 static struct kmem_cache *scs_cache;
@@ -40,6 +42,17 @@ void __init scs_init(void)
        scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
 }
 
+static struct page *__scs_page(struct task_struct *tsk)
+{
+       return virt_to_page(task_scs(tsk));
+}
+
+static void scs_account(struct task_struct *tsk, int account)
+{
+       mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_KB,
+               account * (SCS_SIZE / 1024));
+}
+
 int scs_prepare(struct task_struct *tsk, int node)
 {
        void *s = scs_alloc(node);
@@ -49,6 +62,7 @@ int scs_prepare(struct task_struct *tsk, int node)
 
        task_scs(tsk) = s;
        task_scs_offset(tsk) = 0;
+       scs_account(tsk, 1);
 
        return 0;
 }
@@ -61,5 +75,6 @@ void scs_release(struct task_struct *tsk)
                return;
 
        WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
+       scs_account(tsk, -1);
        scs_free(s);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 69827d4fa0527dc1dbacdc1b0af711df0929d89f..83743d7a61775aebe4554dfc559aabd8d13410b8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5411,6 +5411,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                        " managed:%lukB"
                        " mlocked:%lukB"
                        " kernel_stack:%lukB"
+#ifdef CONFIG_SHADOW_CALL_STACK
+                       " shadow_call_stack:%lukB"
+#endif
                        " pagetables:%lukB"
                        " bounce:%lukB"
                        " free_pcp:%lukB"
@@ -5433,6 +5436,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                        K(zone_managed_pages(zone)),
                        K(zone_page_state(zone, NR_MLOCK)),
                        zone_page_state(zone, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+                       zone_page_state(zone, NR_KERNEL_SCS_KB),
+#endif
                        K(zone_page_state(zone, NR_PAGETABLE)),
                        K(zone_page_state(zone, NR_BOUNCE)),
                        K(free_pcp),
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 96d21a792b57c35ad59f1f062b7efedf9f59a34c..2435d2c246570f7b72d1aa9a9babb8f01f934d5f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1119,6 +1119,9 @@ const char * const vmstat_text[] = {
        "nr_mlock",
        "nr_page_table_pages",
        "nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+       "nr_shadow_call_stack",
+#endif
        "nr_bounce",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
        "nr_zspages",