mm,slub,x86: decouple size of struct page from CONFIG_CMPXCHG_LOCAL
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c93d00a..3cc3062 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -62,10 +62,23 @@ struct page {
                        struct {
 
                                union {
-                                       atomic_t _mapcount;     /* Count of ptes mapped in mms,
-                                                                * to show when page is mapped
-                                                                * & limit reverse map searches.
-                                                                */
+                                       /*
+                                        * Count of ptes mapped in
+                                        * mms, to show when page is
+                                        * mapped & limit reverse map
+                                        * searches.
+                                        *
+                                        * Used also for tail pages
+                                        * refcounting instead of
+                                        * _count. Tail pages cannot
+                                        * be mapped and keeping the
+                                        * tail page _count zero at
+                                        * all times guarantees
+                                        * get_page_unless_zero() will
+                                        * never succeed on tail
+                                        * pages.
+                                        */
+                                       atomic_t _mapcount;
 
                                        struct {
                                                unsigned inuse:16;
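
The added comment leans on get_page_unless_zero() being a conditional
increment that fails while _count is zero: keeping a tail page's _count
pinned at zero forces every speculative reference to fail there, so the
reference is taken on the head page instead. For reference, the helper in
include/linux/mm.h of this era boils down to (a sketch, not part of this
patch):

        static inline int get_page_unless_zero(struct page *page)
        {
                /* increment _count atomically, but only if it is non-zero */
                return atomic_inc_not_zero(&page->_count);
        }
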
@@ -138,12 +151,11 @@ struct page {
 #endif
 }
 /*
- * If another subsystem starts using the double word pairing for atomic
- * operations on struct page then it must change the #if to ensure
- * proper alignment of the page struct.
+ * The struct page can be forced to be double word aligned so that atomic ops
+ * on double words work. The SLUB allocator can make use of such a feature.
  */
-#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
-       __attribute__((__aligned__(2*sizeof(unsigned long))))
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+       __aligned(2 * sizeof(unsigned long))
 #endif
 ;
 
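The new CONFIG_HAVE_ALIGNED_STRUCT_PAGE symbol is selected by architectures
that provide cmpxchg_double(); on x86-64 the underlying cmpxchg16b
instruction faults unless its 16-byte operand is 16-byte aligned, which is
exactly the 2 * sizeof(unsigned long) forced above. A condensed sketch of
the SLUB path that relies on this, modeled on __cmpxchg_double_slab() in
mm/slub.c (the spinlock fallback for other configurations is elided):

        /*
         * Swap the adjacent freelist/counters pair of a slab page in one
         * atomic double-word exchange; this is only safe because struct
         * page is double word aligned here.
         */
        if (cmpxchg_double(&page->freelist, &page->counters,
                           freelist_old, counters_old,
                           freelist_new, counters_new))
                return 1;       /* both words updated atomically */
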
@@ -304,8 +316,15 @@ struct mm_struct {
        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */
 
-       unsigned long total_vm, locked_vm, shared_vm, exec_vm;
-       unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
+       unsigned long total_vm;         /* Total pages mapped */
+       unsigned long locked_vm;        /* Pages that have PG_mlocked set */
+       unsigned long pinned_vm;        /* Refcount permanently increased */
+       unsigned long shared_vm;        /* Shared pages (files) */
+       unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
+       unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
+       unsigned long reserved_vm;      /* VM_RESERVED|VM_IO pages */
+       unsigned long def_flags;
+       unsigned long nr_ptes;          /* Page table pages */
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;
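
The new per-field comments match the bookkeeping done in mm/mmap.c; a
simplified sketch of that era's vm_stat_account(), not part of this patch,
shows where each counter is bumped:

        void vm_stat_account(struct mm_struct *mm, unsigned long flags,
                             struct file *file, long pages)
        {
                if (file) {
                        mm->shared_vm += pages;         /* file-backed */
                        if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
                                mm->exec_vm += pages;
                } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
                        mm->stack_vm += pages;
                if (flags & (VM_RESERVED|VM_IO))
                        mm->reserved_vm += pages;
        }
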
@@ -336,9 +355,6 @@ struct mm_struct {
        unsigned int token_priority;
        unsigned int last_interval;
 
-       /* How many tasks sharing this mm are OOM_DISABLE */
-       atomic_t oom_disable_count;
-
        unsigned long flags; /* Must use atomic bitops to access the bits */
 
        struct core_state *core_state; /* coredumping support */
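
The surviving flags field must, as its comment says, only be accessed with
atomic bitops. Typical callers of this era look like the following
(illustrative snippets, not part of this patch):

        /* __ksm_enter() marks an mm as mergeable: */
        set_bit(MMF_VM_MERGEABLE, &mm->flags);

        /* __khugepaged_enter() enrols an mm exactly once: */
        if (test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))
                return 0;       /* already enrolled */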