mm/memblock.c: introduce bottom-up allocation mode
[platform/adaptation/renesas_rcar/renesas_kernel.git] / include / linux / percpu-defs.h
1 #ifndef _LINUX_PERCPU_DEFS_H
2 #define _LINUX_PERCPU_DEFS_H
3
4 /*
5  * Base implementations of per-CPU variable declarations and definitions, where
6  * the section in which the variable is to be placed is provided by the
7  * 'sec' argument.  This may be used to affect the parameters governing the
8  * variable's storage.
9  *
10  * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
11  * linkage errors occur due the compiler generating the wrong code to access
12  * that section.
13  */
14 #define __PCPU_ATTRS(sec)                                               \
15         __percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))     \
16         PER_CPU_ATTRIBUTES
17
18 #define __PCPU_DUMMY_ATTRS                                              \
19         __attribute__((section(".discard"), unused))
20
21 /*
22  * Macro which verifies @ptr is a percpu pointer without evaluating
23  * @ptr.  This is to be used in percpu accessors to verify that the
24  * input parameter is a percpu pointer.
25  *
26  * + 0 is required in order to convert the pointer type from a
27  * potential array type to a pointer to a single item of the array.
28  */
29 #define __verify_pcpu_ptr(ptr)  do {                                    \
30         const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;    \
31         (void)__vpp_verify;                                             \
32 } while (0)
33
34 /*
35  * s390 and alpha modules require percpu variables to be defined as
36  * weak to force the compiler to generate GOT based external
37  * references for them.  This is necessary because percpu sections
38  * will be located outside of the usually addressable area.
39  *
40  * This definition puts the following two extra restrictions when
41  * defining percpu variables.
42  *
43  * 1. The symbol must be globally unique, even the static ones.
44  * 2. Static percpu variables cannot be defined inside a function.
45  *
46  * Archs which need weak percpu definitions should define
47  * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
48  *
49  * To ensure that the generic code observes the above two
50  * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
51  * definition is used for all cases.
52  */
53 #if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
54 /*
55  * __pcpu_scope_* dummy variable is used to enforce scope.  It
56  * receives the static modifier when it's used in front of
57  * DEFINE_PER_CPU() and will trigger build failure if
58  * DECLARE_PER_CPU() is used for the same variable.
59  *
60  * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
61  * such that hidden weak symbol collision, which will cause unrelated
62  * variables to share the same address, can be detected during build.
63  */
64 #define DECLARE_PER_CPU_SECTION(type, name, sec)                        \
65         extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;             \
66         extern __PCPU_ATTRS(sec) __typeof__(type) name
67
68 #define DEFINE_PER_CPU_SECTION(type, name, sec)                         \
69         __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;                    \
70         extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;            \
71         __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;                   \
72         __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak                 \
73         __typeof__(type) name
74 #else
75 /*
76  * Normal declaration and definition macros.
77  */
78 #define DECLARE_PER_CPU_SECTION(type, name, sec)                        \
79         extern __PCPU_ATTRS(sec) __typeof__(type) name
80
81 #define DEFINE_PER_CPU_SECTION(type, name, sec)                         \
82         __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES                        \
83         __typeof__(type) name
84 #endif
85
86 /*
87  * Variant on the per-CPU variable declaration/definition theme used for
88  * ordinary per-CPU variables.
89  */
90 #define DECLARE_PER_CPU(type, name)                                     \
91         DECLARE_PER_CPU_SECTION(type, name, "")
92
93 #define DEFINE_PER_CPU(type, name)                                      \
94         DEFINE_PER_CPU_SECTION(type, name, "")
95
96 /*
97  * Declaration/definition used for per-CPU variables that must come first in
98  * the set of variables.
99  */
100 #define DECLARE_PER_CPU_FIRST(type, name)                               \
101         DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
102
103 #define DEFINE_PER_CPU_FIRST(type, name)                                \
104         DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
105
106 /*
107  * Declaration/definition used for per-CPU variables that must be cacheline
108  * aligned under SMP conditions so that, whilst a particular instance of the
109  * data corresponds to a particular CPU, inefficiencies due to direct access by
110  * other CPUs are reduced by preventing the data from unnecessarily spanning
111  * cachelines.
112  *
113  * An example of this would be statistical data, where each CPU's set of data
114  * is updated by that CPU alone, but the data from across all CPUs is collated
115  * by a CPU processing a read from a proc file.
116  */
117 #define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)                      \
118         DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
119         ____cacheline_aligned_in_smp
120
121 #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                       \
122         DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
123         ____cacheline_aligned_in_smp
124
125 #define DECLARE_PER_CPU_ALIGNED(type, name)                             \
126         DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)    \
127         ____cacheline_aligned
128
129 #define DEFINE_PER_CPU_ALIGNED(type, name)                              \
130         DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)     \
131         ____cacheline_aligned
132
133 /*
134  * Declaration/definition used for per-CPU variables that must be page aligned.
135  */
136 #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)                        \
137         DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")           \
138         __aligned(PAGE_SIZE)
139
140 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                         \
141         DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")            \
142         __aligned(PAGE_SIZE)
143
144 /*
145  * Declaration/definition used for per-CPU variables that must be read mostly.
146  */
147 #define DECLARE_PER_CPU_READ_MOSTLY(type, name)                 \
148         DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
149
150 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)                          \
151         DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
152
153 /*
154  * Intermodule exports for per-CPU variables.  sparse forgets about
155  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
156  * noop if __CHECKER__.
157  */
158 #ifndef __CHECKER__
159 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
160 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
161 #else
162 #define EXPORT_PER_CPU_SYMBOL(var)
163 #define EXPORT_PER_CPU_SYMBOL_GPL(var)
164 #endif
165
166 #endif /* _LINUX_PERCPU_DEFS_H */