1 #ifndef _LINUX_PERCPU_DEFS_H
2 #define _LINUX_PERCPU_DEFS_H
/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due the compiler generating the wrong code to access
 * that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES
18 #define __PCPU_DUMMY_ATTRS \
19 __attribute__((section(".discard"), unused))
/*
 * Macro which verifies @ptr is a percpu pointer without evaluating
 * @ptr.  This is to be used in percpu accessors to verify that the
 * input parameter is a percpu pointer.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)	do {					\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)
/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition puts the following two extra restrictions when
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
 * definition is used for all cases.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that hidden weak symbol collision, which will cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) name

#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) name
#endif
/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")
/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp
/*
 * As above, but cacheline-aligned unconditionally (____cacheline_aligned
 * rather than the _in_smp variant), and placed in the aligned percpu section.
 */
#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned
/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)
/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..readmostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
/*
 * Intermodule exports for per-CPU variables.  sparse forgets about
 * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
 * noop if __CHECKER__.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif
166 #endif /* _LINUX_PERCPU_DEFS_H */