include/linux/percpu_counter.h
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
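/*
 * Typical usage (an illustrative sketch only; "nr_things" is a made-up
 * example counter, and error handling is up to the caller):
 *
 *	struct percpu_counter nr_things;
 *
 *	if (percpu_counter_init(&nr_things, 0))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_things);            cheap per-CPU fast path
 *	total = percpu_counter_sum(&nr_things);    exact, but expensive
 *	percpu_counter_destroy(&nr_things);
 */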
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

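/*
 * SMP layout: each CPU accumulates small deltas in a private s32
 * (@counters); once a delta reaches the batch threshold it is folded
 * into the shared s64 @count under @lock.  Reads of @count alone are
 * therefore approximate unless the per-CPU deltas are summed explicitly.
 */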
struct percpu_counter {
        raw_spinlock_t lock;
        s64 count;
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters;
};

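/*
 * Per-CPU deltas are folded into ->count once their absolute value
 * reaches this batch size; the core scales it with the number of
 * online CPUs.  Larger batches mean cheaper updates but a less
 * accurate percpu_counter_read().
 */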
extern int percpu_counter_batch;

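/*
 * percpu_counter_init() allocates the per-CPU storage (and, with
 * CONFIG_HOTPLUG_CPU, puts the counter on the global list); the static
 * lock_class_key gives each call site its own lockdep class for ->lock.
 * Returns 0 on success or a negative errno.
 */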
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key);

#define percpu_counter_init(fbc, value)                                 \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, &__key);              \
        })

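/*
 * Core primitives, implemented in lib/percpu_counter.c:
 *  - percpu_counter_set()     write an absolute value
 *  - __percpu_counter_add()   add with an explicit batch threshold
 *  - __percpu_counter_sum()   exact value; walks the per-CPU counters
 *                             under ->lock
 *  - percpu_counter_compare() compare against rhs, falling back to an
 *                             exact sum only when the fast read is too
 *                             close to rhs to be conclusive
 */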
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);

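/* Add using the default, auto-scaled batch size. */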
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        __percpu_counter_add(fbc, amount, percpu_counter_batch);
}

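/*
 * The _sum() variants are exact but expensive: they take ->lock and
 * iterate over the per-CPU counters.  Prefer percpu_counter_read*()
 * on hot paths where an approximate value is good enough.
 */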
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        s64 ret = __percpu_counter_sum(fbc);
        return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}

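/*
 * Fast, lockless read of the central count.  Per-CPU deltas that have
 * not yet been folded in are ignored, so the result can be off by up
 * to batch * nr_cpus.
 */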
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative, because per-CPU
 * deltas may not have been folded into fbc->count yet.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        s64 ret = fbc->count;

        barrier();              /* Prevent reloads of fbc->count */
        if (ret >= 0)
                return ret;
        return 0;
}

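/* True once __percpu_counter_init() has allocated the per-CPU storage. */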
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

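/*
 * UP fallback: with a single CPU there is nothing to distribute, so the
 * counter degenerates to a plain s64 and every operation is exact.
 */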
struct percpu_counter {
        s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
        return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        if (fbc->count > rhs)
                return 1;
        else if (fbc->count < rhs)
                return -1;
        else
                return 0;
}

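/*
 * Preemption is disabled so the read-modify-write of ->count cannot be
 * interleaved with another task's update on the same CPU.
 */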
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return 1;
}

#endif  /* CONFIG_SMP */

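/* Convenience wrappers shared by the SMP and UP implementations. */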
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */