/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>
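
/*
 * The flow cache keeps one hash table of flow_cache_entry chains per CPU.
 * Entries are keyed by the full struct flowi plus (family, dir) and carry a
 * generation number; bumping the global flow_cache_genid invalidates every
 * cached object lazily, the next time it is looked up.
 */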
struct flow_cache_entry {
	struct flow_cache_entry *next;
	u16 family;
	u8 dir;
	u32 genid;
	struct flowi key;
	struct flow_cache_object *object;
};

struct flow_cache_percpu {
	struct flow_cache_entry **hash_table;
	int hash_count;
	u32 hash_rnd;
	int hash_rnd_recalc;
	struct tasklet_struct flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache *cache;
	atomic_t cpuleft;
	struct completion completion;
};

struct flow_cache {
	u32 hash_shift;
	unsigned long order;
	struct flow_cache_percpu *percpu;
	struct notifier_block hotcpu_notifier;
	int low_watermark;
	int high_watermark;
	struct timer_list rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep;

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
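
/*
 * Re-seed the per-CPU jhash key every FLOW_HASH_RND_PERIOD so the hash
 * distribution cannot be predicted and attacked; the timer only marks each
 * CPU for recalculation, and the new random value is picked up locally on
 * the next lookup (see flow_new_hash_rnd()).
 */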
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache *fc,
			    struct flow_cache_percpu *fcp,
			    struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
	fcp->hash_count--;
}
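
/*
 * Walk every bucket of one CPU's table and free entries, keeping at most
 * shrink_to still-valid entries per bucket; shrink_to == 0 therefore empties
 * the table (used when re-keying the hash and on CPU_DEAD).
 */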
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		flp = &fcp->hash_table[i];
		while ((fle = *flp) != NULL) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
				flp = &fle->next;
			} else {
				*flp = fle->next;
				flow_entry_kill(fc, fcp, fle);
			}
		}
	}
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  struct flowi *key)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}
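
/*
 * Look up the cached object for (key, family, dir) on the local CPU.  On a
 * miss, on a stale generation, or when the cached object fails ops->check(),
 * the resolver callback is invoked to (re)build the object, and the result
 * is stored back into the entry unless it is an ERR_PTR.
 */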
struct flow_cache_object *
flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, **head;
	struct flow_cache_object *flo;
	unsigned int hash;

	local_bh_disable();
	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());

	fle = NULL;
	flo = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key);
	head = &fcp->hash_table[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0)
			break;
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (flo && !IS_ERR(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
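
/*
 * Illustrative sketch (not part of this file): a caller embeds a
 * struct flow_cache_object in its own object and supplies get/check/delete
 * ops plus a resolver matching the calling convention used above.  The
 * names below are hypothetical; only the ops and resolver signature are
 * taken from this file and <net/flow.h>.
 *
 *	struct my_flow_obj {
 *		struct flow_cache_object flo;
 *		...
 *	};
 *
 *	static struct flow_cache_object *my_resolver(struct net *net,
 *						     struct flowi *key,
 *						     u16 family, u8 dir,
 *						     struct flow_cache_object *old_obj,
 *						     void *ctx)
 *	{
 *		// drop old_obj if non-NULL, build a fresh object, and
 *		// return &obj->flo, or an ERR_PTR() on failure
 *	}
 *
 *	flo = flow_cache_lookup(net, key, family, dir, my_resolver, ctx);
 */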

static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		struct flow_cache_entry *fle;

		fle = fcp->hash_table[i];
		for (; fle; fle = fle->next) {
			if (flow_entry_valid(fle))
				continue;

			if (fle->object)
				fle->object->ops->delete(fle->object);
			fle->object = NULL;
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();
	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}
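
/*
 * Flush stale objects on every online CPU: schedule the per-CPU flush
 * tasklet remotely via smp_call_function(), run it directly for the local
 * CPU, and wait until the last tasklet signals the completion.
 */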
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
					  struct flow_cache_percpu *fcp)
{
	fcp->hash_table = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
	if (!fcp->hash_table)
		panic("NET: failed to allocate flow cache order %lu\n", fc->order);

	fcp->hash_rnd_recalc = 1;
	fcp->hash_count = 0;
	tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}

static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		__flow_cache_shrink(fc, fcp, 0);
	return NOTIFY_OK;
}
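
/*
 * Size the per-bucket pointer array from hash_shift, allocate the per-CPU
 * state, arm the hash re-seed timer and register the CPU hotplug notifier
 * so a dying CPU's table is emptied.
 */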
static int flow_cache_init(struct flow_cache *fc)
{
	unsigned long order;
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_cache_hash_size(fc));
	     order++)
		/* NOTHING */;
	fc->order = order;
	fc->percpu = alloc_percpu(struct flow_cache_percpu);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));

	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	return 0;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);