/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};
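
/* An entry is linked either into its per-CPU hash chain (u.hlist) or,
 * once evicted, onto a garbage list (u.gc_list) -- never both at once,
 * so the two link types can share storage in a union.
 */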

struct flow_cache_percpu {
	struct hlist_head		*hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	struct flow_cache_percpu __percpu *percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
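
/* Each CPU salts its hash with its own random value, and every
 * FLOW_HASH_RND_PERIOD the timer below asks all CPUs to pick a fresh
 * one.  Only hash_rnd_recalc is set here; the new value is drawn
 * lazily, from softirq context, in flow_new_hash_rnd().
 */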

static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&flow_cache_gc_lock);
	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
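
/* Reclaim is two-phase: the shrink/flush paths below unlink entries
 * from the hash table with softirqs disabled and queue them on
 * flow_cache_gc_list via flow_cache_queue_garbage(); the work item
 * above then runs ops->delete() and the slab free from process
 * context, keeping the hot path cheap.
 */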

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&flow_cache_gc_lock);
		list_splice_tail(gc_list, &flow_cache_gc_list);
		spin_unlock_bh(&flow_cache_gc_lock);
		schedule_work(&flow_cache_gc_work);
	}
}

static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}
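
/* Masking with (size - 1) relies on the table size being a power of
 * two, which flow_cache_hash_size() guarantees by construction
 * (1 << hash_shift).
 */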

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}
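
/* Note that keysize is measured in flow_compare_t-sized words (see
 * flow_key_size()), so both the hash and the compare above walk the
 * key in aligned machine-word-sized chunks.
 */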

struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);
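
/* Illustrative only: a minimal resolver as a caller might supply it.
 * The names dummy_resolver and do_dummy_lookup are hypothetical; in
 * the tree this role is played by callers such as xfrm.  A resolver
 * receives the stale cached object (already detached above) and must
 * return a fresh object, an ERR_PTR(), or NULL:
 *
 *	static struct flow_cache_object *
 *	dummy_resolver(struct net *net, const struct flowi *key, u16 family,
 *		       u8 dir, struct flow_cache_object *oldflo, void *ctx)
 *	{
 *		if (oldflo)
 *			oldflo->ops->delete(oldflo);	// drop stale object
 *		return do_dummy_lookup(net, key, family, dir, ctx);
 *	}
 *
 *	flo = flow_cache_lookup(net, key, family, dir, dummy_resolver, ctx);
 */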

static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}
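
/* Flush every CPU's cache: schedule the flush tasklet on all other
 * online CPUs via smp_call_function(), run it directly on the local
 * CPU, then sleep until each tasklet has decremented info.cpuleft and
 * the last one fires the completion.
 */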
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

static void flow_cache_flush_task(struct work_struct *work)
{
	flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
	schedule_work(&flow_cache_flush_work);
}
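
/* flow_cache_flush() takes a mutex and waits for a completion, so it
 * must not be called from atomic context; callers that cannot sleep
 * punt the flush to a workqueue through flow_cache_flush_deferred().
 */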

static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

static int __init flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);

		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);