// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */
#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
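
/* Derived layout of the per-CPU mask cache (see ovs_flow_tbl_lookup_stats()
 * below): the 32-bit skb_hash is consumed MC_HASH_SHIFT (8) bits at a time,
 * giving MC_HASH_SEGS (4) candidate slots per packet.  With the default of
 * 256 entries each probe uses "hash & 255" and then shifts the hash right by
 * 8 bits before trying the next segment.
 */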

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
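
/* Usage sketch: masked_flow_lookup() below calls ovs_flow_mask_key() with
 * full == false, so only the bytes covered by 'mask->range' of the on-stack
 * masked_key are written before that same range is hashed and compared.
 */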

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;
	flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);
	RCU_INIT_POINTER(flow->stats[0], stats);
	cpumask_set_cpu(0, flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_stats);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per CPU counters are not atomic we can not go ahead and
	 * reset them from another CPU. To be able to still have an approximate
	 * zero based counter we store the value at reset, and subtract it
	 * later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry(&stats->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}
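
/* Worked example of the scheme above: if a mask's per-CPU counters sum to
 * 150 when the counters are "reset" (masks_usage_zero_cntr[i] becomes 150)
 * and to 170 when ovs_flow_masks_rebalance() later reads them, the rebalance
 * logic sees an effective usage of 170 - 150 = 20 since the last reset.
 */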

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
						sizeof(u64) * size,
						__alignof__(u64));
	if (!new->masks_usage_stats) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow 'size' to be zero or a power of 2, and not to exceed
	 * the per-CPU allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking: this function is called from an RCU callback or
 * from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver = ti->node_ver;
	int i;

	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i++ < *last)
				continue;
			*last = i;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}
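
/* Note: n_buckets is always a power of two (TBL_MIN_BUCKETS, doubled on each
 * expansion), so the "hash & (n_buckets - 1)" above is a cheap modulo.
 */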

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure the number of hash bytes is a multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* flow_lookup() does a full lookup of the flow table. It starts with the
 * mask at the index passed in via '*index'.
 * This function MUST be called with BH disabled due to the use
 * of CPU specific variables.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&stats->syncp);
				stats->usage_cntrs[*index]++;
				u64_stats_update_end(&stats->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&stats->syncp);
			stats->usage_cntrs[*index]++;
			u64_stats_update_end(&stats->syncp);
			return flow;
		}
	}

	return NULL;
}

/*
 * mask_cache maps a flow to its probable mask. The cache is not tightly
 * coupled, which means updates to the mask list can leave stale entries
 * in the cache.
 * This is a per-CPU cache and it is divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre and post recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e; /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}
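
/* Probe-sequence sketch for the cache above, assuming the default cache_size
 * of 256: segment 0 probes entries[skb_hash & 0xff], segment 1 probes
 * entries[(skb_hash >> 8) & 0xff], and so on for MC_HASH_SEGS (4) segments.
 * On a miss, the probed slot with the smallest skb_hash is reused as the
 * replacement candidate.
 */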

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	struct sw_flow *flow;
	u32 index = 0;

	/* This function gets called through the netlink interface and
	 * therefore is preemptible. However, flow_lookup() needs to be called
	 * with BH disabled due to CPU specific variables.
	 */
	local_bh_disable();
	flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
	local_bh_enable();

	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}
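
/* Note: the subtraction above sorts in descending order of 'counter', so
 * after sort() the most heavily used masks come first.
 */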

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry(&stats->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}
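
/* Rebalance sketch: if the usage counters since the last reset come out as
 * mask[0]=5, mask[1]=120 and mask[2]=40, the sorted order is {1, 2, 0}, so a
 * new mask_array is published with the heavily hit mask first and the
 * per-packet mask scan in flow_lookup() tends to terminate earlier.
 */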

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *))
				       + cpumask_size(),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}