/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

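/*
 * The ADD_PRIO()/ADD_MULTIPLE_PRIO()/ADD_NS() macros above nest compound
 * literals so that a whole priority/namespace tree can be declared as one
 * static initializer; INIT_TREE_NODE_ARRAY_SIZE() derives each node's
 * ar_size from the number of variadic children.  A minimal sketch (not
 * taken from this file) of a one-priority tree:
 *
 *	static struct init_tree_node example_fs = {
 *		.type = FS_TYPE_NAMESPACE,
 *		.ar_size = 1,
 *		.children = (struct init_tree_node[]) {
 *			ADD_PRIO(0, 0, 0, {},
 *				 ADD_NS(ADD_MULTIPLE_PRIO(1, 1))),
 *		}
 *	};
 */
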
#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS \
	FS_REQUIRED_CAPS( \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit.modify_root), \
		FS_CAP(flow_table_properties_nic_transmit \
			       .identified_miss_table_mode), \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

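/*
 * Worked example of the level budget implied by the defines above:
 * KERNEL_MIN_LEVEL = KERNEL_NIC_PRIO_NUM_LEVELS + 1 = 6,
 * ETHTOOL_MIN_LEVEL = KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS = 6 + 11 = 17,
 * BY_PASS_MIN_LEVEL = ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +
 * LEFTOVERS_NUM_PRIOS, and ANCHOR, OFFLOADS and LAG each stack one more
 * level on top of that.  (MLX5_BY_PASS_NUM_PRIOS is defined outside this
 * file, so the final numbers depend on its value.)
 */
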
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = FIELD_SIZEOF(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

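/*
 * Two lookup structures hang off the steering objects: each flow table has
 * fgs_hash (an rhltable keyed by the whole flow group mask, so groups with
 * identical match_criteria share one bucket list), and each group has
 * ftes_hash (keyed by the FTE match value).  build_match_list() and
 * lookup_fte_locked() below are the consumers of these parameters.
 */
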
static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case: the FTE must stay
 * locked for the entire deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only root namespace doesn't have parent and we just
			 * need to free its node.
			 */
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node, locked);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

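/*
 * Node lifetime in this file follows one protocol: tree_init_node() sets
 * the refcount to 1, tree_add_node() takes a reference on the parent, and
 * the last tree_put_node() runs del_hw_func() (FW object teardown) before
 * del_sw_func() (SW object teardown) and then drops the parent reference.
 * tree_remove_node() is the "destroy" flavor: it only proceeds to
 * tree_put_node() when the caller holds the last reference.
 */
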
static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = 0;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				u32 *match_value,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;

	tree_init_node(&fte->node, NULL, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in the next priority
 * of prio in the tree, else return the last flow table in the previous
 * priority of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point at old_next_ft to point at new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 */
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level   = level;
	ft_attr.prio    = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	if (max_num_groups > num_flow_table_entries)
		return ERR_PTR(-EINVAL);

	ft_attr.max_fte = num_flow_table_entries;
	ft_attr.prio    = prio;
	ft_attr.level   = level;
	ft_attr.flags   = flags;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

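/*
 * Illustrative call (a sketch; the numbers are made up): a table with 1024
 * entries and at most 16 groups in priority 0, level 0:
 *
 *	ft = mlx5_create_auto_grouped_flow_table(ns, 0, 1024, 16, 0, 0);
 *
 * With autogrouping active, callers skip mlx5_create_flow_group() and
 * alloc_auto_flow_group() below carves group ranges out of the FTE space
 * on demand.
 */
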
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	struct mlx5_flow_group *fg;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/*  ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	ft->autogroup.num_groups++;

out:
	return fg;
}

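/*
 * Sizing example for the logic above (numbers are illustrative): with
 * max_fte = 1024 and required_groups = 16, group_size = 1024 / (16 + 1)
 * = 60, so each autogroup gets 60 FTE slots and the remainder stays as
 * slack for groups beyond required_groups.
 */
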
static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_act->flags & FLOW_ACT_HAS_TAG) &&
	    fte->action.flow_tag != flow_act->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->action.flow_tag,
			       flow_act->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    u32 *match_value,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  u32 action,
			  struct mlx5_flow_table *ft)
{
	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	     (dest->ft->level <= ft->level)))
		return false;
	return true;
}

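/*
 * The level check above is what keeps the steering graph acyclic: a
 * FWD_DEST rule may only forward to a flow table at a strictly higher
 * level than the table holding the rule.
 */
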
struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

struct match_list_head {
	struct list_head  list;
	struct match_list first;
};

static void free_match_list(struct match_list_head *head)
{
	if (!list_empty(&head->list)) {
		struct match_list *iter, *match_tmp;

		list_del(&head->first.list);
		tree_put_node(&head->first.g->node, false);
		list_for_each_entry_safe(iter, match_tmp, &head->list,
					 list) {
			tree_put_node(&iter->g->node, false);
			list_del(&iter->list);
			kfree(iter);
		}
	}
}

static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which have a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

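/*
 * The per-node version counters summed above implement optimistic
 * concurrency: callers snapshot the versions of all candidate groups (or
 * of the whole table) before searching without write locks, and retry the
 * search when a later snapshot differs, since that means a group or FTE
 * was added concurrently.
 */
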
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

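/*
 * lookup_fte_locked() returns the FTE locked (FS_LOCK_CHILD) and
 * referenced, with the group lock already released; callers must pair it
 * with up_write_ref_node() plus tree_put_node() on the FTE.  The
 * fs_i_lock_class values keep lockdep happy when table, group and FTE
 * locks nest (grandparent -> parent -> child).
 */
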
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version;
	int err;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	version = matched_fgs_get_version(match_head);
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	/* Try to find a fg that already contains a matching fte */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with matching fte found, or we skipped the search.
	 * Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, for the case that a new flow group
	 * was added while the fgs weren't locked.
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version, for the case that a new FTE with the
	 * same values was added while the fgs weren't locked.
	 */
	if (version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which have a matching match_criteria */
	err = build_match_list(&match_head, ft, spec);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		goto err_release_fg;
	}

	err = insert_fte(g, fte);
	if (err) {
		kmem_cache_free(steering->ftes_cache, fte);
		goto err_release_fg;
	}

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
			   dest_num, fte);
	up_write_ref_node(&fte->node, false);
	tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (num_dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			num_dest = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);

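/*
 * Illustrative usage (a sketch, not from this file; tirn would come from
 * the caller's TIR):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	mlx5_del_flow_rules(handle);
 */
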
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE for other
	 * changes, and increase its refcount, in order not to perform the
	 * "del" functions of the FTE. Will handle them here.
	 * The removal of the rules is done under locked FTE.
	 * After removing all the handle's rules, if there are remaining
	 * rules, it means we just need to modify the FTE in FW, and
	 * unlock/decrease the refcount we increased before.
	 * Otherwise, it means the FTE should be deleted. First delete the
	 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
	 * the FTE, which will handle the last decrease of the refcount, as
	 * well as required handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (fte->modify_mask && fte->dests_size) {
		modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else {
		del_hw_fte(&fte->node);
		up_write(&fte->node.lock);
		tree_put_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children(flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		if (steering->rdma_rx_root_ns)
			return &steering->rdma_rx_root_ns->ns;
		return NULL;
	default:
		break;
	}

	if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
		root_ns = steering->egress_root_ns;
	} else { /* Must be NIC RX */
		root_ns = steering->root_ns;
		prio = type;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)

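/*
 * FS_CAP() (defined near the top of this file) turns a field of
 * flow_table_nic_cap into a bit offset; GET_FLOW_TABLE_CAP() then picks
 * that single bit out of the device's current HCA flow table
 * capabilities, splitting the offset into a 32-bit word index
 * (offset / 32) and a bit position within the big-endian word.
 */
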
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level   = ANCHOR_LEVEL;
	ft_attr.prio    = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

2416 static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
2418 struct mlx5_flow_steering *steering = dev->priv.steering;
2421 if (!steering->esw_egress_root_ns)
2424 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2425 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2427 kfree(steering->esw_egress_root_ns);
static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}
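
/* Tear down everything mlx5_init_fs() may have created. cleanup_root_ns()
 * tolerates NULL, so this is safe to call on a partially initialized
 * steering instance, which is exactly what the error path of mlx5_init_fs()
 * relies on.
 */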
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_tx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}
static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_rx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}
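
/* RDMA RX differs from the sniffer namespaces in one respect: its default
 * miss action is MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, meaning (as the
 * name suggests) that traffic matching nothing in this domain is handed over
 * to another steering domain rather than taking the regular default miss
 * behavior.
 */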
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	steering->rdma_rx_root_ns->def_miss_action =
		MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;

	/* Create single prio */
	prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->rdma_rx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}
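
/* FDB layout: a bypass prio first, then a chained fast-path region carved
 * into FDB_MAX_CHAIN + 1 sub-namespaces (one per chain), and a slow-path
 * prio last. Chain c exposes FDB_MAX_PRIO * (c + 1) prios of 2 levels each,
 * and the chained region reserves room for the deepest chain up front:
 * levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1). If FDB_MAX_CHAIN were 3
 * and FDB_MAX_PRIO 16, for example, that would come to 2 * 16 * 4 = 128
 * levels.
 */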
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *maj_prio;
	struct fs_prio *min_prio;
	int levels;
	int chain;
	int prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	steering->fdb_sub_ns = kcalloc(FDB_MAX_CHAIN + 1,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
				  1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  FDB_FAST_PATH,
					  levels);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
		ns = fs_create_namespace(maj_prio);
		if (IS_ERR(ns)) {
			err = PTR_ERR(ns);
			goto out_err;
		}

		for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
			min_prio = fs_create_prio(ns, prio, 2);
			if (IS_ERR(min_prio)) {
				err = PTR_ERR(min_prio);
				goto out_err;
			}
		}

		steering->fdb_sub_ns[chain] = ns;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}
static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}
static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}
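
/* The eswitch ACL namespaces are per vport: each of the MLX5_TOTAL_VPORTS()
 * array entries allocated below is a full root namespace of its own, indexed
 * by vport number.
 */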
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
					       sizeof(*steering->esw_egress_root_ns),
					       GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}
static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
						sizeof(*steering->esw_ingress_root_ns),
						GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;

cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}
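
/* Probe-time entry point. Allocate the steering context and the flow group /
 * FTE caches, then create each root namespace only when the corresponding
 * capability bit reports hardware support. Any failure funnels through
 * mlx5_cleanup_fs(), which copes with whatever subset was already set up.
 */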
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
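
/* Usage sketch (hypothetical caller, e.g. an IPoIB-style underlay user):
 * pair the add/remove calls around the lifetime of the underlay QP so the
 * root flow table only refers to a live QPN.
 *
 *	err = mlx5_fs_add_rx_underlay_qpn(mdev, qpn);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_fs_remove_rx_underlay_qpn(mdev, qpn);
 */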
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);
	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);