2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
38 #include "mlx5_core.h"
41 #include "fs_ft_pool.h"
42 #include "diag/fs_tracepoint.h"
44 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
45 sizeof(struct init_tree_node))
47 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
48 ...) {.type = FS_TYPE_PRIO,\
49 .min_ft_level = min_level_val,\
50 .num_levels = num_levels_val,\
51 .num_leaf_prios = num_prios_val,\
53 .children = (struct init_tree_node[]) {__VA_ARGS__},\
54 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
57 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
58 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
61 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
62 .def_miss_action = def_miss_act,\
63 .children = (struct init_tree_node[]) {__VA_ARGS__},\
64 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
67 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
70 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
72 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
73 .caps = (long[]) {__VA_ARGS__} }
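/* These macros nest to build the static init_tree_node trees below at
 * compile time: ADD_PRIO() creates a priority node whose children are
 * given as variadic arguments, ADD_NS() wraps its children in a
 * namespace node with a default miss action, and
 * FS_REQUIRED_CAPS()/FS_CAP() record the HCA capability bits a priority
 * depends on. A purely illustrative composition (MY_MIN_LEVEL is a
 * hypothetical constant, not defined in this file):
 *
 *   ADD_PRIO(0, MY_MIN_LEVEL, 0,
 *            FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en)),
 *            ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
 *                   ADD_MULTIPLE_PRIO(2, 1)))
 *
 * describes one FS_TYPE_PRIO node holding one FS_TYPE_NAMESPACE node
 * with two leaf priorities of one level each.
 */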
75 #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
76 FS_CAP(flow_table_properties_nic_receive.modify_root), \
77 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
78 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
80 #define FS_CHAINING_CAPS_EGRESS \
82 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
83 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
84 FS_CAP(flow_table_properties_nic_transmit \
85 .identified_miss_table_mode), \
86 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
88 #define FS_CHAINING_CAPS_RDMA_TX \
90 FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
91 FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
92 FS_CAP(flow_table_properties_nic_transmit_rdma \
93 .identified_miss_table_mode), \
94 FS_CAP(flow_table_properties_nic_transmit_rdma \
97 #define LEFTOVERS_NUM_LEVELS 1
98 #define LEFTOVERS_NUM_PRIOS 1
100 #define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
101 #define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
103 #define BY_PASS_PRIO_NUM_LEVELS 1
104 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
107 #define ETHTOOL_PRIO_NUM_LEVELS 1
108 #define ETHTOOL_NUM_PRIOS 11
109 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
110 /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
111 #define KERNEL_NIC_PRIO_NUM_LEVELS 7
112 #define KERNEL_NIC_NUM_PRIOS 1
113 /* One more level for tc */
114 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
116 #define KERNEL_NIC_TC_NUM_PRIOS 1
117 #define KERNEL_NIC_TC_NUM_LEVELS 3
119 #define ANCHOR_NUM_LEVELS 1
120 #define ANCHOR_NUM_PRIOS 1
121 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
123 #define OFFLOADS_MAX_FT 2
124 #define OFFLOADS_NUM_PRIOS 2
125 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
127 #define LAG_PRIO_NUM_LEVELS 1
128 #define LAG_NUM_PRIOS 1
129 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
131 #define KERNEL_TX_IPSEC_NUM_PRIOS 1
132 #define KERNEL_TX_IPSEC_NUM_LEVELS 1
133 #define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
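/* The *_MIN_LEVEL constants above express how many flow table levels
 * the device must support for a given priority to be usable: each
 * priority adds its own levels on top of the priorities that precede it
 * in the chain. With the values defined here, for example,
 * KERNEL_MIN_LEVEL = KERNEL_NIC_PRIO_NUM_LEVELS + 1 = 8 and
 * ETHTOOL_MIN_LEVEL = KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS = 19;
 * BY_PASS_MIN_LEVEL builds on ETHTOOL_MIN_LEVEL in the same way.
 */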
140 static struct init_tree_node {
141 enum fs_node_type type;
142 struct init_tree_node *children;
144 struct node_caps caps;
149 enum mlx5_flow_table_miss_action def_miss_action;
151 .type = FS_TYPE_NAMESPACE,
153 .children = (struct init_tree_node[]){
154 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
155 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
156 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
157 BY_PASS_PRIO_NUM_LEVELS))),
158 ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
159 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
160 ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
161 LAG_PRIO_NUM_LEVELS))),
162 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
163 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
164 ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
166 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
167 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
168 ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
169 ETHTOOL_PRIO_NUM_LEVELS))),
170 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
171 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
172 ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
173 KERNEL_NIC_TC_NUM_LEVELS),
174 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
175 KERNEL_NIC_PRIO_NUM_LEVELS))),
176 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
177 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
178 ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
179 LEFTOVERS_NUM_LEVELS))),
180 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
181 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
182 ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
183 ANCHOR_NUM_LEVELS))),
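/* root_fs above is the default NIC RX steering tree: bypass, LAG,
 * offloads, ethtool, kernel (tc + NIC), leftovers and finally the
 * anchor priority, in that order. Most priorities are gated on
 * FS_CHAINING_CAPS; a priority whose capability requirements are not
 * met is simply skipped when the tree is instantiated.
 */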
187 static struct init_tree_node egress_root_fs = {
188 .type = FS_TYPE_NAMESPACE,
190 .children = (struct init_tree_node[]) {
191 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
192 FS_CHAINING_CAPS_EGRESS,
193 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
194 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
195 BY_PASS_PRIO_NUM_LEVELS))),
196 ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
197 FS_CHAINING_CAPS_EGRESS,
198 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
199 ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
200 KERNEL_TX_IPSEC_NUM_LEVELS))),
205 RDMA_RX_COUNTERS_PRIO,
210 #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
211 #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
212 #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
214 static struct init_tree_node rdma_rx_root_fs = {
215 .type = FS_TYPE_NAMESPACE,
217 .children = (struct init_tree_node[]) {
218 [RDMA_RX_COUNTERS_PRIO] =
219 ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
221 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
222 ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
223 RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
224 [RDMA_RX_BYPASS_PRIO] =
225 ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
227 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
228 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
229 BY_PASS_PRIO_NUM_LEVELS))),
230 [RDMA_RX_KERNEL_PRIO] =
231 ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
233 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
234 ADD_MULTIPLE_PRIO(1, 1))),
239 RDMA_TX_COUNTERS_PRIO,
243 #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
244 #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
246 static struct init_tree_node rdma_tx_root_fs = {
247 .type = FS_TYPE_NAMESPACE,
249 .children = (struct init_tree_node[]) {
250 [RDMA_TX_COUNTERS_PRIO] =
251 ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
253 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
254 ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
255 RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
256 [RDMA_TX_BYPASS_PRIO] =
257 ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
258 FS_CHAINING_CAPS_RDMA_TX,
259 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
260 ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
261 BY_PASS_PRIO_NUM_LEVELS))),
265 enum fs_i_lock_class {
271 static const struct rhashtable_params rhash_fte = {
272 .key_len = sizeof_field(struct fs_fte, val),
273 .key_offset = offsetof(struct fs_fte, val),
274 .head_offset = offsetof(struct fs_fte, hash),
275 .automatic_shrinking = true,
279 static const struct rhashtable_params rhash_fg = {
280 .key_len = sizeof_field(struct mlx5_flow_group, mask),
281 .key_offset = offsetof(struct mlx5_flow_group, mask),
282 .head_offset = offsetof(struct mlx5_flow_group, hash),
283 .automatic_shrinking = true,
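/* Two rhashtables back the rule insertion fast path: rhash_fg keys the
 * flow groups of a table by their match mask, so a spec can quickly
 * find every group with identical match criteria, while rhash_fte keys
 * the FTEs inside a group by their match value, so an existing entry
 * with the same value can be reused instead of creating a new one.
 */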
288 static void del_hw_flow_table(struct fs_node *node);
289 static void del_hw_flow_group(struct fs_node *node);
290 static void del_hw_fte(struct fs_node *node);
291 static void del_sw_flow_table(struct fs_node *node);
292 static void del_sw_flow_group(struct fs_node *node);
293 static void del_sw_fte(struct fs_node *node);
294 static void del_sw_prio(struct fs_node *node);
295 static void del_sw_ns(struct fs_node *node);
296 /* Deleting a rule (destination) is a special case that
297 * requires locking the FTE for the whole deletion process.
299 static void del_sw_hw_rule(struct fs_node *node);
300 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
301 struct mlx5_flow_destination *d2);
302 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
303 static struct mlx5_flow_rule *
304 find_flow_rule(struct fs_fte *fte,
305 struct mlx5_flow_destination *dest);
307 static void tree_init_node(struct fs_node *node,
308 void (*del_hw_func)(struct fs_node *),
309 void (*del_sw_func)(struct fs_node *))
311 refcount_set(&node->refcount, 1);
312 INIT_LIST_HEAD(&node->list);
313 INIT_LIST_HEAD(&node->children);
314 init_rwsem(&node->lock);
315 node->del_hw_func = del_hw_func;
316 node->del_sw_func = del_sw_func;
317 node->active = false;
320 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
323 refcount_inc(&parent->refcount);
324 node->parent = parent;
326 /* Parent is the root */
330 node->root = parent->root;
333 static int tree_get_node(struct fs_node *node)
335 return refcount_inc_not_zero(&node->refcount);
338 static void nested_down_read_ref_node(struct fs_node *node,
339 enum fs_i_lock_class class)
342 down_read_nested(&node->lock, class);
343 refcount_inc(&node->refcount);
347 static void nested_down_write_ref_node(struct fs_node *node,
348 enum fs_i_lock_class class)
351 down_write_nested(&node->lock, class);
352 refcount_inc(&node->refcount);
356 static void down_write_ref_node(struct fs_node *node, bool locked)
360 down_write(&node->lock);
361 refcount_inc(&node->refcount);
365 static void up_read_ref_node(struct fs_node *node)
367 refcount_dec(&node->refcount);
368 up_read(&node->lock);
371 static void up_write_ref_node(struct fs_node *node, bool locked)
373 refcount_dec(&node->refcount);
375 up_write(&node->lock);
378 static void tree_put_node(struct fs_node *node, bool locked)
380 struct fs_node *parent_node = node->parent;
382 if (refcount_dec_and_test(&node->refcount)) {
383 if (node->del_hw_func)
384 node->del_hw_func(node);
386 down_write_ref_node(parent_node, locked);
387 list_del_init(&node->list);
389 node->del_sw_func(node);
391 up_write_ref_node(parent_node, locked);
394 if (!node && parent_node)
395 tree_put_node(parent_node, locked);
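/* Every fs_node is reference counted: tree_add_node() takes a reference
 * on the parent and tree_put_node() drops the node's own reference.
 * When the count hits zero the node is torn down in two phases:
 * del_hw_func() removes the object from the device, then del_sw_func()
 * frees the software state under the parent's write lock. Finally the
 * parent's reference is dropped, which may cascade the teardown up the
 * tree.
 */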
398 static int tree_remove_node(struct fs_node *node, bool locked)
400 if (refcount_read(&node->refcount) > 1) {
401 refcount_dec(&node->refcount);
404 tree_put_node(node, locked);
408 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
411 struct fs_prio *iter_prio;
413 fs_for_each_prio(iter_prio, ns) {
414 if (iter_prio->prio == prio)
421 static bool is_fwd_next_action(u32 action)
423 return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
424 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
427 static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
429 return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
430 type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
431 type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
432 type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
433 type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
434 type == MLX5_FLOW_DESTINATION_TYPE_TIR;
437 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
441 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
442 if (spec->match_value[i] & ~spec->match_criteria[i]) {
443 pr_warn("mlx5_core: match_value differs from match_criteria\n");
450 struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
452 struct fs_node *root;
453 struct mlx5_flow_namespace *ns;
457 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
458 pr_warn("mlx5: flow steering node is not in tree or is garbage\n");
462 ns = container_of(root, struct mlx5_flow_namespace, node);
463 return container_of(ns, struct mlx5_flow_root_namespace, ns);
466 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
468 struct mlx5_flow_root_namespace *root = find_root(node);
471 return root->dev->priv.steering;
475 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
477 struct mlx5_flow_root_namespace *root = find_root(node);
484 static void del_sw_ns(struct fs_node *node)
489 static void del_sw_prio(struct fs_node *node)
494 static void del_hw_flow_table(struct fs_node *node)
496 struct mlx5_flow_root_namespace *root;
497 struct mlx5_flow_table *ft;
498 struct mlx5_core_dev *dev;
501 fs_get_obj(ft, node);
502 dev = get_dev(&ft->node);
503 root = find_root(&ft->node);
504 trace_mlx5_fs_del_ft(ft);
507 err = root->cmds->destroy_flow_table(root, ft);
509 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
513 static void del_sw_flow_table(struct fs_node *node)
515 struct mlx5_flow_table *ft;
516 struct fs_prio *prio;
518 fs_get_obj(ft, node);
520 rhltable_destroy(&ft->fgs_hash);
521 if (ft->node.parent) {
522 fs_get_obj(prio, ft->node.parent);
528 static void modify_fte(struct fs_fte *fte)
530 struct mlx5_flow_root_namespace *root;
531 struct mlx5_flow_table *ft;
532 struct mlx5_flow_group *fg;
533 struct mlx5_core_dev *dev;
536 fs_get_obj(fg, fte->node.parent);
537 fs_get_obj(ft, fg->node.parent);
538 dev = get_dev(&fte->node);
540 root = find_root(&ft->node);
541 err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
544 "%s can't del rule fg id=%d fte_index=%d\n",
545 __func__, fg->id, fte->index);
546 fte->modify_mask = 0;
549 static void del_sw_hw_rule(struct fs_node *node)
551 struct mlx5_flow_rule *rule;
554 fs_get_obj(rule, node);
555 fs_get_obj(fte, rule->node.parent);
556 trace_mlx5_fs_del_rule(rule);
557 if (is_fwd_next_action(rule->sw_action)) {
558 mutex_lock(&rule->dest_attr.ft->lock);
559 list_del(&rule->next_ft);
560 mutex_unlock(&rule->dest_attr.ft->lock);
563 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
566 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
567 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
568 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
572 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
574 fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
575 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
579 if (is_fwd_dest_type(rule->dest_attr.type)) {
584 fte->action.action &=
585 ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
587 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
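/* del_sw_hw_rule() does not touch the device directly: it removes the
 * destination from the FTE's software state and records what has to
 * change in fte->modify_mask (action bits, flow counters and/or the
 * destination list). The FTE is updated in FW later, either by
 * modify_fte() or when the FTE itself is deleted.
 */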
594 static void del_hw_fte(struct fs_node *node)
596 struct mlx5_flow_root_namespace *root;
597 struct mlx5_flow_table *ft;
598 struct mlx5_flow_group *fg;
599 struct mlx5_core_dev *dev;
603 fs_get_obj(fte, node);
604 fs_get_obj(fg, fte->node.parent);
605 fs_get_obj(ft, fg->node.parent);
607 trace_mlx5_fs_del_fte(fte);
608 WARN_ON(fte->dests_size);
609 dev = get_dev(&ft->node);
610 root = find_root(&ft->node);
612 err = root->cmds->delete_fte(root, ft, fte);
615 "flow steering can't delete fte in index %d of flow group id %d\n",
617 node->active = false;
621 static void del_sw_fte(struct fs_node *node)
623 struct mlx5_flow_steering *steering = get_steering(node);
624 struct mlx5_flow_group *fg;
628 fs_get_obj(fte, node);
629 fs_get_obj(fg, fte->node.parent);
631 err = rhashtable_remove_fast(&fg->ftes_hash,
635 ida_free(&fg->fte_allocator, fte->index - fg->start_index);
636 kmem_cache_free(steering->ftes_cache, fte);
639 static void del_hw_flow_group(struct fs_node *node)
641 struct mlx5_flow_root_namespace *root;
642 struct mlx5_flow_group *fg;
643 struct mlx5_flow_table *ft;
644 struct mlx5_core_dev *dev;
646 fs_get_obj(fg, node);
647 fs_get_obj(ft, fg->node.parent);
648 dev = get_dev(&ft->node);
649 trace_mlx5_fs_del_fg(fg);
651 root = find_root(&ft->node);
652 if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
653 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
657 static void del_sw_flow_group(struct fs_node *node)
659 struct mlx5_flow_steering *steering = get_steering(node);
660 struct mlx5_flow_group *fg;
661 struct mlx5_flow_table *ft;
664 fs_get_obj(fg, node);
665 fs_get_obj(ft, fg->node.parent);
667 rhashtable_destroy(&fg->ftes_hash);
668 ida_destroy(&fg->fte_allocator);
669 if (ft->autogroup.active &&
670 fg->max_ftes == ft->autogroup.group_size &&
671 fg->start_index < ft->autogroup.max_fte)
672 ft->autogroup.num_groups--;
673 err = rhltable_remove(&ft->fgs_hash,
677 kmem_cache_free(steering->fgs_cache, fg);
680 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
685 index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
689 fte->index = index + fg->start_index;
690 ret = rhashtable_insert_fast(&fg->ftes_hash,
696 tree_add_node(&fte->node, &fg->node);
697 list_add_tail(&fte->node.list, &fg->node.children);
701 ida_free(&fg->fte_allocator, index);
705 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
706 const struct mlx5_flow_spec *spec,
707 struct mlx5_flow_act *flow_act)
709 struct mlx5_flow_steering *steering = get_steering(&ft->node);
712 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
714 return ERR_PTR(-ENOMEM);
716 memcpy(fte->val, &spec->match_value, sizeof(fte->val));
717 fte->node.type = FS_TYPE_FLOW_ENTRY;
718 fte->action = *flow_act;
719 fte->flow_context = spec->flow_context;
721 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
726 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
727 struct mlx5_flow_group *fg)
729 rhashtable_destroy(&fg->ftes_hash);
730 kmem_cache_free(steering->fgs_cache, fg);
733 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
734 u8 match_criteria_enable,
735 const void *match_criteria,
739 struct mlx5_flow_group *fg;
742 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
744 return ERR_PTR(-ENOMEM);
746 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
748 kmem_cache_free(steering->fgs_cache, fg);
752 ida_init(&fg->fte_allocator);
753 fg->mask.match_criteria_enable = match_criteria_enable;
754 memcpy(&fg->mask.match_criteria, match_criteria,
755 sizeof(fg->mask.match_criteria));
756 fg->node.type = FS_TYPE_FLOW_GROUP;
757 fg->start_index = start_index;
758 fg->max_ftes = end_index - start_index + 1;
763 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
764 u8 match_criteria_enable,
765 const void *match_criteria,
768 struct list_head *prev)
770 struct mlx5_flow_steering *steering = get_steering(&ft->node);
771 struct mlx5_flow_group *fg;
774 fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
775 start_index, end_index);
779 /* initialize refcnt, add to parent list */
780 ret = rhltable_insert(&ft->fgs_hash,
784 dealloc_flow_group(steering, fg);
788 tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
789 tree_add_node(&fg->node, &ft->node);
790 /* Add node to group list */
791 list_add(&fg->node.list, prev);
792 atomic_inc(&ft->node.version);
797 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
798 enum fs_flow_table_type table_type,
799 enum fs_flow_table_op_mod op_mod,
802 struct mlx5_flow_table *ft;
805 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
807 return ERR_PTR(-ENOMEM);
809 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
816 ft->node.type = FS_TYPE_FLOW_TABLE;
818 ft->type = table_type;
821 INIT_LIST_HEAD(&ft->fwd_rules);
822 mutex_init(&ft->lock);
827 /* If reverse is false, we search for the first flow table in the
828 * root sub-tree starting from start (closest from the right); otherwise we
829 * search for the last flow table in the root sub-tree up to start (closest from the left).
831 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
832 struct list_head *start,
835 #define list_advance_entry(pos, reverse) \
836 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
838 #define list_for_each_advance_continue(pos, head, reverse) \
839 for (pos = list_advance_entry(pos, reverse); \
840 &pos->list != (head); \
841 pos = list_advance_entry(pos, reverse))
843 struct fs_node *iter = list_entry(start, struct fs_node, list);
844 struct mlx5_flow_table *ft = NULL;
846 if (!root || root->type == FS_TYPE_PRIO_CHAINS)
849 list_for_each_advance_continue(iter, &root->children, reverse) {
850 if (iter->type == FS_TYPE_FLOW_TABLE) {
851 fs_get_obj(ft, iter);
854 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
862 /* If reverse is false, return the first flow table in the next priority
863 * after prio in the tree; otherwise return the last flow table in the
864 * previous priority before prio in the tree.
866 static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
868 struct mlx5_flow_table *ft = NULL;
869 struct fs_node *curr_node;
870 struct fs_node *parent;
872 parent = prio->node.parent;
873 curr_node = &prio->node;
874 while (!ft && parent) {
875 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
877 parent = curr_node->parent;
882 /* Assumes the whole tree is locked by the chain_lock mutex */
883 static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
885 return find_closest_ft(prio, false);
888 /* Assumes the whole tree is locked by the chain_lock mutex */
889 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
891 return find_closest_ft(prio, true);
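/* find_next_chained_ft() and find_prev_chained_ft() locate the flow
 * table that logically follows or precedes a priority in the steering
 * chain. They are used below to keep "forward to next priority"
 * semantics intact when tables are created or destroyed in the middle
 * of the chain.
 */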
894 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
895 struct mlx5_flow_act *flow_act)
897 struct fs_prio *prio;
900 next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
901 fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
903 return find_next_chained_ft(prio);
906 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
907 struct fs_prio *prio,
908 struct mlx5_flow_table *ft)
910 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
911 struct mlx5_flow_table *iter;
914 fs_for_each_ft(iter, prio) {
915 err = root->cmds->modify_flow_table(root, iter, ft);
918 "Failed to modify flow table id %d, type %d, err %d\n",
919 iter->id, iter->type, err);
920 /* The driver is out of sync with the FW */
927 /* Connect flow tables from the previous priority of prio to ft */
928 static int connect_prev_fts(struct mlx5_core_dev *dev,
929 struct mlx5_flow_table *ft,
930 struct fs_prio *prio)
932 struct mlx5_flow_table *prev_ft;
934 prev_ft = find_prev_chained_ft(prio);
936 struct fs_prio *prev_prio;
938 fs_get_obj(prev_prio, prev_ft->node.parent);
939 return connect_fts_in_prio(dev, prev_prio, ft);
944 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
947 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
948 struct mlx5_ft_underlay_qp *uqp;
949 int min_level = INT_MAX;
954 min_level = root->root_ft->level;
956 if (ft->level >= min_level)
959 if (list_empty(&root->underlay_qpns)) {
960 /* Don't set any QPN (zero) in case QPN list is empty */
962 err = root->cmds->update_root_ft(root, ft, qpn, false);
964 list_for_each_entry(uqp, &root->underlay_qpns, list) {
966 err = root->cmds->update_root_ft(root, ft,
974 mlx5_core_warn(root->dev,
975 "Update root flow table of id(%u) qpn(%d) failed\n",
983 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
984 struct mlx5_flow_destination *dest)
986 struct mlx5_flow_root_namespace *root;
987 struct mlx5_flow_table *ft;
988 struct mlx5_flow_group *fg;
990 int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
993 fs_get_obj(fte, rule->node.parent);
994 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
996 down_write_ref_node(&fte->node, false);
997 fs_get_obj(fg, fte->node.parent);
998 fs_get_obj(ft, fg->node.parent);
1000 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1001 root = find_root(&ft->node);
1002 err = root->cmds->update_fte(root, ft, fg,
1004 up_write_ref_node(&fte->node, false);
1009 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
1010 struct mlx5_flow_destination *new_dest,
1011 struct mlx5_flow_destination *old_dest)
1016 if (handle->num_rules != 1)
1018 return _mlx5_modify_rule_destination(handle->rule[0],
1022 for (i = 0; i < handle->num_rules; i++) {
1023 if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
1024 return _mlx5_modify_rule_destination(handle->rule[i],
1031 /* Modify/set FWD rules that point to old_next_ft so they point to new_next_ft */
1032 static int connect_fwd_rules(struct mlx5_core_dev *dev,
1033 struct mlx5_flow_table *new_next_ft,
1034 struct mlx5_flow_table *old_next_ft)
1036 struct mlx5_flow_destination dest = {};
1037 struct mlx5_flow_rule *iter;
1040 /* new_next_ft and old_next_ft could be NULL only
1041 * when we create/destroy the anchor flow table.
1043 if (!new_next_ft || !old_next_ft)
1046 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1047 dest.ft = new_next_ft;
1049 mutex_lock(&old_next_ft->lock);
1050 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1051 mutex_unlock(&old_next_ft->lock);
1052 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1053 if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1054 iter->ft->ns == new_next_ft->ns)
1057 err = _mlx5_modify_rule_destination(iter, &dest);
1059 pr_err("mlx5_core: failed to modify rule to point to flow table %d\n",
1065 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1066 struct fs_prio *prio)
1068 struct mlx5_flow_table *next_ft, *first_ft;
1071 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1073 first_ft = list_first_entry_or_null(&prio->node.children,
1074 struct mlx5_flow_table, node.list);
1075 if (!first_ft || first_ft->level > ft->level) {
1076 err = connect_prev_fts(dev, ft, prio);
1080 next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
1081 err = connect_fwd_rules(dev, ft, next_ft);
1086 if (MLX5_CAP_FLOWTABLE(dev,
1087 flow_table_properties_nic_receive.modify_root))
1088 err = update_root_ft_create(ft, prio);
1092 static void list_add_flow_table(struct mlx5_flow_table *ft,
1093 struct fs_prio *prio)
1095 struct list_head *prev = &prio->node.children;
1096 struct mlx5_flow_table *iter;
1098 fs_for_each_ft(iter, prio) {
1099 if (iter->level > ft->level)
1101 prev = &iter->node.list;
1103 list_add(&ft->node.list, prev);
1106 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1107 struct mlx5_flow_table_attr *ft_attr,
1108 enum fs_flow_table_op_mod op_mod,
1111 struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1112 bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1113 struct mlx5_flow_table *next_ft;
1114 struct fs_prio *fs_prio = NULL;
1115 struct mlx5_flow_table *ft;
1119 pr_err("mlx5: flow steering failed to find root of namespace\n");
1120 return ERR_PTR(-ENODEV);
1123 mutex_lock(&root->chain_lock);
1124 fs_prio = find_prio(ns, ft_attr->prio);
1130 /* The level is related to the
1131 * priority level range.
1133 if (ft_attr->level >= fs_prio->num_levels) {
1138 ft_attr->level += fs_prio->start_level;
1141 /* The level is related to the
1142 * priority level range.
1144 ft = alloc_flow_table(ft_attr->level,
1147 op_mod, ft_attr->flags);
1153 tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1154 next_ft = unmanaged ? ft_attr->next_ft :
1155 find_next_chained_ft(fs_prio);
1156 ft->def_miss_action = ns->def_miss_action;
1158 err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
1163 err = connect_flow_table(root->dev, ft, fs_prio);
1168 ft->node.active = true;
1169 down_write_ref_node(&fs_prio->node, false);
1171 tree_add_node(&ft->node, &fs_prio->node);
1172 list_add_flow_table(ft, fs_prio);
1174 ft->node.root = fs_prio->node.root;
1177 up_write_ref_node(&fs_prio->node, false);
1178 mutex_unlock(&root->chain_lock);
1179 trace_mlx5_fs_add_ft(ft);
1182 root->cmds->destroy_flow_table(root, ft);
1184 rhltable_destroy(&ft->fgs_hash);
1187 mutex_unlock(&root->chain_lock);
1188 return ERR_PTR(err);
1191 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1192 struct mlx5_flow_table_attr *ft_attr)
1194 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1196 EXPORT_SYMBOL(mlx5_create_flow_table);
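/* Minimal illustrative use of the API exported above (hypothetical
 * caller code, not part of this file; error handling trimmed):
 *
 *   struct mlx5_flow_table_attr ft_attr = {};
 *   struct mlx5_flow_table *ft;
 *
 *   ft_attr.prio = 0;
 *   ft_attr.max_fte = 64;
 *   ft = mlx5_create_flow_table(ns, &ft_attr);
 *   if (IS_ERR(ft))
 *           return PTR_ERR(ft);
 *
 * The table is placed in priority 0 of the given namespace and is torn
 * down again with mlx5_destroy_flow_table(ft).
 */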
1198 u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
1202 EXPORT_SYMBOL(mlx5_flow_table_id);
1204 struct mlx5_flow_table *
1205 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1206 struct mlx5_flow_table_attr *ft_attr, u16 vport)
1208 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1211 struct mlx5_flow_table*
1212 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1213 int prio, u32 level)
1215 struct mlx5_flow_table_attr ft_attr = {};
1217 ft_attr.level = level;
1218 ft_attr.prio = prio;
1219 ft_attr.max_fte = 1;
1221 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1223 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1225 #define MAX_FLOW_GROUP_SIZE BIT(24)
1226 struct mlx5_flow_table*
1227 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1228 struct mlx5_flow_table_attr *ft_attr)
1230 int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1231 int max_num_groups = ft_attr->autogroup.max_num_groups;
1232 struct mlx5_flow_table *ft;
1233 int autogroups_max_fte;
1235 ft = mlx5_create_flow_table(ns, ft_attr);
1239 autogroups_max_fte = ft->max_fte - num_reserved_entries;
1240 if (max_num_groups > autogroups_max_fte)
1242 if (num_reserved_entries > ft->max_fte)
1245 /* Align the number of groups according to the largest group size */
1246 if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1247 max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1249 ft->autogroup.active = true;
1250 ft->autogroup.required_groups = max_num_groups;
1251 ft->autogroup.max_fte = autogroups_max_fte;
1252 /* We reserve space for flow groups in addition to the max types */
1253 ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1258 mlx5_destroy_flow_table(ft);
1259 return ERR_PTR(-ENOSPC);
1261 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
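/* Worked example for the auto-group sizing above, with purely
 * illustrative numbers: for max_fte = 1024, num_reserved_entries = 0
 * and max_num_groups = 3, autogroups_max_fte is 1024 and group_size is
 * 1024 / (3 + 1) = 256, i.e. room for one spare group is kept beyond
 * the requested maximum so that rules with a new match mask can still
 * be absorbed.
 */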
1263 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1266 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1267 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1268 fg_in, match_criteria);
1269 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1271 match_criteria_enable);
1272 int start_index = MLX5_GET(create_flow_group_in, fg_in,
1274 int end_index = MLX5_GET(create_flow_group_in, fg_in,
1276 struct mlx5_flow_group *fg;
1279 if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1280 return ERR_PTR(-EPERM);
1282 down_write_ref_node(&ft->node, false);
1283 fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1284 start_index, end_index,
1285 ft->node.children.prev);
1286 up_write_ref_node(&ft->node, false);
1290 err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1292 tree_put_node(&fg->node, false);
1293 return ERR_PTR(err);
1295 trace_mlx5_fs_add_fg(fg);
1296 fg->node.active = true;
1300 EXPORT_SYMBOL(mlx5_create_flow_group);
1302 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1304 struct mlx5_flow_rule *rule;
1306 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1310 INIT_LIST_HEAD(&rule->next_ft);
1311 rule->node.type = FS_TYPE_FLOW_DEST;
1313 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1315 rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
1320 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1322 struct mlx5_flow_handle *handle;
1324 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1328 handle->num_rules = num_rules;
1333 static void destroy_flow_handle(struct fs_fte *fte,
1334 struct mlx5_flow_handle *handle,
1335 struct mlx5_flow_destination *dest,
1339 if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1341 list_del(&handle->rule[i]->node.list);
1342 kfree(handle->rule[i]);
1348 static struct mlx5_flow_handle *
1349 create_flow_handle(struct fs_fte *fte,
1350 struct mlx5_flow_destination *dest,
1355 struct mlx5_flow_handle *handle;
1356 struct mlx5_flow_rule *rule = NULL;
1357 static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1358 static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1362 handle = alloc_handle((dest_num) ? dest_num : 1);
1364 return ERR_PTR(-ENOMEM);
1368 rule = find_flow_rule(fte, dest + i);
1370 refcount_inc(&rule->node.refcount);
1376 rule = alloc_rule(dest + i);
1380 /* Add dest to the dests list - flow tables need to be at the
1381 * end of the list for forward-to-next-prio rules.
1383 tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1385 dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1386 list_add(&rule->node.list, &fte->node.children);
1388 list_add_tail(&rule->node.list, &fte->node.children);
1392 if (is_fwd_dest_type(dest[i].type))
1395 type = dest[i].type ==
1396 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1397 *modify_mask |= type ? count : dst;
1400 handle->rule[i] = rule;
1401 } while (++i < dest_num);
1406 destroy_flow_handle(fte, handle, dest, i);
1407 return ERR_PTR(-ENOMEM);
1410 /* fte should not be deleted while calling this function */
1411 static struct mlx5_flow_handle *
1412 add_rule_fte(struct fs_fte *fte,
1413 struct mlx5_flow_group *fg,
1414 struct mlx5_flow_destination *dest,
1418 struct mlx5_flow_root_namespace *root;
1419 struct mlx5_flow_handle *handle;
1420 struct mlx5_flow_table *ft;
1421 int modify_mask = 0;
1423 bool new_rule = false;
1425 handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1427 if (IS_ERR(handle) || !new_rule)
1431 modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1433 fs_get_obj(ft, fg->node.parent);
1434 root = find_root(&fg->node);
1435 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1436 err = root->cmds->create_fte(root, ft, fg, fte);
1438 err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1442 fte->node.active = true;
1443 fte->status |= FS_FTE_STATUS_EXISTING;
1444 atomic_inc(&fg->node.version);
1450 destroy_flow_handle(fte, handle, dest, handle->num_rules);
1451 return ERR_PTR(err);
1454 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1455 const struct mlx5_flow_spec *spec)
1457 struct list_head *prev = &ft->node.children;
1458 u32 max_fte = ft->autogroup.max_fte;
1459 unsigned int candidate_index = 0;
1460 unsigned int group_size = 0;
1461 struct mlx5_flow_group *fg;
1463 if (!ft->autogroup.active)
1464 return ERR_PTR(-ENOENT);
1466 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1467 group_size = ft->autogroup.group_size;
1469 /* max_fte == ft->autogroup.max_types */
1470 if (group_size == 0)
1473 /* sorted by start_index */
1474 fs_for_each_fg(fg, ft) {
1475 if (candidate_index + group_size > fg->start_index)
1476 candidate_index = fg->start_index + fg->max_ftes;
1479 prev = &fg->node.list;
1482 if (candidate_index + group_size > max_fte)
1483 return ERR_PTR(-ENOSPC);
1485 fg = alloc_insert_flow_group(ft,
1486 spec->match_criteria_enable,
1487 spec->match_criteria,
1489 candidate_index + group_size - 1,
1494 if (group_size == ft->autogroup.group_size)
1495 ft->autogroup.num_groups++;
1501 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1502 struct mlx5_flow_group *fg)
1504 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1505 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1506 void *match_criteria_addr;
1507 u8 src_esw_owner_mask_on;
1512 in = kvzalloc(inlen, GFP_KERNEL);
1516 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1517 fg->mask.match_criteria_enable);
1518 MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1519 MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
1522 misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1524 src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1525 source_eswitch_owner_vhca_id);
1526 MLX5_SET(create_flow_group_in, in,
1527 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1529 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1530 in, match_criteria);
1531 memcpy(match_criteria_addr, fg->mask.match_criteria,
1532 sizeof(fg->mask.match_criteria));
1534 err = root->cmds->create_flow_group(root, ft, in, fg);
1536 fg->node.active = true;
1537 trace_mlx5_fs_add_fg(fg);
1544 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1545 struct mlx5_flow_destination *d2)
1547 if (d1->type == d2->type) {
1548 if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
1549 d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
1550 d1->vport.num == d2->vport.num &&
1551 d1->vport.flags == d2->vport.flags &&
1552 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1553 (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1554 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1555 (d1->vport.pkt_reformat->id ==
1556 d2->vport.pkt_reformat->id) : true)) ||
1557 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1558 d1->ft == d2->ft) ||
1559 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1560 d1->tir_num == d2->tir_num) ||
1561 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1562 d1->ft_num == d2->ft_num) ||
1563 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
1564 d1->sampler_id == d2->sampler_id))
1571 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1572 struct mlx5_flow_destination *dest)
1574 struct mlx5_flow_rule *rule;
1576 list_for_each_entry(rule, &fte->node.children, node.list) {
1577 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1583 static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1584 const struct mlx5_fs_vlan *vlan1)
1586 return vlan0->ethtype != vlan1->ethtype ||
1587 vlan0->vid != vlan1->vid ||
1588 vlan0->prio != vlan1->prio;
1591 static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1592 const struct mlx5_flow_act *act2)
1594 u32 action1 = act1->action;
1595 u32 action2 = act2->action;
1598 xored_actions = action1 ^ action2;
1600 /* if one rule only wants to count, it's ok */
1601 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1602 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1605 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1606 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1607 MLX5_FLOW_CONTEXT_ACTION_DECAP |
1608 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1609 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1610 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1611 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1612 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1615 if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1616 act1->pkt_reformat != act2->pkt_reformat)
1619 if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1620 act1->modify_hdr != act2->modify_hdr)
1623 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1624 check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1627 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1628 check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
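/* check_conflicting_actions() XORs the two action bitmaps: actions that
 * only one of the rules requests (drop, packet reformat, decap, header
 * rewrite, VLAN push/pop) cannot be merged into a single FTE, and even
 * shared action bits conflict when their arguments (pkt_reformat,
 * modify_hdr, VLAN fields) differ. A rule that only counts never
 * conflicts.
 */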
1634 static int check_conflicting_ftes(struct fs_fte *fte,
1635 const struct mlx5_flow_context *flow_context,
1636 const struct mlx5_flow_act *flow_act)
1638 if (check_conflicting_actions(flow_act, &fte->action)) {
1639 mlx5_core_warn(get_dev(&fte->node),
1640 "Found two FTEs with conflicting actions\n");
1644 if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1645 fte->flow_context.flow_tag != flow_context->flow_tag) {
1646 mlx5_core_warn(get_dev(&fte->node),
1647 "FTE flow tag %u already exists with different flow tag %u\n",
1648 fte->flow_context.flow_tag,
1649 flow_context->flow_tag);
1656 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1657 const struct mlx5_flow_spec *spec,
1658 struct mlx5_flow_act *flow_act,
1659 struct mlx5_flow_destination *dest,
1663 struct mlx5_flow_handle *handle;
1668 ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1670 return ERR_PTR(ret);
1672 old_action = fte->action.action;
1673 fte->action.action |= flow_act->action;
1674 handle = add_rule_fte(fte, fg, dest, dest_num,
1675 old_action != flow_act->action);
1676 if (IS_ERR(handle)) {
1677 fte->action.action = old_action;
1680 trace_mlx5_fs_set_fte(fte, false);
1682 for (i = 0; i < handle->num_rules; i++) {
1683 if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1684 tree_add_node(&handle->rule[i]->node, &fte->node);
1685 trace_mlx5_fs_add_rule(handle->rule[i]);
1691 static bool counter_is_valid(u32 action)
1693 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1694 MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1695 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1698 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1699 struct mlx5_flow_act *flow_act,
1700 struct mlx5_flow_table *ft)
1702 bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1703 u32 action = flow_act->action;
1705 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1706 return counter_is_valid(action);
1708 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1712 if (ft->type != FS_FT_FDB &&
1713 ft->type != FS_FT_NIC_RX)
1716 if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1717 ft->type != dest->ft->type)
1721 if (!dest || ((dest->type ==
1722 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1723 (dest->ft->level <= ft->level && !ignore_level)))
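/* Forwarding must normally target a table at a higher level than the
 * current one so the steering graph stays loop-free; the
 * FLOW_ACT_IGNORE_FLOW_LEVEL flag lets trusted callers opt out of this
 * check.
 */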
1729 struct list_head list;
1730 struct mlx5_flow_group *g;
1733 static void free_match_list(struct match_list *head, bool ft_locked)
1735 struct match_list *iter, *match_tmp;
1737 list_for_each_entry_safe(iter, match_tmp, &head->list,
1739 tree_put_node(&iter->g->node, ft_locked);
1740 list_del(&iter->list);
1745 static int build_match_list(struct match_list *match_head,
1746 struct mlx5_flow_table *ft,
1747 const struct mlx5_flow_spec *spec,
1748 struct mlx5_flow_group *fg,
1751 struct rhlist_head *tmp, *list;
1752 struct mlx5_flow_group *g;
1756 INIT_LIST_HEAD(&match_head->list);
1757 /* Collect all fgs that have a matching match_criteria */
1758 list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1759 /* RCU read-side section is atomic; we can't execute FW commands here */
1760 rhl_for_each_entry_rcu(g, tmp, list, hash) {
1761 struct match_list *curr_match;
1766 if (unlikely(!tree_get_node(&g->node)))
1769 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1772 free_match_list(match_head, ft_locked);
1776 list_add_tail(&curr_match->list, &match_head->list);
1782 static u64 matched_fgs_get_version(struct list_head *match_head)
1784 struct match_list *iter;
1787 list_for_each_entry(iter, match_head, list)
1788 version += (u64)atomic_read(&iter->g->node.version);
1792 static struct fs_fte *
1793 lookup_fte_locked(struct mlx5_flow_group *g,
1794 const u32 *match_value,
1797 struct fs_fte *fte_tmp;
1800 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1802 nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1803 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1805 if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1809 if (!fte_tmp->node.active) {
1810 tree_put_node(&fte_tmp->node, false);
1815 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1818 up_write_ref_node(&g->node, false);
1820 up_read_ref_node(&g->node);
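/* lookup_fte_locked() returns the FTE with the given match value inside
 * a group, referenced and with its node write-locked, or NULL. The
 * take_write argument selects whether the group itself is locked for
 * read or for write: the first pass is optimistic (read lock), and the
 * caller retries with take_write set when the group versions change
 * underneath it.
 */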
1824 static struct mlx5_flow_handle *
1825 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1826 struct list_head *match_head,
1827 const struct mlx5_flow_spec *spec,
1828 struct mlx5_flow_act *flow_act,
1829 struct mlx5_flow_destination *dest,
1833 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1834 struct mlx5_flow_group *g;
1835 struct mlx5_flow_handle *rule;
1836 struct match_list *iter;
1837 bool take_write = false;
1842 fte = alloc_fte(ft, spec, flow_act);
1844 return ERR_PTR(-ENOMEM);
1846 search_again_locked:
1847 if (flow_act->flags & FLOW_ACT_NO_APPEND)
1849 version = matched_fgs_get_version(match_head);
1850 /* Try to find an fte with identical match value and attempt to update its
1853 list_for_each_entry(iter, match_head, list) {
1854 struct fs_fte *fte_tmp;
1857 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1860 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1861 /* No error check needed here, because insert_fte() is not called */
1862 up_write_ref_node(&fte_tmp->node, false);
1863 tree_put_node(&fte_tmp->node, false);
1864 kmem_cache_free(steering->ftes_cache, fte);
1869 /* No group with matching fte found, or we skipped the search.
1870 * Try to add a new fte to any matching fg.
1873 /* Check the ft version, for the case that a new flow group
1874 * was added while the fgs weren't locked
1876 if (atomic_read(&ft->node.version) != ft_version) {
1877 rule = ERR_PTR(-EAGAIN);
1881 /* Check the fgs version. If the version has changed, it could be that an
1882 * FTE with the same match value was added while the fgs weren't
1885 if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1886 version != matched_fgs_get_version(match_head)) {
1888 goto search_again_locked;
1891 list_for_each_entry(iter, match_head, list) {
1894 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1896 if (!g->node.active) {
1897 up_write_ref_node(&g->node, false);
1901 err = insert_fte(g, fte);
1903 up_write_ref_node(&g->node, false);
1906 kmem_cache_free(steering->ftes_cache, fte);
1907 return ERR_PTR(err);
1910 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1911 up_write_ref_node(&g->node, false);
1912 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1913 up_write_ref_node(&fte->node, false);
1915 tree_put_node(&fte->node, false);
1918 rule = ERR_PTR(-ENOENT);
1920 kmem_cache_free(steering->ftes_cache, fte);
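/* _mlx5_add_flow_rules() below is the lock-avoiding insertion path: it
 * snapshots ft->node.version, collects the candidate flow groups under
 * a read lock and first tries to reuse an existing FTE through
 * try_add_to_existing_fg(). If that returns -EAGAIN, or the table
 * version changed in the meantime, the table is re-locked for write and
 * the search is repeated before falling back to allocating a new auto
 * flow group and FTE.
 */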
1924 static struct mlx5_flow_handle *
1925 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1926 const struct mlx5_flow_spec *spec,
1927 struct mlx5_flow_act *flow_act,
1928 struct mlx5_flow_destination *dest,
1932 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1933 struct mlx5_flow_handle *rule;
1934 struct match_list match_head;
1935 struct mlx5_flow_group *g;
1936 bool take_write = false;
1942 if (!check_valid_spec(spec))
1943 return ERR_PTR(-EINVAL);
1945 if (flow_act->fg && ft->autogroup.active)
1946 return ERR_PTR(-EINVAL);
1948 for (i = 0; i < dest_num; i++) {
1949 if (!dest_is_valid(&dest[i], flow_act, ft))
1950 return ERR_PTR(-EINVAL);
1952 nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1953 search_again_locked:
1954 version = atomic_read(&ft->node.version);
1956 /* Collect all fgs that have a matching match_criteria */
1957 err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
1960 up_write_ref_node(&ft->node, false);
1962 up_read_ref_node(&ft->node);
1963 return ERR_PTR(err);
1967 up_read_ref_node(&ft->node);
1969 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1971 free_match_list(&match_head, take_write);
1972 if (!IS_ERR(rule) ||
1973 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1975 up_write_ref_node(&ft->node, false);
1980 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1984 if (PTR_ERR(rule) == -EAGAIN ||
1985 version != atomic_read(&ft->node.version))
1986 goto search_again_locked;
1988 g = alloc_auto_flow_group(ft, spec);
1991 up_write_ref_node(&ft->node, false);
1995 fte = alloc_fte(ft, spec, flow_act);
1997 up_write_ref_node(&ft->node, false);
2002 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
2003 up_write_ref_node(&ft->node, false);
2005 err = create_auto_flow_group(ft, g);
2007 goto err_release_fg;
2009 err = insert_fte(g, fte);
2011 goto err_release_fg;
2013 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
2014 up_write_ref_node(&g->node, false);
2015 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2016 up_write_ref_node(&fte->node, false);
2018 tree_put_node(&fte->node, false);
2019 tree_put_node(&g->node, false);
2023 up_write_ref_node(&g->node, false);
2024 kmem_cache_free(steering->ftes_cache, fte);
2026 tree_put_node(&g->node, false);
2027 return ERR_PTR(err);
2030 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
2032 return ((ft->type == FS_FT_NIC_RX) &&
2033 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
2036 struct mlx5_flow_handle *
2037 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2038 const struct mlx5_flow_spec *spec,
2039 struct mlx5_flow_act *flow_act,
2040 struct mlx5_flow_destination *dest,
2043 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2044 static const struct mlx5_flow_spec zero_spec = {};
2045 struct mlx5_flow_destination *gen_dest = NULL;
2046 struct mlx5_flow_table *next_ft = NULL;
2047 struct mlx5_flow_handle *handle = NULL;
2048 u32 sw_action = flow_act->action;
2054 if (!is_fwd_next_action(sw_action))
2055 return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2057 if (!fwd_next_prio_supported(ft))
2058 return ERR_PTR(-EOPNOTSUPP);
2060 mutex_lock(&root->chain_lock);
2061 next_ft = find_next_fwd_ft(ft, flow_act);
2063 handle = ERR_PTR(-EOPNOTSUPP);
2067 gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2070 handle = ERR_PTR(-ENOMEM);
2073 for (i = 0; i < num_dest; i++)
2074 gen_dest[i] = dest[i];
2076 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2077 gen_dest[i].ft = next_ft;
2080 flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2081 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2082 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2083 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2087 if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2088 mutex_lock(&next_ft->lock);
2089 list_add(&handle->rule[num_dest - 1]->next_ft,
2090 &next_ft->fwd_rules);
2091 mutex_unlock(&next_ft->lock);
2092 handle->rule[num_dest - 1]->sw_action = sw_action;
2093 handle->rule[num_dest - 1]->ft = ft;
2096 mutex_unlock(&root->chain_lock);
2100 EXPORT_SYMBOL(mlx5_add_flow_rules);
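/* Minimal illustrative pairing of mlx5_add_flow_rules() and
 * mlx5_del_flow_rules() (hypothetical caller code, not part of this
 * file; match-field setup and error handling trimmed, tirn is a
 * hypothetical TIR number):
 *
 *   struct mlx5_flow_act flow_act = {
 *           .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *   };
 *   struct mlx5_flow_destination dest = {
 *           .type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *           .tir_num = tirn,
 *   };
 *   struct mlx5_flow_spec *spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *   struct mlx5_flow_handle *handle;
 *
 *   handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *   ...
 *   mlx5_del_flow_rules(handle);
 *   kvfree(spec);
 */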
2102 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2107 /* To consolidate the HW changes we lock the FTE against other
2108 * changes and increase its refcount, so that the FTE's "del"
2109 * functions are not run; they are handled here instead.
2110 * The removal of the rules is done under the locked FTE.
2111 * After removing all the handle's rules, if there are remaining
2112 * rules, we only need to modify the FTE in FW and then
2113 * unlock/decrease the refcount we increased before.
2114 * Otherwise, the FTE should be deleted: first delete the
2115 * FTE in FW, then unlock the FTE and call tree_put_node() on
2116 * it, which handles the last decrease of the refcount as
2117 * well as the required handling of its parent.
2119 fs_get_obj(fte, handle->rule[0]->node.parent);
2120 down_write_ref_node(&fte->node, false);
2121 for (i = handle->num_rules - 1; i >= 0; i--)
2122 tree_remove_node(&handle->rule[i]->node, true);
2123 if (list_empty(&fte->node.children)) {
2124 fte->node.del_hw_func(&fte->node);
2125 /* Avoid double call to del_hw_fte */
2126 fte->node.del_hw_func = NULL;
2127 up_write_ref_node(&fte->node, false);
2128 tree_put_node(&fte->node, false);
2129 } else if (fte->dests_size) {
2130 if (fte->modify_mask)
2132 up_write_ref_node(&fte->node, false);
2134 up_write_ref_node(&fte->node, false);
2138 EXPORT_SYMBOL(mlx5_del_flow_rules);
2140 /* Assuming prio->node.children (flow tables) is sorted by level */
2141 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2143 struct fs_prio *prio;
2145 fs_get_obj(prio, ft->node.parent);
2147 if (!list_is_last(&ft->node.list, &prio->node.children))
2148 return list_next_entry(ft, node.list);
2149 return find_next_chained_ft(prio);
2152 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2154 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2155 struct mlx5_ft_underlay_qp *uqp;
2156 struct mlx5_flow_table *new_root_ft = NULL;
2160 if (root->root_ft != ft)
2163 new_root_ft = find_next_ft(ft);
2165 root->root_ft = NULL;
2169 if (list_empty(&root->underlay_qpns)) {
2170 /* Don't set any QPN (zero) in case QPN list is empty */
2172 err = root->cmds->update_root_ft(root, new_root_ft,
2175 list_for_each_entry(uqp, &root->underlay_qpns, list) {
2177 err = root->cmds->update_root_ft(root,
2186 mlx5_core_warn(root->dev,
2187 "Update root flow table of id(%u) qpn(%d) failed\n",
2190 root->root_ft = new_root_ft;
2195 /* Connect the flow table from the previous priority to
2196 * the next flow table.
2198 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2200 struct mlx5_core_dev *dev = get_dev(&ft->node);
2201 struct mlx5_flow_table *next_ft;
2202 struct fs_prio *prio;
2205 err = update_root_ft_destroy(ft);
2209 fs_get_obj(prio, ft->node.parent);
2210 if (!(list_first_entry(&prio->node.children,
2211 struct mlx5_flow_table,
2215 next_ft = find_next_ft(ft);
2216 err = connect_fwd_rules(dev, next_ft, ft);
2220 err = connect_prev_fts(dev, next_ft, prio);
2222 mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2227 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2229 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2232 mutex_lock(&root->chain_lock);
2233 if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2234 err = disconnect_flow_table(ft);
2236 mutex_unlock(&root->chain_lock);
2239 if (tree_remove_node(&ft->node, false))
2240 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2242 mutex_unlock(&root->chain_lock);
2246 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2248 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2250 if (tree_remove_node(&fg->node, false))
2251 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2254 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2256 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2259 struct mlx5_flow_steering *steering = dev->priv.steering;
2261 if (!steering || !steering->fdb_sub_ns)
2264 return steering->fdb_sub_ns[n];
2266 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2268 static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
2271 case MLX5_FLOW_NAMESPACE_BYPASS:
2272 case MLX5_FLOW_NAMESPACE_LAG:
2273 case MLX5_FLOW_NAMESPACE_OFFLOADS:
2274 case MLX5_FLOW_NAMESPACE_ETHTOOL:
2275 case MLX5_FLOW_NAMESPACE_KERNEL:
2276 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2277 case MLX5_FLOW_NAMESPACE_ANCHOR:
2284 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2285 enum mlx5_flow_namespace_type type)
2287 struct mlx5_flow_steering *steering = dev->priv.steering;
2288 struct mlx5_flow_root_namespace *root_ns;
2290 struct fs_prio *fs_prio;
2291 struct mlx5_flow_namespace *ns;
2297 case MLX5_FLOW_NAMESPACE_FDB:
2298 if (steering->fdb_root_ns)
2299 return &steering->fdb_root_ns->ns;
2301 case MLX5_FLOW_NAMESPACE_PORT_SEL:
2302 if (steering->port_sel_root_ns)
2303 return &steering->port_sel_root_ns->ns;
2305 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2306 if (steering->sniffer_rx_root_ns)
2307 return &steering->sniffer_rx_root_ns->ns;
2309 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2310 if (steering->sniffer_tx_root_ns)
2311 return &steering->sniffer_tx_root_ns->ns;
2313 case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
2314 root_ns = steering->fdb_root_ns;
2315 prio = FDB_BYPASS_PATH;
2317 case MLX5_FLOW_NAMESPACE_EGRESS:
2318 case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
2319 root_ns = steering->egress_root_ns;
2320 prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2322 case MLX5_FLOW_NAMESPACE_RDMA_RX:
2323 root_ns = steering->rdma_rx_root_ns;
2324 prio = RDMA_RX_BYPASS_PRIO;
2326 case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
2327 root_ns = steering->rdma_rx_root_ns;
2328 prio = RDMA_RX_KERNEL_PRIO;
2330 case MLX5_FLOW_NAMESPACE_RDMA_TX:
2331 root_ns = steering->rdma_tx_root_ns;
2333 case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
2334 root_ns = steering->rdma_rx_root_ns;
2335 prio = RDMA_RX_COUNTERS_PRIO;
2337 case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
2338 root_ns = steering->rdma_tx_root_ns;
2339 prio = RDMA_TX_COUNTERS_PRIO;
2341 default: /* Must be NIC RX */
2342 WARN_ON(!is_nic_rx_ns(type));
2343 root_ns = steering->root_ns;
2351 fs_prio = find_prio(&root_ns->ns, prio);
2355 ns = list_first_entry(&fs_prio->node.children,
2361 EXPORT_SYMBOL(mlx5_get_flow_namespace);
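/* Example use of mlx5_get_flow_namespace() (sketch): callers resolve a
 * namespace and then create tables in it; ft_attr below is a hypothetical,
 * caller-owned attribute struct.
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */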
2363 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2364 enum mlx5_flow_namespace_type type,
2367 struct mlx5_flow_steering *steering = dev->priv.steering;
2373 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2374 if (vport >= steering->esw_egress_acl_vports)
2376 if (steering->esw_egress_root_ns &&
2377 steering->esw_egress_root_ns[vport])
2378 return &steering->esw_egress_root_ns[vport]->ns;
2381 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2382 if (vport >= steering->esw_ingress_acl_vports)
2384 if (steering->esw_ingress_root_ns &&
2385 steering->esw_ingress_root_ns[vport])
2386 return &steering->esw_ingress_root_ns[vport]->ns;
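/* Allocate an fs_prio node of the given type and link it as the last child
 * of the parent namespace. num_levels bounds how many flow table levels the
 * priority may hold.
 */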
2394 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2397 enum fs_node_type type)
2399 struct fs_prio *fs_prio;
2401 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2403 return ERR_PTR(-ENOMEM);
2405 fs_prio->node.type = type;
2406 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2407 tree_add_node(&fs_prio->node, &ns->node);
2408 fs_prio->num_levels = num_levels;
2409 fs_prio->prio = prio;
2410 list_add_tail(&fs_prio->node.list, &ns->node.children);
2415 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2419 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2422 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2423 unsigned int prio, int num_levels)
2425 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2428 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2431 ns->node.type = FS_TYPE_NAMESPACE;
2436 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2439 struct mlx5_flow_namespace *ns;
2441 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2443 return ERR_PTR(-ENOMEM);
2445 fs_init_namespace(ns);
2446 ns->def_miss_action = def_miss_act;
2447 tree_init_node(&ns->node, NULL, del_sw_ns);
2448 tree_add_node(&ns->node, &prio->node);
2449 list_add_tail(&ns->node.list, &prio->node.children);
2454 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2455 struct init_tree_node *prio_metadata)
2457 struct fs_prio *fs_prio;
2460 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2461 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2462 if (IS_ERR(fs_prio))
2463 return PTR_ERR(fs_prio);
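/* GET_FLOW_TABLE_CAP() extracts a single capability bit from the cached
 * MLX5_CAP_FLOW_TABLE HCA capabilities, using the bit offset produced by
 * FS_CAP(); has_required_caps() checks that every capability required by a
 * static tree node is present before the node is instantiated.
 */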
2468 #define FLOW_TABLE_BIT_SZ 1
2469 #define GET_FLOW_TABLE_CAP(dev, offset) \
2470 ((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
2472 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2473 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2477 for (i = 0; i < caps->arr_sz; i++) {
2478 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2484 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2485 struct init_tree_node *init_node,
2486 struct fs_node *fs_parent_node,
2487 struct init_tree_node *init_parent_node,
2490 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2491 flow_table_properties_nic_receive.
2493 struct mlx5_flow_namespace *fs_ns;
2494 struct fs_prio *fs_prio;
2495 struct fs_node *base;
2499 if (init_node->type == FS_TYPE_PRIO) {
2500 if ((init_node->min_ft_level > max_ft_level) ||
2501 !has_required_caps(steering->dev, &init_node->caps))
2504 fs_get_obj(fs_ns, fs_parent_node);
2505 if (init_node->num_leaf_prios)
2506 return create_leaf_prios(fs_ns, prio, init_node);
2507 fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2508 if (IS_ERR(fs_prio))
2509 return PTR_ERR(fs_prio);
2510 base = &fs_prio->node;
2511 } else if (init_node->type == FS_TYPE_NAMESPACE) {
2512 fs_get_obj(fs_prio, fs_parent_node);
2513 fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2515 return PTR_ERR(fs_ns);
2516 base = &fs_ns->node;
2521 for (i = 0; i < init_node->ar_size; i++) {
2522 err = init_root_tree_recursive(steering, &init_node->children[i],
2523 base, init_node, prio);
2526 if (init_node->children[i].type == FS_TYPE_PRIO &&
2527 init_node->children[i].num_leaf_prios) {
2528 prio += init_node->children[i].num_leaf_prios;
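/* Walk the static init_tree_node description and create the matching
 * priorities and namespaces under fs_parent_node, skipping nodes whose
 * required flow table level or capabilities are not supported by the device.
 */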
2535 static int init_root_tree(struct mlx5_flow_steering *steering,
2536 struct init_tree_node *init_node,
2537 struct fs_node *fs_parent_node)
2542 for (i = 0; i < init_node->ar_size; i++) {
2543 err = init_root_tree_recursive(steering, &init_node->children[i],
2552 static void del_sw_root_ns(struct fs_node *node)
2554 struct mlx5_flow_root_namespace *root_ns;
2555 struct mlx5_flow_namespace *ns;
2557 fs_get_obj(ns, node);
2558 root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2559 mutex_destroy(&root_ns->chain_lock);
2563 static struct mlx5_flow_root_namespace
2564 *create_root_ns(struct mlx5_flow_steering *steering,
2565 enum fs_flow_table_type table_type)
2567 const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2568 struct mlx5_flow_root_namespace *root_ns;
2569 struct mlx5_flow_namespace *ns;
2571 /* Create the root namespace */
2572 root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2576 root_ns->dev = steering->dev;
2577 root_ns->table_type = table_type;
2578 root_ns->cmds = cmds;
2580 INIT_LIST_HEAD(&root_ns->underlay_qpns);
2583 fs_init_namespace(ns);
2584 mutex_init(&root_ns->chain_lock);
2585 tree_init_node(&ns->node, NULL, del_sw_root_ns);
2586 tree_add_node(&ns->node, NULL);
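/* set_prio_attrs*() walk the namespace tree and assign each priority its
 * start_level and num_levels so that every priority owns a distinct range of
 * flow table levels.
 */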
2591 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2593 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2595 struct fs_prio *prio;
2597 fs_for_each_prio(prio, ns) {
2598 /* This updates prio start_level and num_levels */
2599 set_prio_attrs_in_prio(prio, acc_level);
2600 acc_level += prio->num_levels;
2605 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2607 struct mlx5_flow_namespace *ns;
2608 int acc_level_ns = acc_level;
2610 prio->start_level = acc_level;
2611 fs_for_each_ns(ns, prio) {
2612 /* This updates start_level and num_levels of ns's priority descendants */
2613 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2615 /* If this is a prio with chains, we can jump from one chain
2616 * (namespace) to another, so we accumulate the levels. */
2618 if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2619 acc_level = acc_level_ns;
2622 if (!prio->num_levels)
2623 prio->num_levels = acc_level_ns - prio->start_level;
2624 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2627 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2629 struct mlx5_flow_namespace *ns = &root_ns->ns;
2630 struct fs_prio *prio;
2631 int start_level = 0;
2633 fs_for_each_prio(prio, ns) {
2634 set_prio_attrs_in_prio(prio, start_level);
2635 start_level += prio->num_levels;
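/* The anchor is a single-entry table created in the last NIC RX namespace
 * (MLX5_FLOW_NAMESPACE_ANCHOR) so that the chained tables always have a
 * final table to point at.
 */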
2639 #define ANCHOR_PRIO 0
2640 #define ANCHOR_SIZE 1
2641 #define ANCHOR_LEVEL 0
2642 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2644 struct mlx5_flow_namespace *ns = NULL;
2645 struct mlx5_flow_table_attr ft_attr = {};
2646 struct mlx5_flow_table *ft;
2648 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2652 ft_attr.max_fte = ANCHOR_SIZE;
2653 ft_attr.level = ANCHOR_LEVEL;
2654 ft_attr.prio = ANCHOR_PRIO;
2656 ft = mlx5_create_flow_table(ns, &ft_attr);
2658 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2664 static int init_root_ns(struct mlx5_flow_steering *steering)
2668 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2669 if (!steering->root_ns)
2672 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2676 set_prio_attrs(steering->root_ns);
2677 err = create_anchor_flow_table(steering);
2684 cleanup_root_ns(steering->root_ns);
2685 steering->root_ns = NULL;
2689 static void clean_tree(struct fs_node *node)
2692 struct fs_node *iter;
2693 struct fs_node *temp;
2695 tree_get_node(node);
2696 list_for_each_entry_safe(iter, temp, &node->children, list)
2698 tree_put_node(node, false);
2699 tree_remove_node(node, false);
2703 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2708 clean_tree(&root_ns->ns.node);
2711 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2713 struct fs_prio *prio;
2715 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2716 if (!steering->sniffer_tx_root_ns)
2719 /* Create single prio */
2720 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2721 return PTR_ERR_OR_ZERO(prio);
2724 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2726 struct fs_prio *prio;
2728 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2729 if (!steering->sniffer_rx_root_ns)
2732 /* Create single prio */
2733 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2734 return PTR_ERR_OR_ZERO(prio);
2737 #define PORT_SEL_NUM_LEVELS 3
2738 static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
2740 struct fs_prio *prio;
2742 steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
2743 if (!steering->port_sel_root_ns)
2746 /* Create single prio */
2747 prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
2748 PORT_SEL_NUM_LEVELS);
2749 return PTR_ERR_OR_ZERO(prio);
2752 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2756 steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2757 if (!steering->rdma_rx_root_ns)
2760 err = init_root_tree(steering, &rdma_rx_root_fs,
2761 &steering->rdma_rx_root_ns->ns.node);
2765 set_prio_attrs(steering->rdma_rx_root_ns);
2770 cleanup_root_ns(steering->rdma_rx_root_ns);
2771 steering->rdma_rx_root_ns = NULL;
2775 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2779 steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2780 if (!steering->rdma_tx_root_ns)
2783 err = init_root_tree(steering, &rdma_tx_root_fs,
2784 &steering->rdma_tx_root_ns->ns.node);
2788 set_prio_attrs(steering->rdma_tx_root_ns);
2793 cleanup_root_ns(steering->rdma_tx_root_ns);
2794 steering->rdma_tx_root_ns = NULL;
2798 /* FT and tc chains are stored in the same array so we can re-use the
2799 * mlx5_get_fdb_sub_ns() and tc API for FT chains.
2800 * When creating a new ns for each chain, store it in the first available slot.
2801 * Assume tc chains are created and stored first, and only then the FT chain.
2803 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2804 struct mlx5_flow_namespace *ns)
2808 while (steering->fdb_sub_ns[chain])
2811 steering->fdb_sub_ns[chain] = ns;
2814 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2815 struct fs_prio *maj_prio)
2817 struct mlx5_flow_namespace *ns;
2818 struct fs_prio *min_prio;
2821 ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2825 for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2826 min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2827 if (IS_ERR(min_prio))
2828 return PTR_ERR(min_prio);
2831 store_fdb_sub_ns_prio_chain(steering, ns);
2836 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2840 struct fs_prio *maj_prio;
2845 levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2846 maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2849 if (IS_ERR(maj_prio))
2850 return PTR_ERR(maj_prio);
2852 for (chain = 0; chain < chains; chain++) {
2853 err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2861 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2865 steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2866 sizeof(*steering->fdb_sub_ns),
2868 if (!steering->fdb_sub_ns)
2871 err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2875 err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2882 static int create_fdb_bypass(struct mlx5_flow_steering *steering)
2884 struct mlx5_flow_namespace *ns;
2885 struct fs_prio *prio;
2888 prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
2890 return PTR_ERR(prio);
2892 ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2896 for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
2897 prio = fs_create_prio(ns, i, 1);
2899 return PTR_ERR(prio);
2904 static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
2906 cleanup_root_ns(steering->fdb_root_ns);
2907 steering->fdb_root_ns = NULL;
2908 kfree(steering->fdb_sub_ns);
2909 steering->fdb_sub_ns = NULL;
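/* Build the FDB root namespace: the bypass path, the fast path chains, and
 * then the FDB_TC_MISS, FDB_BR_OFFLOAD, FDB_SLOW_PATH and FDB_PER_VPORT
 * priorities, in that order.
 */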
2912 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2914 struct fs_prio *maj_prio;
2917 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2918 if (!steering->fdb_root_ns)
2921 err = create_fdb_bypass(steering);
2925 err = create_fdb_fast_path(steering);
2929 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
2930 if (IS_ERR(maj_prio)) {
2931 err = PTR_ERR(maj_prio);
2935 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
2936 if (IS_ERR(maj_prio)) {
2937 err = PTR_ERR(maj_prio);
2941 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2942 if (IS_ERR(maj_prio)) {
2943 err = PTR_ERR(maj_prio);
2947 /* We put this priority last, knowing that nothing will get here
2948 * unless explicitly forwarded to. This is possible because the
2949 * slow path tables have catch-all rules and nothing gets passed beyond them. */
2952 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2953 if (IS_ERR(maj_prio)) {
2954 err = PTR_ERR(maj_prio);
2958 set_prio_attrs(steering->fdb_root_ns);
2962 cleanup_fdb_root_ns(steering);
2966 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2968 struct fs_prio *prio;
2970 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2971 if (!steering->esw_egress_root_ns[vport])
2975 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2976 return PTR_ERR_OR_ZERO(prio);
2979 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2981 struct fs_prio *prio;
2983 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2984 if (!steering->esw_ingress_root_ns[vport])
2988 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2989 return PTR_ERR_OR_ZERO(prio);
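/* The eswitch ACL namespaces are per vport: an array of root namespaces is
 * allocated for total_vports and initialized one vport at a time; on failure
 * the already-initialized entries are cleaned up in reverse order.
 */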
2992 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2994 struct mlx5_flow_steering *steering = dev->priv.steering;
2998 steering->esw_egress_root_ns =
2999 kcalloc(total_vports,
3000 sizeof(*steering->esw_egress_root_ns),
3002 if (!steering->esw_egress_root_ns)
3005 for (i = 0; i < total_vports; i++) {
3006 err = init_egress_acl_root_ns(steering, i);
3008 goto cleanup_root_ns;
3010 steering->esw_egress_acl_vports = total_vports;
3014 for (i--; i >= 0; i--)
3015 cleanup_root_ns(steering->esw_egress_root_ns[i]);
3016 kfree(steering->esw_egress_root_ns);
3017 steering->esw_egress_root_ns = NULL;
3021 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
3023 struct mlx5_flow_steering *steering = dev->priv.steering;
3026 if (!steering->esw_egress_root_ns)
3029 for (i = 0; i < steering->esw_egress_acl_vports; i++)
3030 cleanup_root_ns(steering->esw_egress_root_ns[i]);
3032 kfree(steering->esw_egress_root_ns);
3033 steering->esw_egress_root_ns = NULL;
3036 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3038 struct mlx5_flow_steering *steering = dev->priv.steering;
3042 steering->esw_ingress_root_ns =
3043 kcalloc(total_vports,
3044 sizeof(*steering->esw_ingress_root_ns),
3046 if (!steering->esw_ingress_root_ns)
3049 for (i = 0; i < total_vports; i++) {
3050 err = init_ingress_acl_root_ns(steering, i);
3052 goto cleanup_root_ns;
3054 steering->esw_ingress_acl_vports = total_vports;
3058 for (i--; i >= 0; i--)
3059 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3060 kfree(steering->esw_ingress_root_ns);
3061 steering->esw_ingress_root_ns = NULL;
3065 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3067 struct mlx5_flow_steering *steering = dev->priv.steering;
3070 if (!steering->esw_ingress_root_ns)
3073 for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3074 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3076 kfree(steering->esw_ingress_root_ns);
3077 steering->esw_ingress_root_ns = NULL;
3080 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
3082 struct mlx5_flow_root_namespace *root;
3083 struct mlx5_flow_namespace *ns;
3085 ns = mlx5_get_flow_namespace(dev, type);
3089 root = find_root(&ns->node);
3093 return root->cmds->get_capabilities(root, root->table_type);
3096 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3100 steering->egress_root_ns = create_root_ns(steering,
3102 if (!steering->egress_root_ns)
3105 err = init_root_tree(steering, &egress_root_fs,
3106 &steering->egress_root_ns->ns.node);
3109 set_prio_attrs(steering->egress_root_ns);
3112 cleanup_root_ns(steering->egress_root_ns);
3113 steering->egress_root_ns = NULL;
3117 void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
3119 struct mlx5_flow_steering *steering = dev->priv.steering;
3121 cleanup_root_ns(steering->root_ns);
3122 cleanup_fdb_root_ns(steering);
3123 cleanup_root_ns(steering->port_sel_root_ns);
3124 cleanup_root_ns(steering->sniffer_rx_root_ns);
3125 cleanup_root_ns(steering->sniffer_tx_root_ns);
3126 cleanup_root_ns(steering->rdma_rx_root_ns);
3127 cleanup_root_ns(steering->rdma_tx_root_ns);
3128 cleanup_root_ns(steering->egress_root_ns);
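/* Create each root namespace only when the corresponding ft_support
 * capability is reported by the device; any failure tears down everything
 * that was already created via mlx5_fs_core_cleanup().
 */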
3131 int mlx5_fs_core_init(struct mlx5_core_dev *dev)
3133 struct mlx5_flow_steering *steering = dev->priv.steering;
3136 if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3137 (MLX5_CAP_GEN(dev, nic_flow_table))) ||
3138 ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
3139 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
3140 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
3141 err = init_root_ns(steering);
3146 if (MLX5_ESWITCH_MANAGER(dev)) {
3147 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3148 err = init_fdb_root_ns(steering);
3154 if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3155 err = init_sniffer_rx_root_ns(steering);
3160 if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3161 err = init_sniffer_tx_root_ns(steering);
3166 if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
3167 err = init_port_sel_root_ns(steering);
3172 if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3173 MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3174 err = init_rdma_rx_root_ns(steering);
3179 if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3180 err = init_rdma_tx_root_ns(steering);
3185 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3186 err = init_egress_root_ns(steering);
3194 mlx5_fs_core_cleanup(dev);
3198 void mlx5_fs_core_free(struct mlx5_core_dev *dev)
3200 struct mlx5_flow_steering *steering = dev->priv.steering;
3202 kmem_cache_destroy(steering->ftes_cache);
3203 kmem_cache_destroy(steering->fgs_cache);
3205 mlx5_ft_pool_destroy(dev);
3206 mlx5_cleanup_fc_stats(dev);
3209 int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
3211 struct mlx5_flow_steering *steering;
3214 err = mlx5_init_fc_stats(dev);
3218 err = mlx5_ft_pool_init(dev);
3222 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
3228 steering->dev = dev;
3229 dev->priv.steering = steering;
3231 if (mlx5_fs_dr_is_supported(dev))
3232 steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
3234 steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
3236 steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
3237 sizeof(struct mlx5_flow_group), 0,
3239 steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
3241 if (!steering->ftes_cache || !steering->fgs_cache) {
3249 mlx5_fs_core_free(dev);
3253 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3255 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3256 struct mlx5_ft_underlay_qp *new_uqp;
3259 new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3263 mutex_lock(&root->chain_lock);
3265 if (!root->root_ft) {
3267 goto update_ft_fail;
3270 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3273 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3275 goto update_ft_fail;
3278 new_uqp->qpn = underlay_qpn;
3279 list_add_tail(&new_uqp->list, &root->underlay_qpns);
3281 mutex_unlock(&root->chain_lock);
3286 mutex_unlock(&root->chain_lock);
3290 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
3292 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3294 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3295 struct mlx5_ft_underlay_qp *uqp;
3299 mutex_lock(&root->chain_lock);
3300 list_for_each_entry(uqp, &root->underlay_qpns, list) {
3301 if (uqp->qpn == underlay_qpn) {
3308 mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3314 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3317 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3320 list_del(&uqp->list);
3321 mutex_unlock(&root->chain_lock);
3327 mutex_unlock(&root->chain_lock);
3330 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3332 static struct mlx5_flow_root_namespace
3333 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3335 struct mlx5_flow_namespace *ns;
3337 if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3338 ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3339 ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3341 ns = mlx5_get_flow_namespace(dev, ns_type);
3345 return find_root(&ns->node);
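/* Allocate a modify-header context through the namespace-specific command
 * interface. Typical pairing (sketch; the actions buffer layout is owned by
 * the caller):
 *
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *				      num_actions, actions);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 *	...
 *	mlx5_modify_header_dealloc(dev, mh);
 */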
3348 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3349 u8 ns_type, u8 num_actions,
3350 void *modify_actions)
3352 struct mlx5_flow_root_namespace *root;
3353 struct mlx5_modify_hdr *modify_hdr;
3356 root = get_root_namespace(dev, ns_type);
3358 return ERR_PTR(-EOPNOTSUPP);
3360 modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3362 return ERR_PTR(-ENOMEM);
3364 modify_hdr->ns_type = ns_type;
3365 err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3366 modify_actions, modify_hdr);
3369 return ERR_PTR(err);
3374 EXPORT_SYMBOL(mlx5_modify_header_alloc);
3376 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3377 struct mlx5_modify_hdr *modify_hdr)
3379 struct mlx5_flow_root_namespace *root;
3381 root = get_root_namespace(dev, modify_hdr->ns_type);
3384 root->cmds->modify_header_dealloc(root, modify_hdr);
3387 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3389 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3390 struct mlx5_pkt_reformat_params *params,
3391 enum mlx5_flow_namespace_type ns_type)
3393 struct mlx5_pkt_reformat *pkt_reformat;
3394 struct mlx5_flow_root_namespace *root;
3397 root = get_root_namespace(dev, ns_type);
3399 return ERR_PTR(-EOPNOTSUPP);
3401 pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3403 return ERR_PTR(-ENOMEM);
3405 pkt_reformat->ns_type = ns_type;
3406 pkt_reformat->reformat_type = params->type;
3407 err = root->cmds->packet_reformat_alloc(root, params, ns_type,
3410 kfree(pkt_reformat);
3411 return ERR_PTR(err);
3414 return pkt_reformat;
3416 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3418 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3419 struct mlx5_pkt_reformat *pkt_reformat)
3421 struct mlx5_flow_root_namespace *root;
3423 root = get_root_namespace(dev, pkt_reformat->ns_type);
3426 root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3427 kfree(pkt_reformat);
3429 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
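/* Match definers describe a hardware match layout by format id and mask;
 * mlx5_create_match_definer() allocates one through the root namespace
 * command interface and mlx5_destroy_match_definer() releases it.
 */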
3431 int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
3436 struct mlx5_flow_definer *
3437 mlx5_create_match_definer(struct mlx5_core_dev *dev,
3438 enum mlx5_flow_namespace_type ns_type, u16 format_id,
3441 struct mlx5_flow_root_namespace *root;
3442 struct mlx5_flow_definer *definer;
3445 root = get_root_namespace(dev, ns_type);
3447 return ERR_PTR(-EOPNOTSUPP);
3449 definer = kzalloc(sizeof(*definer), GFP_KERNEL);
3451 return ERR_PTR(-ENOMEM);
3453 definer->ns_type = ns_type;
3454 id = root->cmds->create_match_definer(root, format_id, match_mask);
3456 mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
3464 void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
3465 struct mlx5_flow_definer *definer)
3467 struct mlx5_flow_root_namespace *root;
3469 root = get_root_namespace(dev, definer->ns_type);
3473 root->cmds->destroy_match_definer(root, definer->id);
3477 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3478 struct mlx5_flow_root_namespace *peer_ns)
3480 if (peer_ns && ns->mode != peer_ns->mode) {
3481 mlx5_core_err(ns->dev,
3482 "Can't peer namespace of different steering mode\n");
3486 return ns->cmds->set_peer(ns, peer_ns);
3489 /* This function should be called only at the init stage of the namespace.
3490 * It is not safe to call it while steering operations
3491 * are being executed in the namespace. */
3493 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3494 enum mlx5_flow_steering_mode mode)
3496 struct mlx5_flow_root_namespace *root;
3497 const struct mlx5_flow_cmds *cmds;
3500 root = find_root(&ns->node);
3501 if (&root->ns != ns)
3502 /* Can't set cmds to non root namespace */
3505 if (root->table_type != FS_FT_FDB)
3508 if (root->mode == mode)
3511 if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3512 cmds = mlx5_fs_cmd_get_dr_cmds();
3514 cmds = mlx5_fs_cmd_get_fw_cmds();
3518 err = cmds->create_ns(root);
3520 mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3525 root->cmds->destroy_ns(root);