net/mlx5e: CT: Fix setting flow_source for smfs ct tuples
drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#include <linux/refcount.h>

#include "en_tc.h"
#include "en/tc_priv.h"
#include "en/tc_ct.h"
#include "en/tc/ct_fs.h"

#include "lib/smfs.h"

#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
        netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)

struct mlx5_ct_fs_smfs_matcher {
        struct mlx5dr_matcher *dr_matcher;
        struct list_head list;
        int prio;
        refcount_t ref;
};

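/* Six matchers per table (CT and CT-NAT), one for each supported tuple type:
 * {ipv4, ipv6} x {tcp, udp, gre}. The array is indexed as
 * ipv4 * 3 + tcp * 2 + gre (see mlx5_ct_fs_smfs_matcher_get()).
 */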
struct mlx5_ct_fs_smfs_matchers {
        struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
        struct list_head used;
};

struct mlx5_ct_fs_smfs {
        struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
        struct mlx5_ct_fs_smfs_matchers matchers;
        struct mlx5_ct_fs_smfs_matchers matchers_nat;
        struct mlx5dr_action *fwd_action;
        struct mlx5_flow_table *ct_nat;
        struct mutex lock; /* Guards matchers */
};

struct mlx5_ct_fs_smfs_rule {
        struct mlx5_ct_fs_rule fs_rule;
        struct mlx5dr_rule *rule;
        struct mlx5dr_action *count_action;
        struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
};

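/* Build the match criteria (mask) for a CT tuple: IP version (or ethertype),
 * IP protocol, source/destination addresses, L4 ports (TCP/UDP only),
 * the RST|FIN TCP flags, and the CT zone register.
 */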
static inline void
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
                          bool gre)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);

        if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version)))
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
        else
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        if (likely(ipv4)) {
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
                                 src_ipv4_src_ipv6.ipv4_layout.ipv4);
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
                                 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        } else {
                memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       0xFF,
                       MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
                                         dst_ipv4_dst_ipv6.ipv6_layout.ipv6));
                memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       0xFF,
                       MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
                                         src_ipv4_src_ipv6.ipv6_layout.ipv6));
        }

        if (likely(tcp)) {
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport);
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
                         ntohs(MLX5_CT_TCP_FLAGS_MASK));
        } else if (!gre) {
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
        }

        mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK);
}

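/* Create a SW steering matcher for one tuple type at the given priority.
 * The flow spec only carries the match criteria here and is freed before
 * returning.
 */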
static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
                               bool tcp, bool gre, u32 priority)
{
        struct mlx5dr_matcher *dr_matcher;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;

        dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
        kvfree(spec); /* kvzalloc'd memory must be freed with kvfree() */
        if (!dr_matcher)
                return ERR_PTR(-EINVAL);

        return dr_matcher;
}

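/* Get (or lazily create) the matcher for a tuple type. The fast path takes
 * a reference without locking; matcher creation is serialized by
 * fs_smfs->lock.
 */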
static struct mlx5_ct_fs_smfs_matcher *
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
{
        struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
        struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
        struct mlx5_ct_fs_smfs_matchers *matchers;
        struct mlx5dr_matcher *dr_matcher;
        struct mlx5dr_table *tbl;
        struct list_head *prev;
        int prio;

        matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
        smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];

        if (refcount_inc_not_zero(&smfs_matcher->ref))
                return smfs_matcher;

        mutex_lock(&fs_smfs->lock);

        /* Retry with the lock held, as another thread might have already
         * created the relevant matcher before we acquired the lock.
         */
        if (refcount_inc_not_zero(&smfs_matcher->ref))
                goto out_unlock;

        /* Find the next available priority in the sorted used list */
        prio = 0;
        prev = &matchers->used;
        list_for_each_entry(m, &matchers->used, list) {
                prev = &m->list;

                if (m->prio == prio)
                        prio = m->prio + 1;
                else
                        break;
        }

        tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
        dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
        if (IS_ERR(dr_matcher)) {
                netdev_warn(fs->netdev,
                            "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
                            nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));

                smfs_matcher = ERR_CAST(dr_matcher);
                goto out_unlock;
        }

        smfs_matcher->dr_matcher = dr_matcher;
        smfs_matcher->prio = prio;
        list_add(&smfs_matcher->list, prev);
        refcount_set(&smfs_matcher->ref, 1);

out_unlock:
        mutex_unlock(&fs_smfs->lock);
        return smfs_matcher;
}

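/* Drop a matcher reference; the last reference destroys the matcher and
 * removes it from the used list under fs_smfs->lock.
 */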
static void
mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
{
        struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

        if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
                return;

        mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
        list_del(&smfs_matcher->list);
        mutex_unlock(&fs_smfs->lock);
}

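/* Resolve the SW steering (dr) tables backing the CT flow tables and create
 * the shared forward-to-post_ct action. Fails with -EOPNOTSUPP when any of
 * the tables is not backed by SW steering.
 */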
static int
mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
                     struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
{
        struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
        struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

        post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
        ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
        ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct);
        fs_smfs->ct_nat = ct_nat;

        if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
                netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables\n");
                return -EOPNOTSUPP;
        }

        ct_dbg("using smfs steering");

        fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
        if (!fs_smfs->fwd_action)
                return -EINVAL;

        fs_smfs->ct_tbl = ct_tbl;
        fs_smfs->ct_nat_tbl = ct_nat_tbl;
        mutex_init(&fs_smfs->lock);
        INIT_LIST_HEAD(&fs_smfs->matchers.used);
        INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);

        return 0;
}

static void
mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
{
        struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);

        mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}

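/* Only exact-match 5-tuples (plus RST|FIN flags for TCP) are offloadable via
 * the smfs matchers; reject rules whose dissector uses any other key
 * combination.
 */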
static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
{
#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
        const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
        const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
        const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
        const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
        const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
        const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
        const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);

        return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
                used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
}

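/* Validate that the conntrack tuple matches exactly what the smfs matchers
 * cover: full n_proto/ip_proto masks, full port masks for TCP/UDP, and the
 * expected TCP flags mask.
 */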
static bool
mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
{
        struct flow_match_ipv4_addrs ipv4_addrs;
        struct flow_match_ipv6_addrs ipv6_addrs;
        struct flow_match_control control;
        struct flow_match_basic basic;
        struct flow_match_ports ports;
        struct flow_match_tcp tcp;

        if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
                ct_dbg("rule uses unexpected dissectors (0x%08x)",
                       flow_rule->match.dissector->used_keys);
                return false;
        }

        flow_rule_match_basic(flow_rule, &basic);
        flow_rule_match_control(flow_rule, &control);
        flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
        flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
        if (basic.key->ip_proto != IPPROTO_GRE)
                flow_rule_match_ports(flow_rule, &ports);
        if (basic.key->ip_proto == IPPROTO_TCP)
                flow_rule_match_tcp(flow_rule, &tcp);

        if (basic.mask->n_proto != htons(0xFFFF) ||
            (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
            basic.mask->ip_proto != 0xFF ||
            (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
             basic.key->ip_proto != IPPROTO_GRE)) {
                ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
                       ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
                       basic.key->ip_proto, basic.mask->ip_proto);
                return false;
        }

        if (basic.key->ip_proto != IPPROTO_GRE &&
            (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
                ct_dbg("rule uses unexpected ports match (src 0x%04x, dst 0x%04x)",
                       ports.mask->src, ports.mask->dst);
                return false;
        }

        if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
                ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
                return false;
        }

        return true;
}

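/* Offload one CT tuple: count, modify header (register restore / NAT), then
 * forward to post_ct. The matcher is selected by table (CT vs. CT-NAT) and
 * tuple type.
 */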
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
                            struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
{
        struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
        struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
        struct mlx5_ct_fs_smfs_rule *smfs_rule;
        struct mlx5dr_action *actions[5];
        struct mlx5dr_rule *rule;
        int num_actions = 0, err;
        bool nat, tcp, ipv4, gre;

        if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
                return ERR_PTR(-EOPNOTSUPP);

        smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
        if (!smfs_rule)
                return ERR_PTR(-ENOMEM);

        smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter));
        if (!smfs_rule->count_action) {
                err = -EINVAL;
                goto err_count;
        }

        actions[num_actions++] = smfs_rule->count_action;
        actions[num_actions++] = attr->modify_hdr->action.dr_action;
        actions[num_actions++] = fs_smfs->fwd_action;

        nat = (attr->ft == fs_smfs->ct_nat);
        ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
        tcp = MLX5_GET(fte_match_param, spec->match_value,
                       outer_headers.ip_protocol) == IPPROTO_TCP;
        gre = MLX5_GET(fte_match_param, spec->match_value,
                       outer_headers.ip_protocol) == IPPROTO_GRE;

        smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
        if (IS_ERR(smfs_matcher)) {
                err = PTR_ERR(smfs_matcher);
                goto err_matcher;
        }

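        /* Pass the spec's flow_source to SW steering when creating the rule;
         * setting it correctly for smfs ct tuples is the fix this commit
         * makes (see the commit subject).
         */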
        rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
                                     spec->flow_context.flow_source);
        if (!rule) {
                err = -EINVAL;
                goto err_create;
        }

        smfs_rule->rule = rule;
        smfs_rule->smfs_matcher = smfs_matcher;

        return &smfs_rule->fs_rule;

err_create:
        mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
err_matcher:
        mlx5_smfs_action_destroy(smfs_rule->count_action);
err_count:
        kfree(smfs_rule);
        return ERR_PTR(err);
}

static void
mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
{
        struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
                                                              struct mlx5_ct_fs_smfs_rule,
                                                              fs_rule);

        mlx5_smfs_rule_destroy(smfs_rule->rule);
        mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
        mlx5_smfs_action_destroy(smfs_rule->count_action);
        kfree(smfs_rule);
}

static struct mlx5_ct_fs_ops fs_smfs_ops = {
        .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
        .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,

        .init = mlx5_ct_fs_smfs_init,
        .destroy = mlx5_ct_fs_smfs_destroy,

        .priv_size = sizeof(struct mlx5_ct_fs_smfs),
};

struct mlx5_ct_fs_ops *
mlx5_ct_fs_smfs_ops_get(void)
{
        return &fs_smfs_ops;
}