net/mlx5e: Refactor FTE setup code to be more clear
[platform/kernel/linux-starfive.git] drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "ipsec.h"
#include "fs_core.h"

#define NUM_IPSEC_FTE BIT(15)

struct mlx5e_ipsec_rx_err {
        struct mlx5_flow_table *ft;
        struct mlx5_flow_handle *rule;
        struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_ipsec_ft {
        struct mutex mutex; /* Protect changes to this struct */
        struct mlx5_flow_table *sa;
        u32 refcnt;
};

struct mlx5e_ipsec_rx {
        struct mlx5e_ipsec_ft ft;
        struct mlx5_flow_group *miss_group;
        struct mlx5_flow_handle *miss_rule;
        struct mlx5_flow_destination default_dest;
        struct mlx5e_ipsec_rx_err rx_err;
};

struct mlx5e_ipsec_tx {
        struct mlx5e_ipsec_ft ft;
        struct mlx5_flow_namespace *ns;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
        if (family == AF_INET)
                return MLX5_TT_IPV4_IPSEC_ESP;
        return MLX5_TT_IPV6_IPSEC_ESP;
}

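/* Create an auto-grouped flow table with a single reserved entry at the
 * end of the table; the RX SA table uses that entry for its catch-all
 * miss rule.
 */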
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
                                               int level, int prio,
                                               int max_num_groups)
{
        struct mlx5_flow_table_attr ft_attr = {};

        ft_attr.autogroup.num_reserved_entries = 1;
        ft_attr.autogroup.max_num_groups = max_num_groups;
        ft_attr.max_fte = NUM_IPSEC_FTE;
        ft_attr.level = level;
        ft_attr.prio = prio;

        return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

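/* Catch-all rule of the RX error table: copy the 7 bit ipsec_syndrome
 * reported by the device into metadata reg B[24:30] and forward the
 * packet to the default TTC destination of its address family.
 */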
static int rx_err_add_rule(struct mlx5_core_dev *mdev,
                           struct mlx5e_ipsec_rx *rx,
                           struct mlx5e_ipsec_rx_err *rx_err)
{
        u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_modify_hdr *modify_hdr;
        struct mlx5_flow_handle *fte;
        struct mlx5_flow_spec *spec;
        int err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
        MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
        MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
        MLX5_SET(copy_action_in, action, src_offset, 0);
        MLX5_SET(copy_action_in, action, length, 7);
        MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
        MLX5_SET(copy_action_in, action, dst_offset, 24);

        modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
                                              1, action);

        if (IS_ERR(modify_hdr)) {
                err = PTR_ERR(modify_hdr);
                mlx5_core_err(mdev,
                              "fail to alloc ipsec copy modify_header_id err=%d\n", err);
                goto out_spec;
        }

        /* create fte */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
                          MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_act.modify_hdr = modify_hdr;
        fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
                                  &rx->default_dest, 1);
        if (IS_ERR(fte)) {
                err = PTR_ERR(fte);
                mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
                goto out;
        }

        kvfree(spec);
        rx_err->rule = fte;
        rx_err->copy_modify_hdr = modify_hdr;
        return 0;

out:
        mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
        kvfree(spec);
        return err;
}

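/* Add a miss group and a match-all miss rule on the last (reserved)
 * entry of the RX SA table, so ESP traffic that matches no SA is
 * forwarded to the default TTC destination.
 */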
static int rx_fs_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table *ft = rx->ft.sa;
        struct mlx5_flow_group *miss_group;
        struct mlx5_flow_handle *miss_rule;
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        u32 *flow_group_in;
        int err = 0;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!flow_group_in || !spec) {
                err = -ENOMEM;
                goto out;
        }

        /* Create miss_group */
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
        miss_group = mlx5_create_flow_group(ft, flow_group_in);
        if (IS_ERR(miss_group)) {
                err = PTR_ERR(miss_group);
                mlx5_core_err(mdev, "fail to create ipsec rx miss_group err=%d\n", err);
                goto out;
        }
        rx->miss_group = miss_group;

        /* Create miss rule */
        miss_rule =
                mlx5_add_flow_rules(ft, spec, &flow_act, &rx->default_dest, 1);
        if (IS_ERR(miss_rule)) {
                mlx5_destroy_flow_group(rx->miss_group);
                err = PTR_ERR(miss_rule);
                mlx5_core_err(mdev, "fail to create ipsec rx miss_rule err=%d\n", err);
                goto out;
        }
        rx->miss_rule = miss_rule;
out:
        kvfree(flow_group_in);
        kvfree(spec);
        return err;
}

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
        mlx5_del_flow_rules(rx->miss_rule);
        mlx5_destroy_flow_group(rx->miss_group);
        mlx5_destroy_flow_table(rx->ft.sa);

        mlx5_del_flow_rules(rx->rx_err.rule);
        mlx5_modify_header_dealloc(mdev, rx->rx_err.copy_modify_hdr);
        mlx5_destroy_flow_table(rx->rx_err.ft);
}

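/* Build the RX steering pipeline for one address family: first the error
 * table with its syndrome-copy rule, then the SA table with its catch-all
 * miss rule.
 */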
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
                     struct mlx5e_ipsec_rx *rx, u32 family)
{
        struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
        struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
        struct mlx5_flow_table *ft;
        int err;

        rx->default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));

        ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
                             MLX5E_NIC_PRIO, 1);
        if (IS_ERR(ft))
                return PTR_ERR(ft);

        rx->rx_err.ft = ft;
        err = rx_err_add_rule(mdev, rx, &rx->rx_err);
        if (err)
                goto err_add;

        /* Create FT */
        ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
                             1);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_fs_ft;
        }
        rx->ft.sa = ft;

        err = rx_fs_create(mdev, rx);
        if (err)
                goto err_fs;

        return 0;

err_fs:
        mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
        mlx5_del_flow_rules(rx->rx_err.rule);
        mlx5_modify_header_dealloc(mdev, rx->rx_err.copy_modify_hdr);
err_add:
        mlx5_destroy_flow_table(rx->rx_err.ft);
        return err;
}

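/* Reference-counted access to the per-family RX tables: the first user
 * creates them and points the TTC ESP traffic type at the SA table; the
 * last user (rx_ft_put) restores the default TTC destination and tears
 * the tables down.
 */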
static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
                                        struct mlx5e_ipsec *ipsec, u32 family)
{
        struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
        struct mlx5_flow_destination dest = {};
        struct mlx5e_ipsec_rx *rx;
        int err = 0;

        if (family == AF_INET)
                rx = ipsec->rx_ipv4;
        else
                rx = ipsec->rx_ipv6;

        mutex_lock(&rx->ft.mutex);
        if (rx->ft.refcnt)
                goto skip;

        /* create FT */
        err = rx_create(mdev, ipsec, rx, family);
        if (err)
                goto out;

        /* connect */
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = rx->ft.sa;
        mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);

skip:
        rx->ft.refcnt++;
out:
        mutex_unlock(&rx->ft.mutex);
        if (err)
                return ERR_PTR(err);
        return rx;
}

static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
                      u32 family)
{
        struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
        struct mlx5e_ipsec_rx *rx;

        if (family == AF_INET)
                rx = ipsec->rx_ipv4;
        else
                rx = ipsec->rx_ipv6;

        mutex_lock(&rx->ft.mutex);
        rx->ft.refcnt--;
        if (rx->ft.refcnt)
                goto out;

        /* disconnect */
        mlx5_ttc_fwd_default_dest(ttc, family2tt(family));

        /* remove FT */
        rx_destroy(mdev, rx);

out:
        mutex_unlock(&rx->ft.mutex);
}

/* IPsec TX flow steering */
static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
        struct mlx5_flow_table *ft;

        ft = ipsec_ft_create(tx->ns, 0, 0, 1);
        if (IS_ERR(ft))
                return PTR_ERR(ft);

        tx->ft.sa = ft;
        return 0;
}

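/* Reference-counted access to the TX SA table: created on first use and
 * destroyed by tx_ft_put() when the last reference is dropped.
 */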
static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
                                        struct mlx5e_ipsec *ipsec)
{
        struct mlx5e_ipsec_tx *tx = ipsec->tx;
        int err = 0;

        mutex_lock(&tx->ft.mutex);
        if (tx->ft.refcnt)
                goto skip;

        err = tx_create(mdev, tx);
        if (err)
                goto out;
skip:
        tx->ft.refcnt++;
out:
        mutex_unlock(&tx->ft.mutex);
        if (err)
                return ERR_PTR(err);
        return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec)
{
        struct mlx5e_ipsec_tx *tx = ipsec->tx;

        mutex_lock(&tx->ft.mutex);
        tx->ft.refcnt--;
        if (tx->ft.refcnt)
                goto out;

        mlx5_destroy_flow_table(tx->ft.sa);
out:
        mutex_unlock(&tx->ft.mutex);
}

static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
                            __be32 *daddr)
{
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
                            __be32 *daddr)
{
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
        memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                            outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
        memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                            outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
        /* ESP header; ip_protocol lives in outer_headers, so enable the
         * outer headers match criteria here rather than misc parameters.
         */
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
{
        /* SPI number */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
        MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
}

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
        /* Non fragmented */
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
{
        /* Add IPsec indicator in metadata_reg_a */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

        MLX5_SET(fte_match_param, spec->match_criteria,
                 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
        MLX5_SET(fte_match_param, spec->match_value,
                 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}

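/* Per-SA RX rule: match the outer IP addresses, SPI and ESP protocol of
 * non-fragmented packets, decrypt with the SA's IPsec object, mark
 * metadata reg B with bit 31 and the object ID, and forward to the RX
 * error table for syndrome handling.
 */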
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5_modify_hdr *modify_hdr = NULL;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5e_ipsec_rx *rx;
        int err = 0;

        rx = rx_ft_get(mdev, ipsec, attrs->family);
        if (IS_ERR(rx))
                return PTR_ERR(rx);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out_err;
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_spi(spec, attrs->spi);
        setup_fte_esp(spec);
        setup_fte_no_frags(spec);

        /* Set bit[31] ipsec marker */
        /* Set bit[23-0] ipsec_obj_id */
        MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
        MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
        MLX5_SET(set_action_in, action, data,
                 (sa_entry->ipsec_obj_id | BIT(31)));
        MLX5_SET(set_action_in, action, offset, 0);
        MLX5_SET(set_action_in, action, length, 32);

        modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
                                              1, action);
        if (IS_ERR(modify_hdr)) {
                err = PTR_ERR(modify_hdr);
                mlx5_core_err(mdev,
                              "fail to alloc ipsec set modify_header_id err=%d\n", err);
                modify_hdr = NULL;
                goto out_err;
        }

        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                          MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
                          MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        flow_act.modify_hdr = modify_hdr;
        dest.ft = rx->rx_err.ft;
        rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
                goto out_err;
        }

        ipsec_rule->rule = rule;
        ipsec_rule->set_modify_hdr = modify_hdr;
        goto out;

out_err:
        if (modify_hdr)
                mlx5_modify_header_dealloc(mdev, modify_hdr);
        rx_ft_put(mdev, ipsec, attrs->family);

out:
        kvfree(spec);
        return err;
}

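/* Per-SA TX rule: match the outer IP addresses, SPI, ESP protocol and the
 * IPsec indication in metadata reg A, encrypt with the SA's IPsec object
 * and allow the packet to continue to transmission.
 */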
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5e_ipsec_tx *tx;
        int err = 0;

        tx = tx_ft_get(mdev, ipsec);
        if (IS_ERR(tx))
                return PTR_ERR(tx);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_spi(spec, attrs->spi);
        setup_fte_esp(spec);
        setup_fte_no_frags(spec);
        setup_fte_reg_a(spec);

        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
                          MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
        rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, NULL, 0);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
                goto out;
        }

        sa_entry->ipsec_rule.rule = rule;

out:
        kvfree(spec);
        if (err)
                tx_ft_put(ipsec);
        return err;
}

int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
                return tx_add_rule(sa_entry);

        return rx_add_rule(sa_entry);
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

        mlx5_del_flow_rules(ipsec_rule->rule);

        if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
                tx_ft_put(sa_entry->ipsec);
                return;
        }

        mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
        rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
        if (!ipsec->tx)
                return;

        mutex_destroy(&ipsec->tx->ft.mutex);
        WARN_ON(ipsec->tx->ft.refcnt);
        kfree(ipsec->tx);

        mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
        WARN_ON(ipsec->rx_ipv4->ft.refcnt);
        kfree(ipsec->rx_ipv4);

        mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
        WARN_ON(ipsec->rx_ipv6->ft.refcnt);
        kfree(ipsec->rx_ipv6);
}

int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_flow_namespace *ns;
        int err = -ENOMEM;

        ns = mlx5_get_flow_namespace(ipsec->mdev,
                                     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
        if (!ns)
                return -EOPNOTSUPP;

        ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
        if (!ipsec->tx)
                return -ENOMEM;

        ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
        if (!ipsec->rx_ipv4)
                goto err_rx_ipv4;

        ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
        if (!ipsec->rx_ipv6)
                goto err_rx_ipv6;

        mutex_init(&ipsec->tx->ft.mutex);
        mutex_init(&ipsec->rx_ipv4->ft.mutex);
        mutex_init(&ipsec->rx_ipv6->ft.mutex);
        ipsec->tx->ns = ns;

        return 0;

err_rx_ipv6:
        kfree(ipsec->rx_ipv4);
err_rx_ipv4:
        kfree(ipsec->tx);
        return err;
}