// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2021 Corigine, Inc. */

#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_ct.h>

#include "conntrack.h"
#include "../nfp_port.h"
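
/* Both merge tables below are keyed by flow-cookie tuples: a
 * (pre_ct, post_ct) pair of tc cookies for nfp_tc_ct_merge_params,
 * and that pair plus the nft flow cookie for nfp_nft_ct_merge_params.
 */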
const struct rhashtable_params nfp_tc_ct_merge_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_tc_merge,
					   hash_node),
	.key_len		= sizeof(unsigned long) * 2,
	.key_offset		= offsetof(struct nfp_fl_ct_tc_merge, cookie),
	.automatic_shrinking	= true,
};

const struct rhashtable_params nfp_nft_ct_merge_params = {
	.head_offset		= offsetof(struct nfp_fl_nft_tc_merge,
					   hash_node),
	.key_len		= sizeof(unsigned long) * 3,
	.key_offset		= offsetof(struct nfp_fl_nft_tc_merge, cookie),
	.automatic_shrinking	= true,
};
static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
					      enum flow_action_id act_id);
/**
 * get_hashentry() - Wrapper around hashtable lookup.
 * @ht:		hashtable where entry could be found
 * @key:	key to lookup
 * @params:	hashtable params
 * @size:	size of entry to allocate if not in table
 *
 * Returns an entry from a hashtable. If the entry does not exist
 * yet, allocate the memory for it and return the new entry.
 */
static void *get_hashentry(struct rhashtable *ht, void *key,
			   const struct rhashtable_params params, size_t size)
{
	void *result;

	result = rhashtable_lookup_fast(ht, key, params);

	if (result)
		return result;

	result = kzalloc(size, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ENOMEM);

	return result;
}
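
/* A pre_ct flow is the tc rule that first sends traffic to conntrack:
 * it matches no ct state, sits on chain 0, and its only ct action is
 * plain ct or ct nat.
 */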
bool is_pre_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action_entry *act;
	struct flow_match_ct ct;
	int i;

	if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state)
			return false;
	}

	if (flow->common.chain_index)
		return false;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (act->id == FLOW_ACTION_CT) {
			/* The pre_ct rule may only have the plain ct or
			 * ct nat action; it cannot contain any other ct
			 * action, e.g. ct commit.
			 */
			if (!act->ct.action || act->ct.action == TCA_CT_ACT_NAT)
				return true;
			else
				return false;
		}
	}

	return false;
}
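
/* A post_ct flow either matches on an established ct state or, in the
 * ct NAT case, carries only a ct clear action on a non-zero chain.
 */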
bool is_post_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action_entry *act;
	bool exist_ct_clear = false;
	struct flow_match_ct ct;
	int i;

	if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
			return true;
	} else {
		/* A post_ct entry cannot contain any ct action except ct clear. */
		flow_action_for_each(i, act, &flow->rule->action) {
			if (act->id == FLOW_ACTION_CT) {
				/* Ignore the ct clear action. */
				if (act->ct.action == TCA_CT_ACT_CLEAR) {
					exist_ct_clear = true;
					continue;
				}

				return false;
			}
		}
		/* When doing NAT with ct, the post_ct entry ignores the ct
		 * status and matches on the NAT fields (sip/dip) instead. In
		 * that case the flow chain index is non-zero and the rule
		 * contains a ct clear action.
		 */
		if (flow->common.chain_index && exist_ct_clear)
			return true;
	}

	return false;
}
/**
 * get_mangled_key() - Mangle the key if a mangle action exists.
 * @rule:	rule that carries the actions
 * @buf:	pointer to key to be mangled
 * @offset:	used to adjust mangled offset in L2/L3/L4 header
 * @key_sz:	size of the key
 * @htype:	mangling type
 *
 * Returns buf, now holding the mangled key.
 */
static void *get_mangled_key(struct flow_rule *rule, void *buf,
			     u32 offset, size_t key_sz,
			     enum flow_action_mangle_base htype)
{
	struct flow_action_entry *act;
	u32 *val = (u32 *)buf;
	u32 off, msk, key;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_MANGLE &&
		    act->mangle.htype == htype) {
			off = act->mangle.offset - offset;
			msk = act->mangle.mask;
			key = act->mangle.val;

			/* Mangling is supposed to be u32 aligned */
			if (off % 4 || off >= key_sz)
				continue;

			val[off >> 2] &= msk;
			val[off >> 2] |= key;
		}
	}

	return buf;
}
/* Only tos and ttl are involved in the flow_match_ip structure, which
 * doesn't conform to the layout of the ip/ipv6 header definition. So
 * they need particular processing here: fill them into the ip/ipv6
 * header, so that mangling actions can work on them directly.
 */
#define NFP_IPV4_TOS_MASK	GENMASK(23, 16)
#define NFP_IPV4_TTL_MASK	GENMASK(31, 24)
#define NFP_IPV6_TCLASS_MASK	GENMASK(27, 20)
#define NFP_IPV6_HLIMIT_MASK	GENMASK(7, 0)
static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
				 bool is_v6)
{
	struct flow_match_ip match;
	/* IPv4's ttl field is in the third dword. */
	__be32 ip_hdr[3];
	u32 tmp, hdr_len;

	flow_rule_match_ip(rule, &match);

	if (is_v6) {
		tmp = FIELD_PREP(NFP_IPV6_TCLASS_MASK, match.key->tos);
		ip_hdr[0] = cpu_to_be32(tmp);
		tmp = FIELD_PREP(NFP_IPV6_HLIMIT_MASK, match.key->ttl);
		ip_hdr[1] = cpu_to_be32(tmp);
		hdr_len = 2 * sizeof(__be32);
	} else {
		tmp = FIELD_PREP(NFP_IPV4_TOS_MASK, match.key->tos);
		ip_hdr[0] = cpu_to_be32(tmp);
		tmp = FIELD_PREP(NFP_IPV4_TTL_MASK, match.key->ttl);
		ip_hdr[2] = cpu_to_be32(tmp);
		hdr_len = 3 * sizeof(__be32);
	}

	get_mangled_key(rule, ip_hdr, 0, hdr_len,
			is_v6 ? FLOW_ACT_MANGLE_HDR_TYPE_IP6 :
				FLOW_ACT_MANGLE_HDR_TYPE_IP4);

	match.key = buf;

	if (is_v6) {
		tmp = be32_to_cpu(ip_hdr[0]);
		match.key->tos = FIELD_GET(NFP_IPV6_TCLASS_MASK, tmp);
		tmp = be32_to_cpu(ip_hdr[1]);
		match.key->ttl = FIELD_GET(NFP_IPV6_HLIMIT_MASK, tmp);
	} else {
		tmp = be32_to_cpu(ip_hdr[0]);
		match.key->tos = FIELD_GET(NFP_IPV4_TOS_MASK, tmp);
		tmp = be32_to_cpu(ip_hdr[2]);
		match.key->ttl = FIELD_GET(NFP_IPV4_TTL_MASK, tmp);
	}

	return buf;
}
/* Note: entry1 and entry2 are not swappable. The ip and tport merge
 * checks are only skipped for a pre_ct/post_ct pair when the pre_ct
 * entry does NAT.
 */
static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
					   struct nfp_fl_ct_flow_entry *entry2)
{
	/* Only a pre_ct entry can have the NFP_FL_ACTION_DO_NAT flag. */
	if ((entry1->flags & NFP_FL_ACTION_DO_NAT) &&
	    entry2->type == CT_TYPE_POST_CT)
		return false;

	return true;
}
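
/* COMPARE_UNMASKED_FIELDS (from conntrack.h) is assumed below to set
 * *out to true when the two matches disagree on any bit that both
 * rules' masks care about.
 */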
/* Note: entry1 and entry2 are not swappable. entry1 should be the
 * earlier flow, whose mangle actions (if any) must be taken into
 * account, and entry2 should be the later flow, whose match is
 * compared against entry1's mangled result.
 */
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
			      struct nfp_fl_ct_flow_entry *entry2)
{
	unsigned long long ovlp_keys;
	bool out, is_v6 = false;
	u8 ip_proto = 0;
	ovlp_keys = entry1->rule->match.dissector->used_keys &
			entry2->rule->match.dissector->used_keys;
	/* Temporary buffer for mangling keys; 64 bytes is enough to cover
	 * the max struct size of keys in the various fields that may be
	 * mangled. Supported fields to mangle:
	 * mac_src/mac_dst (struct flow_match_eth_addrs, 12B)
	 * nw_tos/nw_ttl (struct flow_match_ip, 2B)
	 * nw_src/nw_dst (struct flow_match_ipv4/6_addrs, 32B)
	 * tp_src/tp_dst (struct flow_match_ports, 4B)
	 */
	char buf[64];

	if (entry1->netdev && entry2->netdev &&
	    entry1->netdev != entry2->netdev)
		return -EINVAL;

	/* Check the overlapped fields one by one; the unmasked parts
	 * must not conflict with each other.
	 */
	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match1, match2;

		flow_rule_match_control(entry1->rule, &match1);
		flow_rule_match_control(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match1, match2;

		flow_rule_match_basic(entry1->rule, &match1);
		flow_rule_match_basic(entry2->rule, &match2);

		/* The n_proto field is a must in ct-related flows;
		 * it should be either ipv4 or ipv6.
		 */
		is_v6 = match1.key->n_proto == htons(ETH_P_IPV6);
		/* The ip_proto field is a must when the port field is cared about. */
		ip_proto = match1.key->ip_proto;

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	/* If the pre_ct entry does NAT, the NAT'ed IP lives in the nft
	 * entry and is merge-checked when the nft and post_ct entries
	 * are merged, so skip the IP merge check here.
	 */
	if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
		struct flow_match_ipv4_addrs match1, match2;

		flow_rule_match_ipv4_addrs(entry1->rule, &match1);
		flow_rule_match_ipv4_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf,
					     offsetof(struct iphdr, saddr),
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_IP4);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	/* As above: if the pre_ct entry does NAT, the NAT'ed IP is
	 * merge-checked against the nft entry instead, so skip here.
	 */
	if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
		struct flow_match_ipv6_addrs match1, match2;

		flow_rule_match_ipv6_addrs(entry1->rule, &match1);
		flow_rule_match_ipv6_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf,
					     offsetof(struct ipv6hdr, saddr),
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_IP6);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	/* Likewise, a NAT'ed tport is merge-checked when the nft and
	 * post_ct entries are merged, so skip the tport check here.
	 */
	if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_PORTS)) &&
	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
		enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
		struct flow_match_ports match1, match2;

		flow_rule_match_ports(entry1->rule, &match1);
		flow_rule_match_ports(entry2->rule, &match2);

		if (ip_proto == IPPROTO_UDP)
			htype = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
		else if (ip_proto == IPPROTO_TCP)
			htype = FLOW_ACT_MANGLE_HDR_TYPE_TCP;

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf, 0,
					     sizeof(*match1.key), htype);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match1, match2;

		flow_rule_match_eth_addrs(entry1->rule, &match1);
		flow_rule_match_eth_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf, 0,
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_ETH);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match1, match2;

		flow_rule_match_vlan(entry1->rule, &match1);
		flow_rule_match_vlan(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match1, match2;

		flow_rule_match_mpls(entry1->rule, &match1);
		flow_rule_match_mpls(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match1, match2;

		flow_rule_match_tcp(entry1->rule, &match1);
		flow_rule_match_tcp(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match1, match2;

		flow_rule_match_ip(entry1->rule, &match1);
		flow_rule_match_ip(entry2->rule, &match2);

		match1.key = get_mangled_tos_ttl(entry1->rule, buf, is_v6);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match1, match2;

		flow_rule_match_enc_keyid(entry1->rule, &match1);
		flow_rule_match_enc_keyid(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match1, match2;

		flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
		flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match1, match2;

		flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
		flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_control match1, match2;

		flow_rule_match_enc_control(entry1->rule, &match1);
		flow_rule_match_enc_control(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match1, match2;

		flow_rule_match_enc_ip(entry1->rule, &match1);
		flow_rule_match_enc_ip(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match1, match2;

		flow_rule_match_enc_opts(entry1->rule, &match1);
		flow_rule_match_enc_opts(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	return 0;

check_failed:
	return -EINVAL;
}
static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
				   struct flow_rule *rule)
{
	struct flow_match_vlan match;

	if (unlikely(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)))
		return -EOPNOTSUPP;

	/* post_ct does not match on the VLAN key: can be merged. */
	if (likely(!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)))
		return 0;

	switch (a_in->id) {
	/* pre_ct pops the vlan, so post_ct cannot match on the VLAN key:
	 * cannot be merged.
	 */
	case FLOW_ACTION_VLAN_POP:
		return -EOPNOTSUPP;

	case FLOW_ACTION_VLAN_PUSH:
	case FLOW_ACTION_VLAN_MANGLE:
		flow_rule_match_vlan(rule, &match);
		/* Different vlan id: cannot be merged. */
		if ((match.key->vlan_id & match.mask->vlan_id) ^
		    (a_in->vlan.vid & match.mask->vlan_id))
			return -EOPNOTSUPP;

		/* Different tpid: cannot be merged. */
		if ((match.key->vlan_tpid & match.mask->vlan_tpid) ^
		    (a_in->vlan.proto & match.mask->vlan_tpid))
			return -EOPNOTSUPP;

		/* Different priority: cannot be merged. */
		if ((match.key->vlan_priority & match.mask->vlan_priority) ^
		    (a_in->vlan.prio & match.mask->vlan_priority))
			return -EOPNOTSUPP;

		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Extra check for merges across multiple ct zones; currently supports
 * merge-checking nft entries from different zones.
 */
static int nfp_ct_merge_extra_check(struct nfp_fl_ct_flow_entry *nft_entry,
				    struct nfp_fl_ct_tc_merge *tc_m_entry)
{
	struct nfp_fl_nft_tc_merge *prev_nft_m_entry;
	struct nfp_fl_ct_flow_entry *pre_ct_entry;

	pre_ct_entry = tc_m_entry->pre_ct_parent;
	prev_nft_m_entry = pre_ct_entry->prev_m_entries[pre_ct_entry->num_prev_m_entries - 1];

	return nfp_ct_merge_check(prev_nft_m_entry->nft_parent, nft_entry);
}
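
/* pre_ct VLAN rewrites must stay consistent with the post_ct match
 * (checked via nfp_ct_check_vlan_merge()); MPLS rewrites, and any
 * VLAN/MPLS rewrites in the nft entry, cannot be merged.
 */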
static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
				  struct nfp_fl_ct_flow_entry *post_ct_entry,
				  struct nfp_fl_ct_flow_entry *nft_entry)
{
	struct flow_action_entry *act;
	int i, err;

	/* Check for pre_ct->action conflicts */
	flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_MANGLE:
			err = nfp_ct_check_vlan_merge(act, post_ct_entry->rule);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MPLS_PUSH:
		case FLOW_ACTION_MPLS_POP:
		case FLOW_ACTION_MPLS_MANGLE:
			return -EOPNOTSUPP;
		default:
			break;
		}
	}

	/* Check for nft->action conflicts */
	flow_action_for_each(i, act, &nft_entry->rule->action) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_MANGLE:
		case FLOW_ACTION_MPLS_PUSH:
		case FLOW_ACTION_MPLS_POP:
		case FLOW_ACTION_MPLS_MANGLE:
			return -EOPNOTSUPP;
		default:
			break;
		}
	}
	return 0;
}
static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
			     struct nfp_fl_ct_flow_entry *nft_entry)
{
	struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
	struct flow_action_entry *ct_met;
	struct flow_match_ct ct;
	int i;

	ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
	if (ct_met && (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT))) {
		u32 *act_lbl;

		act_lbl = ct_met->ct_metadata.labels;
		flow_rule_match_ct(post_ct_entry->rule, &ct);
		for (i = 0; i < 4; i++) {
			if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
			    (act_lbl[i] & ct.mask->ct_labels[i]))
				return -EINVAL;
		}

		if ((ct.key->ct_mark & ct.mask->ct_mark) ^
		    (ct_met->ct_metadata.mark & ct.mask->ct_mark))
			return -EINVAL;

		return 0;
	} else {
		/* A post_ct entry with a ct clear action will not match on
		 * the ct status when the nft entry does NAT.
		 */
		if (nft_entry->flags & NFP_FL_ACTION_DO_MANGLE)
			return 0;
	}

	return -EINVAL;
}
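
/* Build the byte-offset map of each present key layer inside the
 * final key/mask blobs and return the total key size.
 */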
static int
nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
{
	int key_size;

	/* This field must always be present */
	key_size = sizeof(struct nfp_flower_meta_tci);
	map[FLOW_PAY_META_TCI] = 0;

	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
		map[FLOW_PAY_EXT_META] = key_size;
		key_size += sizeof(struct nfp_flower_ext_meta);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
		map[FLOW_PAY_INPORT] = key_size;
		key_size += sizeof(struct nfp_flower_in_port);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
		map[FLOW_PAY_MAC_MPLS] = key_size;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
		map[FLOW_PAY_L4] = key_size;
		key_size += sizeof(struct nfp_flower_tp_ports);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
		map[FLOW_PAY_IPV4] = key_size;
		key_size += sizeof(struct nfp_flower_ipv4);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
		map[FLOW_PAY_IPV6] = key_size;
		key_size += sizeof(struct nfp_flower_ipv6);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
		map[FLOW_PAY_QINQ] = key_size;
		key_size += sizeof(struct nfp_flower_vlan);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		map[FLOW_PAY_GRE] = key_size;
		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
			key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
		else
			key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
	}

	if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
	    (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
		map[FLOW_PAY_UDP_TUN] = key_size;
		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
			key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		else
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
		map[FLOW_PAY_GENEVE_OPT] = key_size;
		key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return key_size;
}
/* Get the csum flags according to the ip proto and the mangle action. */
static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u32 *csum)
{
	if (a_in->id != FLOW_ACTION_MANGLE)
		return;

	switch (a_in->mangle.htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		*csum |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
		if (ip_proto == IPPROTO_TCP)
			*csum |= TCA_CSUM_UPDATE_FLAG_TCP;
		else if (ip_proto == IPPROTO_UDP)
			*csum |= TCA_CSUM_UPDATE_FLAG_UDP;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		*csum |= TCA_CSUM_UPDATE_FLAG_TCP;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		*csum |= TCA_CSUM_UPDATE_FLAG_UDP;
		break;
	default:
		break;
	}
}
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
					struct nfp_flower_priv *priv,
					struct net_device *netdev,
					struct nfp_fl_payload *flow_pay,
					int num_rules)
{
	enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
	struct flow_action_entry *a_in;
	int i, j, id, num_actions = 0;
	struct flow_rule *a_rule;
	int err = 0, offset = 0;

	for (i = 0; i < num_rules; i++)
		num_actions += rules[i]->action.num_entries;

	/* Over-allocate (one slot per nft rule) to make sure there is
	 * enough room to append a checksum action when doing NAT.
	 */
	a_rule = flow_rule_alloc(num_actions + (num_rules / 2));
	if (!a_rule)
		return -ENOMEM;

	/* The post_ct entry has at least one action. */
	if (rules[num_rules - 1]->action.num_entries != 0)
		tmp_stats = rules[num_rules - 1]->action.entries[0].hw_stats;

	/* Actions need a BASIC dissector. */
	a_rule->match = rules[0]->match;

	for (j = 0; j < num_rules; j++) {
		u32 csum_updated = 0;
		u8 ip_proto = 0;

		if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
			struct flow_match_basic match;

			/* ip_proto is the only field that is needed in later
			 * compile_action, needed to set the correct checksum
			 * flags. It doesn't really matter which input rule's
			 * ip_proto field we take, as the earlier merge checks
			 * would have made sure that they don't conflict. We do
			 * not know which of the subflows has the ip_proto
			 * filled in, so we need to iterate through the
			 * subflows and assign the proper subflow to a_rule.
			 */
			flow_rule_match_basic(rules[j], &match);
			if (match.mask->ip_proto) {
				a_rule->match = rules[j]->match;
				ip_proto = match.key->ip_proto;
			}
		}

		for (i = 0; i < rules[j]->action.num_entries; i++) {
			a_in = &rules[j]->action.entries[i];
			id = a_in->id;

			/* Ignore CT related actions as these would already
			 * have been taken care of by previous checks, and we
			 * do not send any CT actions to the firmware.
			 */
			switch (id) {
			case FLOW_ACTION_CT:
			case FLOW_ACTION_GOTO:
			case FLOW_ACTION_CT_METADATA:
				continue;
			default:
				/* nft entries are generated by tc ct; their
				 * mangle actions do not care about the stats,
				 * so inherit the post_ct entry's stats to
				 * satisfy flow_action_hw_stats_check().
				 * nft entry flow rules sit at odd array
				 * indices.
				 */
				if (j & 0x01) {
					if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
						a_in->hw_stats = tmp_stats;
					nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
				}
				memcpy(&a_rule->action.entries[offset++],
				       a_in, sizeof(struct flow_action_entry));
				break;
			}
		}
		/* When doing NAT the nft entry has a mangle action but no
		 * checksum action; the hardware fixes up the IPv4 and TCP/UDP
		 * checksums automatically, so add a csum action here to
		 * satisfy the csum action check.
		 */
		if (csum_updated) {
			struct flow_action_entry *csum_action;

			csum_action = &a_rule->action.entries[offset++];
			csum_action->id = FLOW_ACTION_CSUM;
			csum_action->csum_flags = csum_updated;
			csum_action->hw_stats = tmp_stats;
		}
	}

	/* Some actions would have been ignored, so update the num_entries field */
	a_rule->action.num_entries = offset;
	err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
	kfree(a_rule);

	return err;
}
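
/* Offload a fully merged flow. The rules array holds, in order, the
 * (pre_ct, nft) rule pairs of any previous zones followed by the
 * current pre_ct, nft and post_ct rules.
 */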
static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
	struct flow_rule *rules[NFP_MAX_ENTRY_RULES];
	struct nfp_fl_ct_flow_entry *pre_ct_entry;
	struct nfp_fl_key_ls key_layer, tmp_layer;
	struct nfp_flower_priv *priv = zt->priv;
	u16 key_map[_FLOW_PAY_LAYERS_MAX];
	struct nfp_fl_payload *flow_pay;
	u8 *key, *msk, *kdata, *mdata;
	struct nfp_port *port = NULL;
	int num_rules, err, i, j = 0;
	struct net_device *netdev;
	bool qinq_sup;
	u32 port_id;
	u16 offset;

	netdev = m_entry->netdev;
	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
	num_rules = pre_ct_entry->num_prev_m_entries * 2 + _CT_TYPE_MAX;

	for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++) {
		rules[j++] = pre_ct_entry->prev_m_entries[i]->tc_m_parent->pre_ct_parent->rule;
		rules[j++] = pre_ct_entry->prev_m_entries[i]->nft_parent->rule;
	}

	rules[j++] = m_entry->tc_m_parent->pre_ct_parent->rule;
	rules[j++] = m_entry->nft_parent->rule;
	rules[j++] = m_entry->tc_m_parent->post_ct_parent->rule;

	memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
	memset(&key_map, 0, sizeof(key_map));

	/* Calculate the resultant key layer and size for offload */
	for (i = 0; i < num_rules; i++) {
		err = nfp_flower_calculate_key_layers(priv->app,
						      m_entry->netdev,
						      &tmp_layer, rules[i],
						      &tun_type, NULL);
		if (err)
			return err;

		key_layer.key_layer |= tmp_layer.key_layer;
		key_layer.key_layer_two |= tmp_layer.key_layer_two;
	}
	key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);

	flow_pay = nfp_flower_allocate_new(&key_layer);
	if (!flow_pay)
		return -ENOMEM;

	memset(flow_pay->unmasked_data, 0, key_layer.key_size);
	memset(flow_pay->mask_data, 0, key_layer.key_size);

	kdata = flow_pay->unmasked_data;
	mdata = flow_pay->mask_data;

	offset = key_map[FLOW_PAY_META_TCI];
	key = kdata + offset;
	msk = mdata + offset;
	nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
				(struct nfp_flower_meta_tci *)msk,
				key_layer.key_layer);

	if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_EXT_META];
		key = kdata + offset;
		msk = mdata + offset;
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
					    key_layer.key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_layer.key_layer_two);
	}

	/* Using in_port from the -trk rule. The tc merge checks should
	 * already have verified that the ingress netdevs are the same.
	 */
	port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
	offset = key_map[FLOW_PAY_INPORT];
	key = kdata + offset;
	msk = mdata + offset;
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
				      port_id, false, tun_type, NULL);
	if (err)
		goto ct_offload_err;
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, NULL);
	if (err)
		goto ct_offload_err;

	/* The following works on the assumption that the previous checks
	 * have already filtered out flows that have different values for
	 * the different layers. Here we iterate through all the rules and
	 * merge their respective masked values (cared bits); the basic
	 * method is:
	 * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
	 * final_mask = r1_mask | r2_mask | r3_mask
	 * If none of the rules contains a match that is also fine; it
	 * simply means that the layer is not present.
	 */
	if (!qinq_sup) {
		for (i = 0; i < num_rules; i++) {
			offset = key_map[FLOW_PAY_META_TCI];
			key = kdata + offset;
			msk = mdata + offset;
			nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
					       (struct nfp_flower_meta_tci *)msk,
					       rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_MAC_MPLS];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
					       (struct nfp_flower_mac_mpls *)msk,
					       rules[i]);
			err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
						      (struct nfp_flower_mac_mpls *)msk,
						      rules[i], NULL);
			if (err)
				goto ct_offload_err;
		}
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_IPV4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
						(struct nfp_flower_ipv4 *)msk,
						rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_IPV6];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
						(struct nfp_flower_ipv6 *)msk,
						rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_L4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
						 (struct nfp_flower_tp_ports *)msk,
						 rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
		offset = key_map[FLOW_PAY_QINQ];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
						(struct nfp_flower_vlan *)msk,
						rules[i]);
		}
	}

	if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		offset = key_map[FLOW_PAY_GRE];
		key = kdata + offset;
		msk = mdata + offset;
		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv6_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
			gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
			dst = &gre_match->ipv6.dst;

			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
			if (!entry) {
				err = -ENOMEM;
				goto ct_offload_err;
			}

			flow_pay->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv4_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
			dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			flow_pay->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(priv->app, dst);
		}
	}

	if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		offset = key_map[FLOW_PAY_UDP_TUN];
		key = kdata + offset;
		msk = mdata + offset;
		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv6_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
			udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
			dst = &udp_match->ipv6.dst;

			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
			if (!entry) {
				err = -ENOMEM;
				goto ct_offload_err;
			}

			flow_pay->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv4_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
			dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			flow_pay->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(priv->app, dst);
		}

		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			offset = key_map[FLOW_PAY_GENEVE_OPT];
			key = kdata + offset;
			msk = mdata + offset;
			for (i = 0; i < num_rules; i++)
				nfp_flower_compile_geneve_opt(key, msk, rules[i]);
		}
	}

	/* Merge actions into flow_pay */
	err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay, num_rules);
	if (err)
		goto ct_offload_err;

	/* Use the pointer address as the cookie, but set the last bit to 1.
	 * This is to avoid the 'is_merge_flow' check from detecting this as
	 * an already merged flow. This works since address alignment means
	 * that the last bit for pointer addresses will be 0.
	 */
	flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
	err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
					flow_pay, netdev, NULL);
	if (err)
		goto ct_offload_err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto ct_release_offload_meta_err;

	err = nfp_flower_xmit_flow(priv->app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto ct_remove_rhash_err;

	m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
	m_entry->flow_pay = flow_pay;

	if (port)
		port->tc_offload_cnt++;

	return err;

ct_remove_rhash_err:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
ct_release_offload_meta_err:
	nfp_modify_flow_metadata(priv->app, flow_pay);
ct_offload_err:
	if (flow_pay->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
	return err;
}
static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
				 struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_port *port = NULL;
	int err = 0;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
	if (!flow_pay)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, flow_pay);
	if (err)
		goto err_free_merge_flow;

	if (flow_pay->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);

	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);

	if (!flow_pay->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, flow_pay);
	if (port)
		port->tc_offload_cnt--;
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(flow_pay, rcu);
	return err;
}
static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
			       struct nfp_fl_ct_flow_entry *nft_entry,
			       struct nfp_fl_ct_tc_merge *tc_m_entry)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_nft_tc_merge *nft_m_entry;
	unsigned long new_cookie[3];
	int err;

	pre_ct_entry = tc_m_entry->pre_ct_parent;
	post_ct_entry = tc_m_entry->post_ct_parent;

	err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
	if (err)
		return err;

	/* Check that the two tc flows are also compatible with
	 * the nft entry. No need to check the pre_ct and post_ct
	 * entries as that was already done during pre_merge.
	 * The nft entry does not have a chain populated, so
	 * skip this check.
	 */
	err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
	if (err)
		return err;
	err = nfp_ct_merge_check(nft_entry, post_ct_entry);
	if (err)
		return err;
	err = nfp_ct_check_meta(post_ct_entry, nft_entry);
	if (err)
		return err;

	if (pre_ct_entry->num_prev_m_entries > 0) {
		err = nfp_ct_merge_extra_check(nft_entry, tc_m_entry);
		if (err)
			return err;
	}

	/* Combine the tc_merge and nft cookies for this entry's cookie. */
	new_cookie[0] = tc_m_entry->cookie[0];
	new_cookie[1] = tc_m_entry->cookie[1];
	new_cookie[2] = nft_entry->cookie;
	nft_m_entry = get_hashentry(&zt->nft_merge_tb,
				    &new_cookie,
				    nfp_nft_ct_merge_params,
				    sizeof(*nft_m_entry));

	if (IS_ERR(nft_m_entry))
		return PTR_ERR(nft_m_entry);

	/* nft_m_entry already present, not merging again */
	if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
	nft_m_entry->zt = zt;
	nft_m_entry->tc_m_parent = tc_m_entry;
	nft_m_entry->nft_parent = nft_entry;
	nft_m_entry->tc_flower_cookie = 0;
	/* Copy the netdev from the pre_ct entry. When the tc_m_entry was
	 * created it only combined the flows if the netdevs were the same,
	 * so we can use either of them.
	 */
	nft_m_entry->netdev = pre_ct_entry->netdev;

	/* Add this entry to the tc_m_list and nft_flow lists */
	list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
	list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);

	err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
				     nfp_nft_ct_merge_params);
	if (err)
		goto err_nft_ct_merge_insert;
	zt->nft_merge_count++;

	if (post_ct_entry->goto_chain_index > 0)
		return nfp_fl_create_new_pre_ct(nft_m_entry);

	/* Generate offload structure and send to nfp */
	err = nfp_fl_ct_add_offload(nft_m_entry);
	if (err)
		goto err_nft_ct_offload;

	return err;

err_nft_ct_offload:
	nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
			      nft_m_entry->netdev);
err_nft_ct_merge_insert:
	list_del(&nft_m_entry->tc_merge_list);
	list_del(&nft_m_entry->nft_flow_list);
	kfree(nft_m_entry);
	return err;
}
static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
			      struct nfp_fl_ct_flow_entry *ct_entry1,
			      struct nfp_fl_ct_flow_entry *ct_entry2)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
	struct nfp_fl_ct_tc_merge *m_entry;
	unsigned long new_cookie[2];
	int err;

	if (ct_entry1->type == CT_TYPE_PRE_CT) {
		pre_ct_entry = ct_entry1;
		post_ct_entry = ct_entry2;
	} else {
		post_ct_entry = ct_entry1;
		pre_ct_entry = ct_entry2;
	}

	/* Check that the chain_index of the filter matches the
	 * chain_index of the GOTO action.
	 */
	if (post_ct_entry->chain_index != pre_ct_entry->goto_chain_index)
		return -EINVAL;

	err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
	if (err)
		return err;

	new_cookie[0] = pre_ct_entry->cookie;
	new_cookie[1] = post_ct_entry->cookie;
	m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
				nfp_tc_ct_merge_params, sizeof(*m_entry));
	if (IS_ERR(m_entry))
		return PTR_ERR(m_entry);

	/* m_entry already present, not merging again */
	if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
	m_entry->zt = zt;
	m_entry->post_ct_parent = post_ct_entry;
	m_entry->pre_ct_parent = pre_ct_entry;

	/* Add this entry to the pre_ct and post_ct lists */
	list_add(&m_entry->post_ct_list, &post_ct_entry->children);
	list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
	INIT_LIST_HEAD(&m_entry->children);

	err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		goto err_ct_tc_merge_insert;
	zt->tc_merge_count++;

	/* Merge with existing nft flows */
	list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
				 list_node) {
		nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
	}

	return 0;

err_ct_tc_merge_insert:
	list_del(&m_entry->post_ct_list);
	list_del(&m_entry->pre_ct_list);
	kfree(m_entry);
	return err;
}
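
/* Look up (or create) the zone table entry for a ct zone. A single
 * wildcard entry, priv->ct_zone_wc, covers rules whose ct_zone match
 * is fully masked out.
 */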
static struct
nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
					 u16 zone, bool wildcarded)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	if (wildcarded && priv->ct_zone_wc)
		return priv->ct_zone_wc;

	if (!wildcarded) {
		zt = get_hashentry(&priv->ct_zone_table, &zone,
				   nfp_zone_table_params, sizeof(*zt));

		/* If priv is set this is an existing entry, just return it */
		if (IS_ERR(zt) || zt->priv)
			return zt;
	} else {
		zt = kzalloc(sizeof(*zt), GFP_KERNEL);
		if (!zt)
			return ERR_PTR(-ENOMEM);
	}

	zt->zone = zone;
	zt->priv = priv;
	zt->nft = NULL;

	/* init the various hash tables and lists */
	INIT_LIST_HEAD(&zt->pre_ct_list);
	INIT_LIST_HEAD(&zt->post_ct_list);
	INIT_LIST_HEAD(&zt->nft_flows_list);

	err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
	if (err)
		goto err_tc_merge_tb_init;

	err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
	if (err)
		goto err_nft_merge_tb_init;

	if (wildcarded) {
		priv->ct_zone_wc = zt;
	} else {
		err = rhashtable_insert_fast(&priv->ct_zone_table,
					     &zt->hash_node,
					     nfp_zone_table_params);
		if (err)
			goto err_zone_insert;
	}

	return zt;

err_zone_insert:
	rhashtable_destroy(&zt->nft_merge_tb);
err_nft_merge_tb_init:
	rhashtable_destroy(&zt->tc_merge_tb);
err_tc_merge_tb_init:
	kfree(zt);
	return ERR_PTR(err);
}
static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
		if (match.key->ingress_ifindex & match.mask->ingress_ifindex)
			return __dev_get_by_index(&init_net,
						  match.key->ingress_ifindex);
	}

	return NULL;
}
static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_action)
{
	if (mangle_action->id != FLOW_ACTION_MANGLE)
		return;

	switch (mangle_action->mangle.htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		mangle_action->mangle.val = (__force u32)cpu_to_be32(mangle_action->mangle.val);
		mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
		return;

	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
		mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
		return;

	default:
		return;
	}
}
static int nfp_nft_ct_set_flow_flag(struct flow_action_entry *act,
				    struct nfp_fl_ct_flow_entry *entry)
{
	switch (act->id) {
	case FLOW_ACTION_CT:
		if (act->ct.action == TCA_CT_ACT_NAT)
			entry->flags |= NFP_FL_ACTION_DO_NAT;
		break;

	case FLOW_ACTION_MANGLE:
		entry->flags |= NFP_FL_ACTION_DO_MANGLE;
		break;

	default:
		break;
	}

	return 0;
}
static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
					 struct net_device *netdev,
					 struct flow_cls_offload *flow,
					 bool is_nft, struct netlink_ext_ack *extack)
{
	struct nf_flow_match *nft_match = NULL;
	struct nfp_fl_ct_flow_entry *entry;
	struct nfp_fl_ct_map_entry *map;
	struct flow_action_entry *act;
	int err, i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
	if (!entry->rule) {
		err = -ENOMEM;
		goto err_pre_ct_rule;
	}

	/* nft flows get destroyed after the callback returns, so a full
	 * copy is needed instead of just a reference.
	 */
	if (is_nft) {
		nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
		if (!nft_match) {
			err = -ENOMEM;
			goto err_pre_ct_act;
		}
		memcpy(&nft_match->dissector, flow->rule->match.dissector,
		       sizeof(nft_match->dissector));
		memcpy(&nft_match->mask, flow->rule->match.mask,
		       sizeof(nft_match->mask));
		memcpy(&nft_match->key, flow->rule->match.key,
		       sizeof(nft_match->key));
		entry->rule->match.dissector = &nft_match->dissector;
		entry->rule->match.mask = &nft_match->mask;
		entry->rule->match.key = &nft_match->key;

		if (!netdev)
			netdev = get_netdev_from_rule(entry->rule);
	} else {
		entry->rule->match.dissector = flow->rule->match.dissector;
		entry->rule->match.mask = flow->rule->match.mask;
		entry->rule->match.key = flow->rule->match.key;
	}

	entry->zt = zt;
	entry->netdev = netdev;
	entry->cookie = flow->cookie > 0 ? flow->cookie : (unsigned long)entry;
	entry->chain_index = flow->common.chain_index;
	entry->tun_offset = NFP_FL_CT_NO_TUN;

	/* Copy over the action data. Unfortunately we do not get a handle
	 * to the original tcf_action data, and the flow objects get
	 * destroyed, so we cannot just save a pointer to this either; the
	 * data has to be copied over.
	 */
	entry->rule->action.num_entries = flow->rule->action.num_entries;
	flow_action_for_each(i, act, &flow->rule->action) {
		struct flow_action_entry *new_act;

		new_act = &entry->rule->action.entries[i];
		memcpy(new_act, act, sizeof(struct flow_action_entry));
		/* nft entry mangle fields are in host byte order; they need
		 * translating to network byte order.
		 */
		if (is_nft)
			nfp_nft_ct_translate_mangle_action(new_act);

		nfp_nft_ct_set_flow_flag(new_act, entry);
		/* Entunnel is a special case; the tunnel info needs to be
		 * allocated and copied.
		 */
		if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
			struct ip_tunnel_info *tun = act->tunnel;
			size_t tun_size = sizeof(*tun) + tun->options_len;

			new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
			if (!new_act->tunnel) {
				err = -ENOMEM;
				goto err_pre_ct_tun_cp;
			}
			entry->tun_offset = i;
		}
	}

	INIT_LIST_HEAD(&entry->children);

	if (flow->cookie == 0)
		return entry;

	/* Now add a ct map entry to flower-priv */
	map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
			    nfp_ct_map_params, sizeof(*map));
	if (IS_ERR(map)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry creation failed");
		err = -ENOMEM;
		goto err_ct_flow_insert;
	}
	map->cookie = flow->cookie;
	map->ct_entry = entry;
	err = rhashtable_insert_fast(&zt->priv->ct_map_table,
				     &map->hash_node,
				     nfp_ct_map_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry table add failed");
		goto err_map_insert;
	}

	return entry;

err_map_insert:
	kfree(map);
err_ct_flow_insert:
	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
err_pre_ct_tun_cp:
	kfree(nft_match);
err_pre_ct_act:
	kfree(entry->rule);
err_pre_ct_rule:
	kfree(entry);
	return ERR_PTR(err);
}
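
/* Tear down one nft merge entry: delete the offloaded flow from HW if
 * present, unlink it from its parents, and clean up any next-zone
 * pre_ct entry it spawned.
 */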
static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_entry->zt;

	/* Flow is in HW, need to delete */
	if (m_entry->tc_flower_cookie) {
		err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
					    m_entry->netdev);
		if (err)
			return;
	}

	WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
					    &m_entry->hash_node,
					    nfp_nft_ct_merge_params));
	zt->nft_merge_count--;
	list_del(&m_entry->tc_merge_list);
	list_del(&m_entry->nft_flow_list);

	if (m_entry->next_pre_ct_entry) {
		struct nfp_fl_ct_map_entry pre_ct_map_ent;

		pre_ct_map_ent.ct_entry = m_entry->next_pre_ct_entry;
		pre_ct_map_ent.cookie = 0;
		nfp_fl_ct_del_flow(&pre_ct_map_ent);
	}

	kfree(m_entry);
}
static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
{
	struct nfp_fl_nft_tc_merge *m_entry, *tmp;

	/* These entries are part of two lists: one is a list of nft_entries
	 * and the other is a list of tc_merge structures. Iterate through
	 * the relevant list and clean up the entries.
	 */

	if (is_nft_flow) {
		/* Need to iterate through the list of nft_flow entries */
		struct nfp_fl_ct_flow_entry *ct_entry = entry;

		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
					 nft_flow_list) {
			cleanup_nft_merge_entry(m_entry);
		}
	} else {
		/* Need to iterate through the list of tc_merged_flow entries */
		struct nfp_fl_ct_tc_merge *ct_entry = entry;

		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
					 tc_merge_list) {
			cleanup_nft_merge_entry(m_entry);
		}
	}
}
static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_ent->zt;
	err = rhashtable_remove_fast(&zt->tc_merge_tb,
				     &m_ent->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		pr_warn("WARNING: could not remove merge_entry from hashtable\n");
	zt->tc_merge_count--;
	list_del(&m_ent->post_ct_list);
	list_del(&m_ent->pre_ct_list);

	if (!list_empty(&m_ent->children))
		nfp_free_nft_merge_children(m_ent, false);
	kfree(m_ent);
}
static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
{
	struct nfp_fl_ct_tc_merge *m_ent, *tmp;

	switch (entry->type) {
	case CT_TYPE_PRE_CT:
		list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
			nfp_del_tc_merge_entry(m_ent);
		}
		break;
	case CT_TYPE_POST_CT:
		list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
			nfp_del_tc_merge_entry(m_ent);
		}
		break;
	default:
		break;
	}
}
void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
{
	list_del(&entry->list_node);

	if (!list_empty(&entry->children)) {
		if (entry->type == CT_TYPE_NFT)
			nfp_free_nft_merge_children(entry, true);
		else
			nfp_free_tc_merge_children(entry);
	}

	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);

	if (entry->type == CT_TYPE_NFT) {
		struct nf_flow_match *nft_match;

		nft_match = container_of(entry->rule->match.dissector,
					 struct nf_flow_match, dissector);
		kfree(nft_match);
	}

	kfree(entry->rule);
	kfree(entry);
}
static struct flow_action_entry *get_flow_act_ct(struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	/* More than one ct action may be present in a flow rule;
	 * return the first one that is not a ct clear action.
	 */
	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_CT && act->ct.action != TCA_CT_ACT_CLEAR)
			return act;
	}

	return NULL;
}

static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
					      enum flow_action_id act_id)
{
	struct flow_action_entry *act = NULL;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == act_id)
			return act;
	}
	return NULL;
}
static void
nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
			struct nfp_fl_ct_zone_entry *zt_src,
			struct nfp_fl_ct_zone_entry *zt_dst)
{
	struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
	struct list_head *ct_list;

	if (ct_entry1->type == CT_TYPE_PRE_CT)
		ct_list = &zt_src->post_ct_list;
	else if (ct_entry1->type == CT_TYPE_POST_CT)
		ct_list = &zt_src->pre_ct_list;
	else
		return;

	list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
				 list_node) {
		nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
	}
}
static void
nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
			 struct nfp_fl_ct_zone_entry *zt)
{
	struct nfp_fl_ct_tc_merge *tc_merge_entry;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
	rhashtable_walk_start(&iter);
	while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(tc_merge_entry))
			continue;
		rhashtable_walk_stop(&iter);
		nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
		rhashtable_walk_start(&iter);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack,
			    struct nfp_fl_nft_tc_merge *m_entry)
{
	struct flow_action_entry *ct_act, *ct_goto;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	ct_act = get_flow_act_ct(flow->rule);
	if (!ct_act) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack action empty in conntrack offload");
		return -EOPNOTSUPP;
	}

	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
	if (!ct_goto) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack requires ACTION_GOTO");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	if (!zt->nft) {
		zt->nft = ct_act->ct.flow_table;
		err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "offload error: Could not register nft_callback");
			return err;
		}
	}

	/* Add entry to pre_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);
	ct_entry->type = CT_TYPE_PRE_CT;
	ct_entry->chain_index = flow->common.chain_index;
	ct_entry->goto_chain_index = ct_goto->chain_index;

	if (m_entry) {
		struct nfp_fl_ct_flow_entry *pre_ct_entry;
		int i;

		pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
		for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++)
			ct_entry->prev_m_entries[i] = pre_ct_entry->prev_m_entries[i];
		ct_entry->prev_m_entries[i++] = m_entry;
		ct_entry->num_prev_m_entries = i;

		m_entry->next_pre_ct_entry = ct_entry;
	}

	list_add(&ct_entry->list_node, &zt->pre_ct_list);
	zt->pre_ct_count++;

	nfp_ct_merge_tc_entries(ct_entry, zt, zt);

	/* Need to check and merge with tables in the wc_zone as well */
	if (priv->ct_zone_wc)
		nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);

	return 0;
}
int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
			     struct net_device *netdev,
			     struct flow_cls_offload *flow,
			     struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	bool wildcarded = false;
	struct flow_match_ct ct;
	struct flow_action_entry *ct_goto;

	flow_rule_match_ct(rule, &ct);
	if (!ct.mask->ct_zone) {
		wildcarded = true;
	} else if (ct.mask->ct_zone != U16_MAX) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: partially wildcarded ct_zone is not supported");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	/* Add entry to post_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);

	ct_entry->type = CT_TYPE_POST_CT;
	ct_entry->chain_index = flow->common.chain_index;
	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
	ct_entry->goto_chain_index = ct_goto ? ct_goto->chain_index : 0;
	list_add(&ct_entry->list_node, &zt->post_ct_list);
	zt->post_ct_count++;

	if (wildcarded) {
		/* Iterate through all zone tables if not empty, look for
		 * merges with pre_ct entries and merge them.
		 */
		struct rhashtable_iter iter;
		struct nfp_fl_ct_zone_entry *zone_table;

		rhashtable_walk_enter(&priv->ct_zone_table, &iter);
		rhashtable_walk_start(&iter);
		while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(zone_table))
				continue;
			rhashtable_walk_stop(&iter);
			nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
			rhashtable_walk_start(&iter);
		}
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	} else {
		nfp_ct_merge_tc_entries(ct_entry, zt, zt);
	}

	return 0;
}
int nfp_fl_create_new_pre_ct(struct nfp_fl_nft_tc_merge *m_entry)
{
	struct nfp_fl_ct_flow_entry *pre_ct_entry, *post_ct_entry;
	struct flow_cls_offload new_pre_ct_flow;
	int err;

	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
	if (pre_ct_entry->num_prev_m_entries >= NFP_MAX_RECIRC_CT_ZONES - 1)
		return -1;

	post_ct_entry = m_entry->tc_m_parent->post_ct_parent;
	memset(&new_pre_ct_flow, 0, sizeof(struct flow_cls_offload));
	new_pre_ct_flow.rule = post_ct_entry->rule;
	new_pre_ct_flow.common.chain_index = post_ct_entry->chain_index;

	err = nfp_fl_ct_handle_pre_ct(pre_ct_entry->zt->priv,
				      pre_ct_entry->netdev,
				      &new_pre_ct_flow, NULL,
				      m_entry);
	return err;
}
static void
nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
		    enum ct_entry_type type, u64 *m_pkts,
		    u64 *m_bytes, u64 *m_used)
{
	struct nfp_flower_priv *priv = nft_merge->zt->priv;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	nfp_flow = nft_merge->flow_pay;
	if (!nfp_flow)
		return;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
	*m_pkts += priv->stats[ctx_id].pkts;
	*m_bytes += priv->stats[ctx_id].bytes;
	*m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);

	/* If the request is for a sub_flow which is part of a tunnel merged
	 * flow then update stats from tunnel merged flows first.
	 */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(priv->app, nfp_flow);

	if (type != CT_TYPE_NFT) {
		/* Update nft cached stats */
		flow_stats_update(&nft_merge->nft_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
	} else {
		/* Update pre_ct cached stats */
		flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
		/* Update post_ct cached stats */
		flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
	}

	/* Update previous pre_ct/post_ct/nft flow stats */
	if (nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries > 0) {
		struct nfp_fl_nft_tc_merge *tmp_nft_merge;
		int i;

		for (i = 0; i < nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries; i++) {
			tmp_nft_merge = nft_merge->tc_m_parent->pre_ct_parent->prev_m_entries[i];
			flow_stats_update(&tmp_nft_merge->tc_m_parent->pre_ct_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
			flow_stats_update(&tmp_nft_merge->tc_m_parent->post_ct_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
			flow_stats_update(&tmp_nft_merge->nft_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	}

	/* Reset stats from the nfp */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
}
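
/* Aggregate HW stats for a ct flow from all of its merged offloads,
 * mirroring them into the partner entries so that each tc rule
 * reports the full counters.
 */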
int nfp_fl_ct_stats(struct flow_cls_offload *flow,
		    struct nfp_fl_ct_map_entry *ct_map_ent)
{
	struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
	struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
	struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;

	u64 pkts = 0, bytes = 0, used = 0;
	u64 m_pkts, m_bytes, m_used;

	spin_lock_bh(&ct_entry->zt->priv->stats_lock);

	if (ct_entry->type == CT_TYPE_PRE_CT) {
		/* Iterate tc_merge entries associated with this flow */
		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
					 pre_ct_list) {
			m_pkts = 0;
			m_bytes = 0;
			m_used = 0;
			/* Iterate nft_merge entries associated with this tc_merge flow */
			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
						 tc_merge_list) {
				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
						    &m_pkts, &m_bytes, &m_used);
			}
			pkts += m_pkts;
			bytes += m_bytes;
			used = max_t(u64, used, m_used);
			/* Update post_ct partner */
			flow_stats_update(&tc_merge->post_ct_parent->stats,
					  m_bytes, m_pkts, 0, m_used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	} else if (ct_entry->type == CT_TYPE_POST_CT) {
		/* Iterate tc_merge entries associated with this flow */
		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
					 post_ct_list) {
			m_pkts = 0;
			m_bytes = 0;
			m_used = 0;
			/* Iterate nft_merge entries associated with this tc_merge flow */
			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
						 tc_merge_list) {
				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
						    &m_pkts, &m_bytes, &m_used);
			}
			pkts += m_pkts;
			bytes += m_bytes;
			used = max_t(u64, used, m_used);
			/* Update pre_ct partner */
			flow_stats_update(&tc_merge->pre_ct_parent->stats,
					  m_bytes, m_pkts, 0, m_used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	} else {
		/* Iterate nft_merge entries associated with this nft flow */
		list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
					 nft_flow_list) {
			nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
					    &pkts, &bytes, &used);
		}
	}

	/* Add stats from this request to stats potentially cached by
	 * previous requests.
	 */
	flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
			  FLOW_ACTION_HW_STATS_DELAYED);
	/* Finally update the flow stats from the original stats request */
	flow_stats_update(&flow->stats, ct_entry->stats.bytes,
			  ct_entry->stats.pkts, 0,
			  ct_entry->stats.lastused,
			  FLOW_ACTION_HW_STATS_DELAYED);
	/* Stats have been synced to the original flow, so the cache can
	 * now be cleared.
	 */
	ct_entry->stats.pkts = 0;
	ct_entry->stats.bytes = 0;
	spin_unlock_bh(&ct_entry->zt->priv->stats_lock);

	return 0;
}
static bool
nfp_fl_ct_offload_nft_supported(struct flow_cls_offload *flow)
{
	struct flow_rule *flow_rule = flow->rule;
	struct flow_action *flow_action =
		&flow_rule->action;
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		if (act->id == FLOW_ACTION_CT_METADATA) {
			enum ip_conntrack_info ctinfo =
				act->ct_metadata.cookie & NFCT_INFOMASK;

			return ctinfo != IP_CT_NEW;
		}
	}

	return false;
}
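
/* Handle one nft flow offload request forwarded from the nf flow
 * table: add, destroy, or report stats for the nft entry.
 */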
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct netlink_ext_ack *extack = NULL;

	ASSERT_RTNL();

	extack = flow->common.extack;
	switch (flow->command) {
	case FLOW_CLS_REPLACE:
		if (!nfp_fl_ct_offload_nft_supported(flow))
			return -EOPNOTSUPP;

		/* Netfilter can request offload multiple times for the same
		 * flow - protect against adding duplicates.
		 */
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		if (!ct_map_ent) {
			ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
			if (IS_ERR(ct_entry))
				return PTR_ERR(ct_entry);
			ct_entry->type = CT_TYPE_NFT;
			list_add(&ct_entry->list_node, &zt->nft_flows_list);
			zt->nft_flows_count++;
			nfp_ct_merge_nft_with_tc(ct_entry, zt);
		}
		return 0;
	case FLOW_CLS_DESTROY:
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		return nfp_fl_ct_del_flow(ct_map_ent);
	case FLOW_CLS_STATS:
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		if (ct_map_ent)
			return nfp_fl_ct_stats(flow, ct_map_ent);
		break;
	default:
		break;
	}
	return -EOPNOTSUPP;
}
int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *flow = type_data;
	struct nfp_fl_ct_zone_entry *zt = cb_priv;
	int err = -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		rtnl_lock();
		err = nfp_fl_ct_offload_nft_flow(zt, flow);
		rtnl_unlock();
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
static void
nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
{
	struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
	struct nfp_fl_ct_map_entry *ct_map_ent;

	list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
				 list_node) {
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
						    &nft_entry->cookie,
						    nfp_ct_map_params);
		nfp_fl_ct_del_flow(ct_map_ent);
	}
}
int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
{
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	struct rhashtable *m_table;

	if (!ct_map_ent)
		return -ENOENT;

	zt = ct_map_ent->ct_entry->zt;
	ct_entry = ct_map_ent->ct_entry;
	m_table = &zt->priv->ct_map_table;

	switch (ct_entry->type) {
	case CT_TYPE_PRE_CT:
		zt->pre_ct_count--;
		if (ct_map_ent->cookie > 0)
			rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
					       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_entry);
		if (ct_map_ent->cookie > 0)
			kfree(ct_map_ent);

		if (!zt->pre_ct_count) {
			zt->nft = NULL;
			nfp_fl_ct_clean_nft_entries(zt);
		}
		break;
	case CT_TYPE_POST_CT:
		zt->post_ct_count--;
		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
				       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_entry);
		kfree(ct_map_ent);
		break;
	case CT_TYPE_NFT:
		zt->nft_flows_count--;
		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
				       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
		kfree(ct_map_ent);
		break;
	default:
		break;
	}

	return 0;
}