// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};
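
/* Build a control message containing the rule's metadata, unmasked key,
 * mask and action data, and hand it to the firmware via the control
 * channel. Lengths are temporarily converted to long words for the
 * firmware and restored to bytes afterwards.
 */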
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
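	/* For example, with NFP_FL_LW_SIZ == 2 a 40-byte key_len is
	 * reported to firmware as 40 >> 2 == 10 four-byte long words.
	 */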

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
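
/* Account for geneve TLV options in the match key: reject option data
 * longer than the firmware supports, otherwise reserve space for it in
 * the key layout.
 */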
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}
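
/* Classify a UDP tunnel match by destination port (VXLAN or geneve),
 * updating the key layer flags and key size accordingly and verifying
 * that the loaded firmware supports the requested tunnel features.
 */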
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
		*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

		if (!enc_op)
			break;

		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two,
						key_size, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}
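
/* Walk the TC flower dissector keys used by the rule and translate them
 * into NFP key layer flags and an overall key size, rejecting any match
 * the hardware or firmware cannot support.
 */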
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_cls_offload *flow,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL};
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}
		if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
			return -EOPNOTSUPP;
		}

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* check if GRE, which has no enc_ports */
			if (netif_is_gretap(netdev)) {
				*tun_type = NFP_FL_TUNNEL_GRE;
				key_layer |= NFP_FLOWER_LAYER_EXT_META;
				key_size += sizeof(struct nfp_flower_ext_meta);
				key_layer_two |= NFP_FLOWER_LAYER2_GRE;
				key_size +=
					sizeof(struct nfp_flower_ipv4_gre_tun);

				if (enc_op.key) {
					NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
					return -EOPNOTSUPP;
				}
			} else {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG are only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
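
/* Allocate a flow payload with key, mask and action buffers sized from
 * the calculated key layout.
 */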
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;
	flow_pay->pre_tun_rule.dev = NULL;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}
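
/* Fold the effect of a sub-flow's actions into its merge check fields:
 * any header bits rewritten by an action are treated as matched, so a
 * subsequent flow matching on them can still be merged safely.
 */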
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			memset(&merge->ipv4, 0xff,
			       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}
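
/* Copy the masks of each key layer present in a flow into the merge
 * check structure so they can be compared bitwise against another flow.
 */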
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}
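
/* Copy any leading pre-actions (pre-tunnel, pre-LAG) from a sub-flow to
 * the merge flow's action list, returning the number of bytes copied and
 * flagging whether a tunnel pre-action was seen.
 */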
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			/* fall through */
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}
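
/* After a tunnel push, only an optional leading VLAN push followed by
 * output actions constitute a valid merge; the VLAN push, if any, is
 * returned to the caller.
 */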
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
			*vlan = (struct nfp_fl_push_vlan *)a;
		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Ensure any VLAN push also has an egress action. */
	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
		return -EOPNOTSUPP;

	return 0;
}
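
/* Merge a post-tunnel VLAN push into the tunnel set action itself, as
 * the firmware applies the outer VLAN fields as part of the tunnel
 * header push on egress.
 */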
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
	struct nfp_fl_set_ipv4_tun *tun;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
			tun = (struct nfp_fl_set_ipv4_tun *)a;
			tun->outer_vlan_tpid = vlan->vlan_tpid;
			tun->outer_vlan_tci = vlan->vlan_tci;

			return 0;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Return error if no tunnel action is found. */
	return -EOPNOTSUPP;
}
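
/* Build the merge flow's action list: pre-actions from both sub-flows
 * first, then the remaining actions of sub_flow1 (minus its final
 * output) followed by those of sub_flow2.
 */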
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
	 * valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
						  &post_tun_push_vlan);
		if (err)
			return err;

		if (post_tun_push_vlan) {
			pre_off2 += sizeof(*post_tun_push_vlan);
			sub2_act_len -= sizeof(*post_tun_push_vlan);
		}
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

	if (post_tun_push_vlan) {
		/* Update tunnel action in merge to include VLAN push. */
		err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
						 post_tun_push_vlan);
		if (err)
			return err;

		merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
	}

	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 in success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct flow_cls_offload merge_tc_off;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	extack = merge_tc_off.common.extack;
	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev, extack);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_validate_pre_tun_rule()
 * @app:	Pointer to the APP handle
 * @flow:	Pointer to NFP flow representation of rule
 * @extack:	Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
				 struct nfp_fl_payload *flow,
				 struct netlink_ext_ack *extack)
{
	/* The validation body is elided in this excerpt; the full
	 * implementation checks the rule's match fields and actions for
	 * pre-tunnel rule compatibility.
	 */
	return 0;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	if (flow_pay->pre_tun_rule.dev) {
		err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
		if (err)
			goto err_destroy_flow;
	}

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}
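
/* Tear down a merge flow when one of its sub-flows is deleted: either
 * delete the merge flow outright or, if the originating sub-flow still
 * exists, revert the hardware entry back to it.
 */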
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}
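
/* Merge flows have their own stats context. Drain it and credit the
 * packets, bytes and last-used time to every sub-flow that formed the
 * merge, since TC queries stats through the sub-flows.
 */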
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}
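
/* Dispatch a TC flower command for a repr netdev to the appropriate
 * add/delete/stats handler; non-802.3 protocols are rejected.
 */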
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct flow_cls_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case FLOW_CLS_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case FLOW_CLS_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct flow_cls_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	list_del(&priv->list);
	kfree(priv);
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct flow_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	      nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
					       cb_priv, cb_priv,
					       nfp_flower_setup_indr_tc_release);
		if (IS_ERR(block_cb)) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_indr_block_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}