1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Physical Function ethernet driver
4 * Copyright (C) 2021 Marvell.
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/inetdevice.h>
9 #include <linux/rhashtable.h>
10 #include <linux/bitfield.h>
11 #include <net/flow_dissector.h>
12 #include <net/pkt_cls.h>
13 #include <net/tc_act/tc_gact.h>
14 #include <net/tc_act/tc_mirred.h>
15 #include <net/tc_act/tc_vlan.h>
19 #include "otx2_common.h"
21 /* Egress rate limiting definitions */
/* Encoding limits for the burst/rate exponent+mantissa fields below. */
22 #define MAX_BURST_EXPONENT 0x0FULL
23 #define MAX_BURST_MANTISSA 0xFFULL
/* Largest burst expressible: ((256 + 0xFF) << (1 + 0xF)) / 256 = 130,816 bytes */
24 #define MAX_BURST_SIZE 130816ULL
25 #define MAX_RATE_DIVIDER_EXPONENT 12ULL
26 #define MAX_RATE_EXPONENT 0x0FULL
27 #define MAX_RATE_MANTISSA 0xFFULL
29 /* Bitfields in NIX_TLX_PIR register */
30 #define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
31 #define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
32 #define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
33 #define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
34 #define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
/* Snapshot of HW MCAM counters for one offloaded flow; used to compute the
 * delta reported via flow_stats_update().
 * NOTE(review): struct bodies appear partially elided in this chunk — the
 * members below belong to two different structs (otx2_tc_flow_stats and,
 * from "node" onward, otx2_tc_flow).
 */
36 struct otx2_tc_flow_stats {
/* Hash-table linkage: flows are stored in tc_info->flow_table keyed by cookie */
43 struct rhash_head node;
47 struct otx2_tc_flow_stats stats;
48 spinlock_t lock; /* lock for stats */
/* Translate a burst size in bytes into the exponent/mantissa encoding used
 * by the TLX_BURST_* fields of NIX_AF_TLX_PIR. Burst is clamped to the
 * maximum the 4-bit exponent / 8-bit mantissa can express.
 * NOTE(review): some branches (else/else-if and closing braces) are elided
 * in this chunk; comments cover only the visible logic.
 */
55 static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
60 /* Burst is calculated as
61 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
62 * Max supported burst size is 130,816 bytes.
/* Clamp to the largest encodable burst before deriving exp/mantissa */
64 burst = min_t(u32, burst, MAX_BURST_SIZE);
/* Exponent is floor(log2(burst)) - 1; guard ilog2(burst)==0 to avoid underflow */
66 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
/* tmp holds the remainder below the nearest power of two */
67 tmp = burst - rounddown_pow_of_two(burst);
68 if (burst < MAX_BURST_MANTISSA)
69 *burst_mantissa = tmp * 2;
/* Scale the remainder into the 8-bit mantissa for larger bursts */
71 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
/* Fallback path: saturate both fields at their maxima */
73 *burst_exp = MAX_BURST_EXPONENT;
74 *burst_mantissa = MAX_BURST_MANTISSA;
/* Translate a rate in Mbps into the exponent/mantissa/divider-exponent
 * encoding used by the TLX_RATE_* fields of NIX_AF_TLX_PIR.
 * NOTE(review): several branches are elided in this chunk; comments cover
 * only the visible logic.
 */
78 static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
79 u32 *mantissa, u32 *div_exp)
83 /* Rate calculation by hardware
85 * PIR_ADD = ((256 + mantissa) << exp) / 256
86 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
87 * The resultant rate is in Mbps.
90 /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
91 * Setting this to '0' will ease the calculation of
92 * exponent and mantissa.
/* Exponent is floor(log2(maxrate)) - 1; guard ilog2(maxrate)==0 */
97 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
/* Remainder below the nearest power of two feeds the mantissa */
98 tmp = maxrate - rounddown_pow_of_two(maxrate);
99 if (maxrate < MAX_RATE_MANTISSA)
102 *mantissa = tmp / (1ULL << (*exp - 7));
104 /* Instead of disabling rate limiting, set all values to max */
105 *exp = MAX_RATE_EXPONENT;
106 *mantissa = MAX_RATE_MANTISSA;
/* Program the TL4 scheduler's PIR register with the requested egress burst
 * and rate via an AF mailbox message. burst is in bytes, maxrate in Mbps.
 * Returns the mailbox sync result (0 on success, presumably negative errno
 * on failure — TODO confirm against otx2_sync_mbox_msg()).
 */
110 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
112 struct otx2_hw *hw = &nic->hw;
113 struct nix_txschq_config *req;
114 u32 burst_exp, burst_mantissa;
115 u32 exp, mantissa, div_exp;
118 /* All SQs share the same TL4, so pick the first scheduler */
119 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
121 /* Get exponent and mantissa values from the desired rate */
122 otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
123 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
125 mutex_lock(&nic->mbox.lock);
126 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
/* Allocation-failure path: drop the mailbox lock before bailing out */
128 mutex_unlock(&nic->mbox.lock);
132 req->lvl = NIX_TXSCH_LVL_TL4;
134 req->reg[0] = NIX_AF_TL4X_PIR(txschq);
/* Pack all rate fields into one register value; BIT_ULL(0) enables limiting */
135 req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
136 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
137 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
138 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
139 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
141 err = otx2_sync_mbox_msg(&nic->mbox);
142 mutex_unlock(&nic->mbox.lock);
/* Common precondition checks for MATCHALL offload requests: interface must
 * be up and the rule must carry exactly one action. Sets an extack message
 * on each rejection (return statements are elided in this chunk).
 */
146 static int otx2_tc_validate_flow(struct otx2_nic *nic,
147 struct flow_action *actions,
148 struct netlink_ext_ack *extack)
150 if (nic->flags & OTX2_FLAG_INTF_DOWN) {
151 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
155 if (!flow_action_has_entries(actions)) {
156 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
160 if (!flow_offload_has_one_action(actions)) {
161 NL_SET_ERR_MSG_MOD(extack,
162 "Egress MATCHALL offload supports only 1 policing action");
/* Install an egress MATCHALL rate limiter. Only one such limiter may be
 * offloaded at a time, and only the police action (byte-rate, not pps) is
 * supported. On success sets OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED.
 */
168 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
169 struct tc_cls_matchall_offload *cls)
171 struct netlink_ext_ack *extack = cls->common.extack;
172 struct flow_action *actions = &cls->rule->action;
173 struct flow_action_entry *entry;
177 err = otx2_tc_validate_flow(nic, actions, extack);
181 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
182 NL_SET_ERR_MSG_MOD(extack,
183 "Only one Egress MATCHALL ratelimiter can be offloaded");
/* validate_flow() guarantees exactly one action; switch on its id */
187 entry = &cls->rule->action.entries[0];
189 case FLOW_ACTION_POLICE:
/* Packet-per-second policing is not supported on the egress path */
190 if (entry->police.rate_pkt_ps) {
191 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
194 /* Convert bytes per second to Mbps */
195 rate = entry->police.rate_bytes_ps * 8;
/* Floor at 1 Mbps so a tiny requested rate still programs a limiter */
196 rate = max_t(u32, rate / 1000000, 1);
197 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
200 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
/* default case: any other action type is rejected */
203 NL_SET_ERR_MSG_MOD(extack,
204 "Only police action is supported with Egress MATCHALL offload");
/* Remove the egress MATCHALL rate limiter: program a zero burst/rate
 * (which the rate helper saturates to "no effective limit") and clear the
 * enabled flag.
 */
211 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
212 struct tc_cls_matchall_offload *cls)
214 struct netlink_ext_ack *extack = cls->common.extack;
217 if (nic->flags & OTX2_FLAG_INTF_DOWN) {
218 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
222 err = otx2_set_matchall_egress_rate(nic, 0, 0);
223 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
/* Set up ingress policing for one flow (CN10K only): pick a free RQ,
 * allocate a leaf bandwidth profile, program its rate, and map the RQ to
 * the profile. On the error path the profile is freed again.
 * @pps: true if @rate is packets/sec rather than a byte-derived rate.
 * NOTE(review): several error-branch lines are elided in this chunk.
 */
227 static int otx2_tc_act_set_police(struct otx2_nic *nic,
228 struct otx2_tc_flow *node,
229 struct flow_cls_offload *f,
230 u64 rate, u32 burst, u32 mark,
231 struct npc_install_flow_req *req, bool pps)
233 struct netlink_ext_ack *extack = f->common.extack;
234 struct otx2_hw *hw = &nic->hw;
/* Each police rule needs a dedicated RQ; fail if all RQs are taken */
237 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
238 if (rq_idx >= hw->rx_queues) {
239 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
243 mutex_lock(&nic->mbox.lock);
245 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
247 mutex_unlock(&nic->mbox.lock);
251 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
255 rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
259 mutex_unlock(&nic->mbox.lock);
/* Stamp the skb mark (16 bits) and steer matched packets to the policed RQ */
261 req->match_id = mark & 0xFFFFULL;
263 req->op = NIX_RX_ACTIONOP_UCAST;
264 set_bit(rq_idx, &nic->rq_bmap);
265 node->is_act_police = true;
/* Error unwind: release the leaf profile allocated above */
271 if (cn10k_free_leaf_profile(nic, node->leaf_profile))
272 netdev_err(nic->netdev,
273 "Unable to free leaf bandwidth profile(%d)\n",
275 mutex_unlock(&nic->mbox.lock);
/* Walk the flower rule's action list and translate each supported action
 * (drop, accept, redirect-ingress, vlan-pop, police, mark) into fields of
 * the NPC install request. Police, when present, must be the only action
 * and is finalized via otx2_tc_act_set_police().
 * NOTE(review): several break/return/closing lines are elided in this chunk.
 */
279 static int otx2_tc_parse_actions(struct otx2_nic *nic,
280 struct flow_action *flow_action,
281 struct npc_install_flow_req *req,
282 struct flow_cls_offload *f,
283 struct otx2_tc_flow *node)
285 struct netlink_ext_ack *extack = f->common.extack;
286 struct flow_action_entry *act;
287 struct net_device *target;
288 struct otx2_nic *priv;
295 if (!flow_action_has_entries(flow_action)) {
296 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
300 flow_action_for_each(i, act, flow_action) {
302 case FLOW_ACTION_DROP:
303 req->op = NIX_RX_ACTIONOP_DROP;
305 case FLOW_ACTION_ACCEPT:
306 req->op = NIX_RX_ACTION_DEFAULT;
308 case FLOW_ACTION_REDIRECT_INGRESS:
310 priv = netdev_priv(target);
311 /* npc_install_flow_req doesn't support passing a target pcifunc */
312 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
313 NL_SET_ERR_MSG_MOD(extack,
314 "can't redirect to other pf/vf");
/* Redirect within the same PF: address the target by its VF index */
317 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
318 req->op = NIX_RX_ACTION_DEFAULT;
320 case FLOW_ACTION_VLAN_POP:
321 req->vtag0_valid = true;
322 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
323 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
325 case FLOW_ACTION_POLICE:
326 /* Ingress ratelimiting is not supported on OcteonTx2 */
327 if (is_dev_otx2(nic->pdev)) {
328 NL_SET_ERR_MSG_MOD(extack,
329 "Ingress policing not supported on this platform");
333 if (act->police.rate_bytes_ps > 0) {
334 rate = act->police.rate_bytes_ps * 8;
335 burst = act->police.burst;
336 } else if (act->police.rate_pkt_ps > 0) {
337 /* The algorithm used to calculate rate
338 * mantissa, exponent values for a given token
339 * rate (token can be byte or packet) requires
340 * token rate to be multiplied by 8.
342 rate = act->police.rate_pkt_ps * 8;
343 burst = act->police.burst_pkt;
348 case FLOW_ACTION_MARK:
/* Police must be the rule's single action; reject combined rules */
357 NL_SET_ERR_MSG_MOD(extack,
358 "rate limit police offload requires a single action");
363 return otx2_tc_act_set_police(nic, node, f, rate, burst,
/* Translate a flower classifier's match keys (basic, eth addrs, IP tos,
 * vlan, IPv4/IPv6 addrs, L4 ports) into the NPC install request's
 * packet/mask/features fields, then hand the rule's actions to
 * otx2_tc_parse_actions(). Unsupported keys or values are rejected with an
 * extack/netdev message (return statements are partly elided in this chunk).
 */
369 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
370 struct flow_cls_offload *f,
371 struct npc_install_flow_req *req)
373 struct netlink_ext_ack *extack = f->common.extack;
374 struct flow_msg *flow_spec = &req->packet;
375 struct flow_msg *flow_mask = &req->mask;
376 struct flow_dissector *dissector;
377 struct flow_rule *rule;
380 rule = flow_cls_offload_flow_rule(f);
381 dissector = rule->match.dissector;
/* Reject rules that use any dissector key outside the supported set */
383 if ((dissector->used_keys &
384 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
385 BIT(FLOW_DISSECTOR_KEY_BASIC) |
386 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
387 BIT(FLOW_DISSECTOR_KEY_VLAN) |
388 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
389 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
390 BIT(FLOW_DISSECTOR_KEY_PORTS) |
391 BIT(FLOW_DISSECTOR_KEY_IP)))) {
392 netdev_info(nic->netdev, "unsupported flow used key 0x%x",
393 dissector->used_keys);
/* BASIC key: EtherType and IP protocol */
397 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
398 struct flow_match_basic match;
400 flow_rule_match_basic(rule, &match);
402 /* All EtherTypes can be matched, no hw limitation */
403 flow_spec->etype = match.key->n_proto;
404 flow_mask->etype = match.mask->n_proto;
405 req->features |= BIT_ULL(NPC_ETYPE);
/* Only TCP/UDP/SCTP/ICMP/ICMPv6 protocol matches are supported */
407 if (match.mask->ip_proto &&
408 (match.key->ip_proto != IPPROTO_TCP &&
409 match.key->ip_proto != IPPROTO_UDP &&
410 match.key->ip_proto != IPPROTO_SCTP &&
411 match.key->ip_proto != IPPROTO_ICMP &&
412 match.key->ip_proto != IPPROTO_ICMPV6)) {
413 netdev_info(nic->netdev,
414 "ip_proto=0x%x not supported\n",
415 match.key->ip_proto);
418 if (match.mask->ip_proto)
419 ip_proto = match.key->ip_proto;
421 if (ip_proto == IPPROTO_UDP)
422 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
423 else if (ip_proto == IPPROTO_TCP)
424 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
425 else if (ip_proto == IPPROTO_SCTP)
426 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
427 else if (ip_proto == IPPROTO_ICMP)
428 req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
429 else if (ip_proto == IPPROTO_ICMPV6)
430 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
/* ETH_ADDRS key: only destination MAC matching is supported */
433 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
434 struct flow_match_eth_addrs match;
436 flow_rule_match_eth_addrs(rule, &match);
437 if (!is_zero_ether_addr(match.mask->src)) {
438 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
442 if (!is_zero_ether_addr(match.mask->dst)) {
443 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
444 ether_addr_copy(flow_mask->dmac,
445 (u8 *)&match.mask->dst);
446 req->features |= BIT_ULL(NPC_DMAC);
/* IP key: TOS match only (requires IPv4/IPv6 etype), TTL unsupported */
450 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
451 struct flow_match_ip match;
453 flow_rule_match_ip(rule, &match);
454 if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
456 NL_SET_ERR_MSG_MOD(extack, "tos not supported");
459 if (match.mask->ttl) {
460 NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
463 flow_spec->tos = match.key->tos;
464 flow_mask->tos = match.mask->tos;
465 req->features |= BIT_ULL(NPC_TOS);
/* VLAN key: 802.1Q only; pack id/dei/priority into a single TCI match */
468 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
469 struct flow_match_vlan match;
470 u16 vlan_tci, vlan_tci_mask;
472 flow_rule_match_vlan(rule, &match);
474 if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
475 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
476 ntohs(match.key->vlan_tpid));
480 if (match.mask->vlan_id ||
481 match.mask->vlan_dei ||
482 match.mask->vlan_priority) {
483 vlan_tci = match.key->vlan_id |
484 match.key->vlan_dei << 12 |
485 match.key->vlan_priority << 13;
/* NOTE(review): mask is built from match.key->vlan_dei/priority rather
 * than match.mask-> — looks like a copy/paste slip; verify upstream. */
487 vlan_tci_mask = match.mask->vlan_id |
488 match.key->vlan_dei << 12 |
489 match.key->vlan_priority << 13;
491 flow_spec->vlan_tci = htons(vlan_tci);
492 flow_mask->vlan_tci = htons(vlan_tci_mask);
493 req->features |= BIT_ULL(NPC_OUTER_VID);
/* IPv4 or IPv6 address keys (mutually exclusive) */
497 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
498 struct flow_match_ipv4_addrs match;
500 flow_rule_match_ipv4_addrs(rule, &match);
502 flow_spec->ip4dst = match.key->dst;
503 flow_mask->ip4dst = match.mask->dst;
504 req->features |= BIT_ULL(NPC_DIP_IPV4);
506 flow_spec->ip4src = match.key->src;
507 flow_mask->ip4src = match.mask->src;
508 req->features |= BIT_ULL(NPC_SIP_IPV4);
509 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
510 struct flow_match_ipv6_addrs match;
512 flow_rule_match_ipv6_addrs(rule, &match);
/* HW can't match the IPv6 loopback address */
514 if (ipv6_addr_loopback(&match.key->dst) ||
515 ipv6_addr_loopback(&match.key->src)) {
516 NL_SET_ERR_MSG_MOD(extack,
517 "Flow matching IPv6 loopback addr not supported");
521 if (!ipv6_addr_any(&match.mask->dst)) {
522 memcpy(&flow_spec->ip6dst,
523 (struct in6_addr *)&match.key->dst,
524 sizeof(flow_spec->ip6dst));
525 memcpy(&flow_mask->ip6dst,
526 (struct in6_addr *)&match.mask->dst,
527 sizeof(flow_spec->ip6dst));
528 req->features |= BIT_ULL(NPC_DIP_IPV6);
531 if (!ipv6_addr_any(&match.mask->src)) {
532 memcpy(&flow_spec->ip6src,
533 (struct in6_addr *)&match.key->src,
534 sizeof(flow_spec->ip6src));
535 memcpy(&flow_mask->ip6src,
536 (struct in6_addr *)&match.mask->src,
537 sizeof(flow_spec->ip6src));
538 req->features |= BIT_ULL(NPC_SIP_IPV6);
/* PORTS key: L4 source/destination ports, feature bit chosen by ip_proto */
542 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
543 struct flow_match_ports match;
545 flow_rule_match_ports(rule, &match);
547 flow_spec->dport = match.key->dst;
548 flow_mask->dport = match.mask->dst;
549 if (ip_proto == IPPROTO_UDP)
550 req->features |= BIT_ULL(NPC_DPORT_UDP);
551 else if (ip_proto == IPPROTO_TCP)
552 req->features |= BIT_ULL(NPC_DPORT_TCP);
553 else if (ip_proto == IPPROTO_SCTP)
554 req->features |= BIT_ULL(NPC_DPORT_SCTP);
556 flow_spec->sport = match.key->src;
557 flow_mask->sport = match.mask->src;
558 if (ip_proto == IPPROTO_UDP)
559 req->features |= BIT_ULL(NPC_SPORT_UDP);
560 else if (ip_proto == IPPROTO_TCP)
561 req->features |= BIT_ULL(NPC_SPORT_TCP);
562 else if (ip_proto == IPPROTO_SCTP)
563 req->features |= BIT_ULL(NPC_SPORT_SCTP);
/* Matches done; translate the rule's actions into the request */
566 return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
/* Ask the AF, via mailbox, to delete the MCAM flow at index @entry.
 * Logs and returns on failure; the mailbox lock is held for the whole
 * request/response exchange.
 */
569 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
571 struct npc_delete_flow_req *req;
574 mutex_lock(&nic->mbox.lock);
575 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
/* Allocation failed: release the lock before returning */
577 mutex_unlock(&nic->mbox.lock);
583 /* Send message to AF */
584 err = otx2_sync_mbox_msg(&nic->mbox);
586 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
588 mutex_unlock(&nic->mbox.lock);
591 mutex_unlock(&nic->mbox.lock);
/* Tear down one offloaded flow identified by its TC cookie: undo any
 * police-action state (unmap RQ from the bandwidth profile, free the
 * profile, release the RQ bit), delete the MCAM entry, then remove the
 * node from the hash table and release its bookkeeping bit.
 */
596 static int otx2_tc_del_flow(struct otx2_nic *nic,
597 struct flow_cls_offload *tc_flow_cmd)
599 struct otx2_tc_info *tc_info = &nic->tc_info;
600 struct otx2_tc_flow *flow_node;
603 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
604 &tc_flow_cmd->cookie,
605 tc_info->flow_ht_params);
607 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
608 tc_flow_cmd->cookie);
/* Police-action flows also own an RQ mapping and a leaf profile */
612 if (flow_node->is_act_police) {
613 mutex_lock(&nic->mbox.lock);
615 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
616 flow_node->leaf_profile, false);
618 netdev_err(nic->netdev,
619 "Unmapping RQ %d & profile %d failed\n",
620 flow_node->rq, flow_node->leaf_profile);
622 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
624 netdev_err(nic->netdev,
625 "Unable to free leaf bandwidth profile(%d)\n",
626 flow_node->leaf_profile);
/* Give the RQ back to the pool of police-capable queues */
628 __clear_bit(flow_node->rq, &nic->rq_bmap);
630 mutex_unlock(&nic->mbox.lock);
633 otx2_del_mcam_flow_entry(nic, flow_node->entry);
635 WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
637 nic->tc_info.flow_ht_params));
/* NOTE(review): kfree_rcu() before the bitpos read below — use-after-free
 * ordering appears suspect in the visible lines; verify upstream. */
638 kfree_rcu(flow_node, rcu);
640 clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
641 tc_info->num_entries--;
/* Install a new flower flow: parse the match/actions into a scratch
 * request, replace any existing flow with the same cookie, send the NPC
 * install message to the AF, and insert the node into the flow table.
 * The trailing error path (from "if (new_node->is_act_police)") unwinds
 * police-action state when a later step fails.
 * NOTE(review): several error-branch/return lines are elided in this chunk.
 */
646 static int otx2_tc_add_flow(struct otx2_nic *nic,
647 struct flow_cls_offload *tc_flow_cmd)
649 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
650 struct otx2_tc_info *tc_info = &nic->tc_info;
651 struct otx2_tc_flow *new_node, *old_node;
652 struct npc_install_flow_req *req, dummy;
655 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
658 if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
659 NL_SET_ERR_MSG_MOD(extack,
660 "Not enough MCAM space to add the flow");
664 /* allocate memory for the new flow and it's node */
665 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
668 spin_lock_init(&new_node->lock);
669 new_node->cookie = tc_flow_cmd->cookie;
/* Parse into a stack-local request first; the mbox buffer is filled later */
671 memset(&dummy, 0, sizeof(struct npc_install_flow_req));
673 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
675 kfree_rcu(new_node, rcu);
679 /* If a flow exists with the same cookie, delete it */
680 old_node = rhashtable_lookup_fast(&tc_info->flow_table,
681 &tc_flow_cmd->cookie,
682 tc_info->flow_ht_params);
684 otx2_tc_del_flow(nic, tc_flow_cmd);
686 mutex_lock(&nic->mbox.lock);
687 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
689 mutex_unlock(&nic->mbox.lock);
/* Copy the prepared request into the mbox buffer, keeping its mbox header */
694 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
695 memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
697 new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
698 nic->flow_cfg->tc_max_flows);
699 req->channel = nic->hw.rx_chan_base;
/* tc entries are allocated from the high end of the tc flower range */
700 req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset +
701 nic->flow_cfg->tc_max_flows - new_node->bitpos];
702 req->intf = NIX_INTF_RX;
704 new_node->entry = req->entry;
706 /* Send message to AF */
707 rc = otx2_sync_mbox_msg(&nic->mbox);
709 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
710 mutex_unlock(&nic->mbox.lock);
711 kfree_rcu(new_node, rcu);
714 mutex_unlock(&nic->mbox.lock);
716 /* add new flow to flow-table */
717 rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
718 nic->tc_info.flow_ht_params);
/* Insert failed: roll back the MCAM entry installed above */
720 otx2_del_mcam_flow_entry(nic, req->entry);
721 kfree_rcu(new_node, rcu);
725 set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
726 tc_info->num_entries++;
/* Error unwind: undo police-action RQ mapping and leaf profile */
731 if (new_node->is_act_police) {
732 mutex_lock(&nic->mbox.lock);
734 err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
735 new_node->leaf_profile, false);
737 netdev_err(nic->netdev,
738 "Unmapping RQ %d & profile %d failed\n",
739 new_node->rq, new_node->leaf_profile);
740 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
742 netdev_err(nic->netdev,
743 "Unable to free leaf bandwidth profile(%d)\n",
744 new_node->leaf_profile);
746 __clear_bit(new_node->rq, &nic->rq_bmap);
748 mutex_unlock(&nic->mbox.lock);
/* Report per-flow packet statistics to TC: query the MCAM entry counter
 * from the AF, compute the delta against the last snapshot under the
 * flow's stats lock, and feed it to flow_stats_update().
 */
754 static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
755 struct flow_cls_offload *tc_flow_cmd)
757 struct otx2_tc_info *tc_info = &nic->tc_info;
758 struct npc_mcam_get_stats_req *req;
759 struct npc_mcam_get_stats_rsp *rsp;
760 struct otx2_tc_flow_stats *stats;
761 struct otx2_tc_flow *flow_node;
764 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
765 &tc_flow_cmd->cookie,
766 tc_info->flow_ht_params);
768 netdev_info(nic->netdev, "tc flow not found for cookie %lx",
769 tc_flow_cmd->cookie);
773 mutex_lock(&nic->mbox.lock);
775 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
/* Allocation failed: drop the lock before returning */
777 mutex_unlock(&nic->mbox.lock);
781 req->entry = flow_node->entry;
783 err = otx2_sync_mbox_msg(&nic->mbox);
785 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
787 mutex_unlock(&nic->mbox.lock);
791 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
792 (&nic->mbox.mbox, 0, &req->hdr);
794 mutex_unlock(&nic->mbox.lock);
798 mutex_unlock(&nic->mbox.lock);
803 stats = &flow_node->stats;
/* Report only the delta since the previous query (bytes/drops are 0) */
805 spin_lock(&flow_node->lock);
806 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
807 FLOW_ACTION_HW_STATS_IMMEDIATE);
808 stats->pkts = rsp->stat;
809 spin_unlock(&flow_node->lock);
/* Dispatch a flower classifier command to add/delete/stats handlers. */
814 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
815 struct flow_cls_offload *cls_flower)
817 switch (cls_flower->command) {
818 case FLOW_CLS_REPLACE:
819 return otx2_tc_add_flow(nic, cls_flower);
820 case FLOW_CLS_DESTROY:
821 return otx2_tc_del_flow(nic, cls_flower);
/* FLOW_CLS_STATS case (label elided in this chunk) */
823 return otx2_tc_get_flow_stats(nic, cls_flower);
/* Install an ingress MATCHALL rate limiter (CN10K only). Allocates the
 * matchall ingress policer and programs its rate; only one such limiter
 * may be offloaded at a time. Sets OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED
 * on success.
 */
829 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
830 struct tc_cls_matchall_offload *cls)
832 struct netlink_ext_ack *extack = cls->common.extack;
833 struct flow_action *actions = &cls->rule->action;
834 struct flow_action_entry *entry;
838 err = otx2_tc_validate_flow(nic, actions, extack);
842 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
843 NL_SET_ERR_MSG_MOD(extack,
844 "Only one ingress MATCHALL ratelimitter can be offloaded");
/* validate_flow() guarantees exactly one action; switch on its id */
848 entry = &cls->rule->action.entries[0];
850 case FLOW_ACTION_POLICE:
851 /* Ingress ratelimiting is not supported on OcteonTx2 */
852 if (is_dev_otx2(nic->pdev)) {
853 NL_SET_ERR_MSG_MOD(extack,
854 "Ingress policing not supported on this platform");
858 err = cn10k_alloc_matchall_ipolicer(nic);
862 /* Convert to bits per second */
863 rate = entry->police.rate_bytes_ps * 8;
864 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
867 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
/* default case: any other action type is rejected */
870 NL_SET_ERR_MSG_MOD(extack,
871 "Only police action supported with Ingress MATCHALL offload");
/* Remove the ingress MATCHALL rate limiter: free the matchall policer and
 * clear the enabled flag.
 */
878 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
879 struct tc_cls_matchall_offload *cls)
881 struct netlink_ext_ack *extack = cls->common.extack;
884 if (nic->flags & OTX2_FLAG_INTF_DOWN) {
885 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
889 err = cn10k_free_matchall_ipolicer(nic);
890 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
/* Dispatch an ingress MATCHALL command to the install/delete handlers.
 * TC_CLSMATCHALL_STATS falls through to the (elided) default return.
 */
894 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
895 struct tc_cls_matchall_offload *cls_matchall)
897 switch (cls_matchall->command) {
898 case TC_CLSMATCHALL_REPLACE:
899 return otx2_tc_ingress_matchall_install(nic, cls_matchall);
900 case TC_CLSMATCHALL_DESTROY:
901 return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
902 case TC_CLSMATCHALL_STATS:
/* flow_block callback for the ingress path: accepts only chain-0
 * offloadable rules and routes CLSFLOWER / CLSMATCHALL to their handlers.
 */
910 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
911 void *type_data, void *cb_priv)
913 struct otx2_nic *nic = cb_priv;
915 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
919 case TC_SETUP_CLSFLOWER:
920 return otx2_setup_tc_cls_flower(nic, type_data);
921 case TC_SETUP_CLSMATCHALL:
922 return otx2_setup_tc_ingress_matchall(nic, type_data);
/* Dispatch an egress MATCHALL command to the install/delete handlers.
 * TC_CLSMATCHALL_STATS falls through to the (elided) default return.
 */
930 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
931 struct tc_cls_matchall_offload *cls_matchall)
933 switch (cls_matchall->command) {
934 case TC_CLSMATCHALL_REPLACE:
935 return otx2_tc_egress_matchall_install(nic, cls_matchall);
936 case TC_CLSMATCHALL_DESTROY:
937 return otx2_tc_egress_matchall_delete(nic, cls_matchall);
938 case TC_CLSMATCHALL_STATS:
/* flow_block callback for the egress path: accepts only chain-0
 * offloadable rules and routes CLSMATCHALL to the egress handler.
 */
946 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
947 void *type_data, void *cb_priv)
949 struct otx2_nic *nic = cb_priv;
951 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
955 case TC_SETUP_CLSMATCHALL:
956 return otx2_setup_tc_egress_matchall(nic, type_data);
/* Driver-wide list of registered flow_block callbacks, shared by
 * flow_block_cb_setup_simple() below. */
964 static LIST_HEAD(otx2_block_cb_list);
/* Bind the appropriate flow_block callback (ingress or egress) for this
 * netdev via the simple setup helper; other binder types are rejected
 * (return elided in this chunk).
 */
966 static int otx2_setup_tc_block(struct net_device *netdev,
967 struct flow_block_offload *f)
969 struct otx2_nic *nic = netdev_priv(netdev);
976 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
977 cb = otx2_setup_tc_block_ingress_cb;
979 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
980 cb = otx2_setup_tc_block_egress_cb;
986 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
/* ndo_setup_tc entry point: routes TC_SETUP_BLOCK setup requests to
 * otx2_setup_tc_block() (switch/default lines elided in this chunk).
 */
990 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
995 return otx2_setup_tc_block(netdev, type_data);
/* rhashtable layout for the TC flow table: nodes are otx2_tc_flow entries
 * keyed by the TC cookie. */
1001 static const struct rhashtable_params tc_flow_ht_params = {
1002 .head_offset = offsetof(struct otx2_tc_flow, node),
1003 .key_offset = offsetof(struct otx2_tc_flow, cookie),
1004 .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
1005 .automatic_shrinking = true,
/* Initialize TC offload state: reserve RQ 0 (never handed to police
 * actions) and set up the cookie-keyed flow hash table.
 */
1008 int otx2_init_tc(struct otx2_nic *nic)
1010 struct otx2_tc_info *tc = &nic->tc_info;
1012 /* Exclude receive queue 0 being used for police action */
1013 set_bit(0, &nic->rq_bmap);
1015 tc->flow_ht_params = tc_flow_ht_params;
1016 return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
/* Tear down TC offload state: destroy the flow hash table. */
1019 void otx2_shutdown_tc(struct otx2_nic *nic)
1021 struct otx2_tc_info *tc = &nic->tc_info;
1023 rhashtable_destroy(&tc->flow_table);