// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */
9 #include "otx2_common.h"
11 #define OTX2_DEFAULT_ACTION 0x1
14 struct ethtool_rx_flow_spec flow_spec;
15 struct list_head list;
23 static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
25 devm_kfree(pfvf->dev, flow_cfg->flow_ent);
26 flow_cfg->flow_ent = NULL;
27 flow_cfg->ntuple_max_flows = 0;
28 flow_cfg->tc_max_flows = 0;
31 static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
33 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
34 struct npc_mcam_free_entry_req *req;
37 if (!flow_cfg->ntuple_max_flows)
40 mutex_lock(&pfvf->mbox.lock);
41 for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
42 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
46 req->entry = flow_cfg->flow_ent[ent];
48 /* Send message to AF to free MCAM entries */
49 err = otx2_sync_mbox_msg(&pfvf->mbox);
53 mutex_unlock(&pfvf->mbox.lock);
54 otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
58 static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
60 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
61 struct npc_mcam_alloc_entry_req *req;
62 struct npc_mcam_alloc_entry_rsp *rsp;
63 int ent, allocated = 0;
65 /* Free current ones and allocate new ones with requested count */
66 otx2_free_ntuple_mcam_entries(pfvf);
71 flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
72 sizeof(u16), GFP_KERNEL);
73 if (!flow_cfg->flow_ent)
76 mutex_lock(&pfvf->mbox.lock);
78 /* In a single request a max of NPC_MAX_NONCONTIG_ENTRIES MCAM entries
79 * can only be allocated.
81 while (allocated < count) {
82 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
87 req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
88 NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
89 req->priority = NPC_MCAM_HIGHER_PRIO;
90 req->ref_entry = flow_cfg->def_ent[0];
92 /* Send message to AF */
93 if (otx2_sync_mbox_msg(&pfvf->mbox))
96 rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
97 (&pfvf->mbox.mbox, 0, &req->hdr);
99 for (ent = 0; ent < rsp->count; ent++)
100 flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
102 allocated += rsp->count;
104 /* If this request is not fulfilled, no need to send
107 if (rsp->count != req->count)
112 mutex_unlock(&pfvf->mbox.lock);
114 flow_cfg->ntuple_offset = 0;
115 flow_cfg->ntuple_max_flows = allocated;
116 flow_cfg->tc_max_flows = allocated;
118 if (allocated != count)
119 netdev_info(pfvf->netdev,
120 "Unable to allocate %d MCAM entries for ntuple, got %d\n",
126 int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
128 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
129 struct npc_mcam_alloc_entry_req *req;
130 struct npc_mcam_alloc_entry_rsp *rsp;
131 int vf_vlan_max_flows;
134 vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
135 count = OTX2_MAX_UNICAST_FLOWS +
136 OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
138 flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
139 sizeof(u16), GFP_KERNEL);
140 if (!flow_cfg->def_ent)
143 mutex_lock(&pfvf->mbox.lock);
145 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
147 mutex_unlock(&pfvf->mbox.lock);
154 /* Send message to AF */
155 if (otx2_sync_mbox_msg(&pfvf->mbox)) {
156 mutex_unlock(&pfvf->mbox.lock);
160 rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
161 (&pfvf->mbox.mbox, 0, &req->hdr);
163 if (rsp->count != req->count) {
164 netdev_info(pfvf->netdev,
165 "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
166 mutex_unlock(&pfvf->mbox.lock);
167 devm_kfree(pfvf->dev, flow_cfg->def_ent);
171 for (ent = 0; ent < rsp->count; ent++)
172 flow_cfg->def_ent[ent] = rsp->entry_list[ent];
174 flow_cfg->vf_vlan_offset = 0;
175 flow_cfg->unicast_offset = vf_vlan_max_flows;
176 flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
177 OTX2_MAX_UNICAST_FLOWS;
178 pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
179 pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
180 pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
182 pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
183 mutex_unlock(&pfvf->mbox.lock);
185 /* Allocate entries for Ntuple filters */
186 count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
188 otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
192 pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
193 pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
198 int otx2_mcam_flow_init(struct otx2_nic *pf)
202 pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
207 INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
209 err = otx2_alloc_mcam_entries(pf);
213 /* Check if MCAM entries are allocate or not */
214 if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
217 pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
218 * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
225 void otx2_mcam_flow_del(struct otx2_nic *pf)
227 otx2_destroy_mcam_flows(pf);
230 /* On success adds mcam entry
231 * On failure enable promisous mode
233 static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
235 struct otx2_flow_config *flow_cfg = pf->flow_cfg;
236 struct npc_install_flow_req *req;
239 if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
242 /* dont have free mcam entries or uc list is greater than alloted */
243 if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
246 mutex_lock(&pf->mbox.lock);
247 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
249 mutex_unlock(&pf->mbox.lock);
253 /* unicast offset starts with 32 0..31 for ntuple */
254 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
255 if (pf->mac_table[i].inuse)
257 ether_addr_copy(pf->mac_table[i].addr, mac);
258 pf->mac_table[i].inuse = true;
259 pf->mac_table[i].mcam_entry =
260 flow_cfg->def_ent[i + flow_cfg->unicast_offset];
261 req->entry = pf->mac_table[i].mcam_entry;
265 ether_addr_copy(req->packet.dmac, mac);
266 eth_broadcast_addr((u8 *)&req->mask.dmac);
267 req->features = BIT_ULL(NPC_DMAC);
268 req->channel = pf->hw.rx_chan_base;
269 req->intf = NIX_INTF_RX;
270 req->op = NIX_RX_ACTION_DEFAULT;
273 err = otx2_sync_mbox_msg(&pf->mbox);
274 mutex_unlock(&pf->mbox.lock);
279 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
281 struct otx2_nic *pf = netdev_priv(netdev);
283 return otx2_do_add_macfilter(pf, mac);
286 static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
291 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
292 if (!pf->mac_table[i].inuse)
295 if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
296 *mcam_entry = pf->mac_table[i].mcam_entry;
297 pf->mac_table[i].inuse = false;
304 int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
306 struct otx2_nic *pf = netdev_priv(netdev);
307 struct npc_delete_flow_req *req;
310 /* check does mcam entry exists for given mac */
311 if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
314 mutex_lock(&pf->mbox.lock);
315 req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
317 mutex_unlock(&pf->mbox.lock);
320 req->entry = mcam_entry;
321 /* Send message to AF */
322 err = otx2_sync_mbox_msg(&pf->mbox);
323 mutex_unlock(&pf->mbox.lock);
328 static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
330 struct otx2_flow *iter;
332 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
333 if (iter->location == location)
340 static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
342 struct list_head *head = &pfvf->flow_cfg->flow_list;
343 struct otx2_flow *iter;
345 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
346 if (iter->location > flow->location)
351 list_add(&flow->list, head);
354 int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
357 struct otx2_flow *iter;
359 if (location >= pfvf->flow_cfg->ntuple_max_flows)
362 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
363 if (iter->location == location) {
364 nfc->fs = iter->flow_spec;
365 nfc->rss_context = iter->rss_ctx_id;
373 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
376 u32 rule_cnt = nfc->rule_cnt;
381 nfc->data = pfvf->flow_cfg->ntuple_max_flows;
382 while ((!err || err == -ENOENT) && idx < rule_cnt) {
383 err = otx2_get_flow(pfvf, nfc, location);
385 rule_locs[idx++] = location;
388 nfc->rule_cnt = rule_cnt;
393 static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
394 struct npc_install_flow_req *req,
397 struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
398 struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
399 struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
400 struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
401 struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
402 struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
403 struct flow_msg *pmask = &req->mask;
404 struct flow_msg *pkt = &req->packet;
408 if (ipv4_usr_mask->ip4src) {
409 memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
410 sizeof(pkt->ip4src));
411 memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
412 sizeof(pmask->ip4src));
413 req->features |= BIT_ULL(NPC_SIP_IPV4);
415 if (ipv4_usr_mask->ip4dst) {
416 memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
417 sizeof(pkt->ip4dst));
418 memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
419 sizeof(pmask->ip4dst));
420 req->features |= BIT_ULL(NPC_DIP_IPV4);
422 if (ipv4_usr_mask->tos) {
423 pkt->tos = ipv4_usr_hdr->tos;
424 pmask->tos = ipv4_usr_mask->tos;
425 req->features |= BIT_ULL(NPC_TOS);
427 if (ipv4_usr_mask->proto) {
428 switch (ipv4_usr_hdr->proto) {
430 req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
433 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
436 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
439 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
442 req->features |= BIT_ULL(NPC_IPPROTO_AH);
445 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
451 pkt->etype = cpu_to_be16(ETH_P_IP);
452 pmask->etype = cpu_to_be16(0xFFFF);
453 req->features |= BIT_ULL(NPC_ETYPE);
458 pkt->etype = cpu_to_be16(ETH_P_IP);
459 pmask->etype = cpu_to_be16(0xFFFF);
460 req->features |= BIT_ULL(NPC_ETYPE);
461 if (ipv4_l4_mask->ip4src) {
462 memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
463 sizeof(pkt->ip4src));
464 memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
465 sizeof(pmask->ip4src));
466 req->features |= BIT_ULL(NPC_SIP_IPV4);
468 if (ipv4_l4_mask->ip4dst) {
469 memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
470 sizeof(pkt->ip4dst));
471 memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
472 sizeof(pmask->ip4dst));
473 req->features |= BIT_ULL(NPC_DIP_IPV4);
475 if (ipv4_l4_mask->tos) {
476 pkt->tos = ipv4_l4_hdr->tos;
477 pmask->tos = ipv4_l4_mask->tos;
478 req->features |= BIT_ULL(NPC_TOS);
480 if (ipv4_l4_mask->psrc) {
481 memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
483 memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
484 sizeof(pmask->sport));
485 if (flow_type == UDP_V4_FLOW)
486 req->features |= BIT_ULL(NPC_SPORT_UDP);
487 else if (flow_type == TCP_V4_FLOW)
488 req->features |= BIT_ULL(NPC_SPORT_TCP);
490 req->features |= BIT_ULL(NPC_SPORT_SCTP);
492 if (ipv4_l4_mask->pdst) {
493 memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
495 memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
496 sizeof(pmask->dport));
497 if (flow_type == UDP_V4_FLOW)
498 req->features |= BIT_ULL(NPC_DPORT_UDP);
499 else if (flow_type == TCP_V4_FLOW)
500 req->features |= BIT_ULL(NPC_DPORT_TCP);
502 req->features |= BIT_ULL(NPC_DPORT_SCTP);
504 if (flow_type == UDP_V4_FLOW)
505 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
506 else if (flow_type == TCP_V4_FLOW)
507 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
509 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
513 pkt->etype = cpu_to_be16(ETH_P_IP);
514 pmask->etype = cpu_to_be16(0xFFFF);
515 req->features |= BIT_ULL(NPC_ETYPE);
516 if (ah_esp_mask->ip4src) {
517 memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
518 sizeof(pkt->ip4src));
519 memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
520 sizeof(pmask->ip4src));
521 req->features |= BIT_ULL(NPC_SIP_IPV4);
523 if (ah_esp_mask->ip4dst) {
524 memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
525 sizeof(pkt->ip4dst));
526 memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
527 sizeof(pmask->ip4dst));
528 req->features |= BIT_ULL(NPC_DIP_IPV4);
530 if (ah_esp_mask->tos) {
531 pkt->tos = ah_esp_hdr->tos;
532 pmask->tos = ah_esp_mask->tos;
533 req->features |= BIT_ULL(NPC_TOS);
536 /* NPC profile doesn't extract AH/ESP header fields */
537 if (ah_esp_mask->spi & ah_esp_hdr->spi)
540 if (flow_type == AH_V4_FLOW)
541 req->features |= BIT_ULL(NPC_IPPROTO_AH);
543 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
552 static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
553 struct npc_install_flow_req *req,
556 struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
557 struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
558 struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
559 struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
560 struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
561 struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
562 struct flow_msg *pmask = &req->mask;
563 struct flow_msg *pkt = &req->packet;
567 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
568 memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
569 sizeof(pkt->ip6src));
570 memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
571 sizeof(pmask->ip6src));
572 req->features |= BIT_ULL(NPC_SIP_IPV6);
574 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
575 memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
576 sizeof(pkt->ip6dst));
577 memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
578 sizeof(pmask->ip6dst));
579 req->features |= BIT_ULL(NPC_DIP_IPV6);
581 pkt->etype = cpu_to_be16(ETH_P_IPV6);
582 pmask->etype = cpu_to_be16(0xFFFF);
583 req->features |= BIT_ULL(NPC_ETYPE);
588 pkt->etype = cpu_to_be16(ETH_P_IPV6);
589 pmask->etype = cpu_to_be16(0xFFFF);
590 req->features |= BIT_ULL(NPC_ETYPE);
591 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
592 memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
593 sizeof(pkt->ip6src));
594 memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
595 sizeof(pmask->ip6src));
596 req->features |= BIT_ULL(NPC_SIP_IPV6);
598 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
599 memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
600 sizeof(pkt->ip6dst));
601 memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
602 sizeof(pmask->ip6dst));
603 req->features |= BIT_ULL(NPC_DIP_IPV6);
605 if (ipv6_l4_mask->psrc) {
606 memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
608 memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
609 sizeof(pmask->sport));
610 if (flow_type == UDP_V6_FLOW)
611 req->features |= BIT_ULL(NPC_SPORT_UDP);
612 else if (flow_type == TCP_V6_FLOW)
613 req->features |= BIT_ULL(NPC_SPORT_TCP);
615 req->features |= BIT_ULL(NPC_SPORT_SCTP);
617 if (ipv6_l4_mask->pdst) {
618 memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
620 memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
621 sizeof(pmask->dport));
622 if (flow_type == UDP_V6_FLOW)
623 req->features |= BIT_ULL(NPC_DPORT_UDP);
624 else if (flow_type == TCP_V6_FLOW)
625 req->features |= BIT_ULL(NPC_DPORT_TCP);
627 req->features |= BIT_ULL(NPC_DPORT_SCTP);
629 if (flow_type == UDP_V6_FLOW)
630 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
631 else if (flow_type == TCP_V6_FLOW)
632 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
634 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
638 pkt->etype = cpu_to_be16(ETH_P_IPV6);
639 pmask->etype = cpu_to_be16(0xFFFF);
640 req->features |= BIT_ULL(NPC_ETYPE);
641 if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
642 memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
643 sizeof(pkt->ip6src));
644 memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
645 sizeof(pmask->ip6src));
646 req->features |= BIT_ULL(NPC_SIP_IPV6);
648 if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
649 memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
650 sizeof(pkt->ip6dst));
651 memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
652 sizeof(pmask->ip6dst));
653 req->features |= BIT_ULL(NPC_DIP_IPV6);
656 /* NPC profile doesn't extract AH/ESP header fields */
657 if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
658 (ah_esp_mask->tclass & ah_esp_mask->tclass))
661 if (flow_type == AH_V6_FLOW)
662 req->features |= BIT_ULL(NPC_IPPROTO_AH);
664 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
673 int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
674 struct npc_install_flow_req *req)
676 struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
677 struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
678 struct flow_msg *pmask = &req->mask;
679 struct flow_msg *pkt = &req->packet;
683 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
685 /* bits not set in mask are don't care */
687 if (!is_zero_ether_addr(eth_mask->h_source)) {
688 ether_addr_copy(pkt->smac, eth_hdr->h_source);
689 ether_addr_copy(pmask->smac, eth_mask->h_source);
690 req->features |= BIT_ULL(NPC_SMAC);
692 if (!is_zero_ether_addr(eth_mask->h_dest)) {
693 ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
694 ether_addr_copy(pmask->dmac, eth_mask->h_dest);
695 req->features |= BIT_ULL(NPC_DMAC);
697 if (eth_mask->h_proto) {
698 memcpy(&pkt->etype, ð_hdr->h_proto,
700 memcpy(&pmask->etype, ð_mask->h_proto,
701 sizeof(pmask->etype));
702 req->features |= BIT_ULL(NPC_ETYPE);
711 ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
721 ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
728 if (fsp->flow_type & FLOW_EXT) {
729 if (fsp->m_ext.vlan_etype)
731 if (fsp->m_ext.vlan_tci) {
732 if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
734 if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
737 memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
738 sizeof(pkt->vlan_tci));
739 memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
740 sizeof(pmask->vlan_tci));
741 req->features |= BIT_ULL(NPC_OUTER_VID);
744 /* Not Drop/Direct to queue but use action in default entry */
745 if (fsp->m_ext.data[1] &&
746 fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
747 req->op = NIX_RX_ACTION_DEFAULT;
750 if (fsp->flow_type & FLOW_MAC_EXT &&
751 !is_zero_ether_addr(fsp->m_ext.h_dest)) {
752 ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
753 ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
754 req->features |= BIT_ULL(NPC_DMAC);
763 static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
765 u64 ring_cookie = flow->flow_spec.ring_cookie;
766 struct npc_install_flow_req *req;
769 mutex_lock(&pfvf->mbox.lock);
770 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
772 mutex_unlock(&pfvf->mbox.lock);
776 err = otx2_prepare_flow_request(&flow->flow_spec, req);
778 /* free the allocated msg above */
779 otx2_mbox_reset(&pfvf->mbox.mbox, 0);
780 mutex_unlock(&pfvf->mbox.lock);
784 req->entry = flow->entry;
785 req->intf = NIX_INTF_RX;
787 req->channel = pfvf->hw.rx_chan_base;
788 if (ring_cookie == RX_CLS_FLOW_DISC) {
789 req->op = NIX_RX_ACTIONOP_DROP;
791 /* change to unicast only if action of default entry is not
794 if (flow->flow_spec.flow_type & FLOW_RSS) {
795 req->op = NIX_RX_ACTIONOP_RSS;
796 req->index = flow->rss_ctx_id;
798 req->op = NIX_RX_ACTIONOP_UCAST;
799 req->index = ethtool_get_flow_spec_ring(ring_cookie);
801 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
802 if (vf > pci_num_vf(pfvf->pdev)) {
803 mutex_unlock(&pfvf->mbox.lock);
808 /* ethtool ring_cookie has (VF + 1) for VF */
815 /* Send message to AF */
816 err = otx2_sync_mbox_msg(&pfvf->mbox);
817 mutex_unlock(&pfvf->mbox.lock);
821 int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
823 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
824 struct ethtool_rx_flow_spec *fsp = &nfc->fs;
825 struct otx2_flow *flow;
830 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
831 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
834 if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
837 if (fsp->location >= flow_cfg->ntuple_max_flows)
840 flow = otx2_find_flow(pfvf, fsp->location);
842 flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
845 flow->location = fsp->location;
846 flow->entry = flow_cfg->flow_ent[flow->location];
850 flow->flow_spec = *fsp;
852 if (fsp->flow_type & FLOW_RSS)
853 flow->rss_ctx_id = nfc->rss_context;
855 err = otx2_add_flow_msg(pfvf, flow);
862 /* add the new flow installed to list */
864 otx2_add_flow_to_list(pfvf, flow);
865 flow_cfg->nr_flows++;
871 static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
873 struct npc_delete_flow_req *req;
876 mutex_lock(&pfvf->mbox.lock);
877 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
879 mutex_unlock(&pfvf->mbox.lock);
887 /* Send message to AF */
888 err = otx2_sync_mbox_msg(&pfvf->mbox);
889 mutex_unlock(&pfvf->mbox.lock);
893 int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
895 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
896 struct otx2_flow *flow;
899 if (location >= flow_cfg->ntuple_max_flows)
902 flow = otx2_find_flow(pfvf, location);
906 err = otx2_remove_flow_msg(pfvf, flow->entry, false);
910 list_del(&flow->list);
912 flow_cfg->nr_flows--;
917 void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
919 struct otx2_flow *flow, *tmp;
922 list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
923 if (flow->rss_ctx_id != ctx_id)
925 err = otx2_remove_flow(pfvf, flow->location);
927 netdev_warn(pfvf->netdev,
928 "Can't delete the rule %d associated with this rss group err:%d",
929 flow->location, err);
933 int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
935 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
936 struct npc_delete_flow_req *req;
937 struct otx2_flow *iter, *tmp;
940 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
943 mutex_lock(&pfvf->mbox.lock);
944 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
946 mutex_unlock(&pfvf->mbox.lock);
950 req->start = flow_cfg->flow_ent[0];
951 req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
952 err = otx2_sync_mbox_msg(&pfvf->mbox);
953 mutex_unlock(&pfvf->mbox.lock);
955 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
956 list_del(&iter->list);
958 flow_cfg->nr_flows--;
963 int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
965 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
966 struct npc_mcam_free_entry_req *req;
967 struct otx2_flow *iter, *tmp;
970 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
973 /* remove all flows */
974 err = otx2_remove_flow_msg(pfvf, 0, true);
978 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
979 list_del(&iter->list);
981 flow_cfg->nr_flows--;
984 mutex_lock(&pfvf->mbox.lock);
985 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
987 mutex_unlock(&pfvf->mbox.lock);
992 /* Send message to AF to free MCAM entries */
993 err = otx2_sync_mbox_msg(&pfvf->mbox);
995 mutex_unlock(&pfvf->mbox.lock);
999 pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
1000 mutex_unlock(&pfvf->mbox.lock);
1005 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
1007 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1008 struct npc_install_flow_req *req;
1011 mutex_lock(&pfvf->mbox.lock);
1012 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
1014 mutex_unlock(&pfvf->mbox.lock);
1018 req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1019 req->intf = NIX_INTF_RX;
1020 ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
1021 eth_broadcast_addr((u8 *)&req->mask.dmac);
1022 req->channel = pfvf->hw.rx_chan_base;
1023 req->op = NIX_RX_ACTION_DEFAULT;
1024 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
1025 req->vtag0_valid = true;
1026 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1028 /* Send message to AF */
1029 err = otx2_sync_mbox_msg(&pfvf->mbox);
1030 mutex_unlock(&pfvf->mbox.lock);
1034 static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
1036 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1037 struct npc_delete_flow_req *req;
1040 mutex_lock(&pfvf->mbox.lock);
1041 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1043 mutex_unlock(&pfvf->mbox.lock);
1047 req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1048 /* Send message to AF */
1049 err = otx2_sync_mbox_msg(&pfvf->mbox);
1050 mutex_unlock(&pfvf->mbox.lock);
1054 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
1056 struct nix_vtag_config *req;
1057 struct mbox_msghdr *rsp_hdr;
1060 /* Dont have enough mcam entries */
1061 if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
1065 err = otx2_install_rxvlan_offload_flow(pf);
1069 err = otx2_delete_rxvlan_offload_flow(pf);
1074 mutex_lock(&pf->mbox.lock);
1075 req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
1077 mutex_unlock(&pf->mbox.lock);
1081 /* config strip, capture and size */
1082 req->vtag_size = VTAGSIZE_T4;
1083 req->cfg_type = 1; /* rx vlan cfg */
1084 req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1085 req->rx.strip_vtag = enable;
1086 req->rx.capture_vtag = enable;
1088 err = otx2_sync_mbox_msg(&pf->mbox);
1090 mutex_unlock(&pf->mbox.lock);
1094 rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
1095 if (IS_ERR(rsp_hdr)) {
1096 mutex_unlock(&pf->mbox.lock);
1097 return PTR_ERR(rsp_hdr);
1100 mutex_unlock(&pf->mbox.lock);