drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION     0x1

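/* Driver-private ntuple flow entry: ties an ethtool flow spec to the
 * NPC MCAM entry that backs it in hardware.
 */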
struct otx2_flow {
        struct ethtool_rx_flow_spec flow_spec;
        struct list_head list;
        u32 location;
        u16 entry;
        bool is_vf;
        u8 rss_ctx_id;
        int vf;
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
        devm_kfree(pfvf->dev, flow_cfg->flow_ent);
        flow_cfg->flow_ent = NULL;
        flow_cfg->ntuple_max_flows = 0;
        flow_cfg->tc_max_flows = 0;
}

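/* Ask the AF (Admin Function) to free every MCAM entry previously
 * reserved for ntuple filters, one mailbox message per entry, then
 * reset the local ntuple/tc bookkeeping.
 */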
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct npc_mcam_free_entry_req *req;
        int ent, err;

        if (!flow_cfg->ntuple_max_flows)
                return 0;

        mutex_lock(&pfvf->mbox.lock);
        for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
                req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
                if (!req)
                        break;

                req->entry = flow_cfg->flow_ent[ent];

                /* Send message to AF to free MCAM entries */
                err = otx2_sync_mbox_msg(&pfvf->mbox);
                if (err)
                        break;
        }
        mutex_unlock(&pfvf->mbox.lock);
        otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
        return 0;
}

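/* Re-reserve 'count' non-contiguous MCAM entries for ntuple filters.
 * Existing entries are freed first; allocation is done in chunks of at
 * most NPC_MAX_NONCONTIG_ENTRIES per mailbox request and may come back
 * short, in which case whatever was granted is kept and reported.
 */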
static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct npc_mcam_alloc_entry_req *req;
        struct npc_mcam_alloc_entry_rsp *rsp;
        int ent, allocated = 0;

        /* Free current ones and allocate new ones with requested count */
        otx2_free_ntuple_mcam_entries(pfvf);

        if (!count)
                return 0;

        flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
                                                sizeof(u16), GFP_KERNEL);
        if (!flow_cfg->flow_ent)
                return -ENOMEM;

        mutex_lock(&pfvf->mbox.lock);

        /* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
         * allocated in a single request.
         */
        while (allocated < count) {
                req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
                if (!req)
                        goto exit;

                req->contig = false;
                req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
                                NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
                req->priority = NPC_MCAM_HIGHER_PRIO;
                req->ref_entry = flow_cfg->def_ent[0];

                /* Send message to AF */
                if (otx2_sync_mbox_msg(&pfvf->mbox))
                        goto exit;

                rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
                        (&pfvf->mbox.mbox, 0, &req->hdr);

                for (ent = 0; ent < rsp->count; ent++)
                        flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

                allocated += rsp->count;

                /* If this request was not fulfilled, there is no point in
                 * sending further requests.
                 */
                if (rsp->count != req->count)
                        break;
        }

exit:
        mutex_unlock(&pfvf->mbox.lock);

        flow_cfg->ntuple_offset = 0;
        flow_cfg->ntuple_max_flows = allocated;
        flow_cfg->tc_max_flows = allocated;

        if (allocated != count)
                netdev_info(pfvf->netdev,
                            "Unable to allocate %d MCAM entries for ntuple, got %d\n",
                            count, allocated);

        return allocated;
}

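/* Reserve the default MCAM entries: a block of per-VF VLAN filters,
 * OTX2_MAX_UNICAST_FLOWS unicast MAC filters and OTX2_MAX_VLAN_FLOWS
 * RX VLAN entries, followed by a separate pool for ntuple filters.
 * Feature flags are set only for the groups that were actually granted.
 */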
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct npc_mcam_alloc_entry_req *req;
        struct npc_mcam_alloc_entry_rsp *rsp;
        int vf_vlan_max_flows;
        int ent, count;

        vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
        count = OTX2_MAX_UNICAST_FLOWS +
                        OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

        flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
                                               sizeof(u16), GFP_KERNEL);
        if (!flow_cfg->def_ent)
                return -ENOMEM;

        mutex_lock(&pfvf->mbox.lock);

        req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        req->contig = false;
        req->count = count;

        /* Send message to AF */
        if (otx2_sync_mbox_msg(&pfvf->mbox)) {
                mutex_unlock(&pfvf->mbox.lock);
                return -EINVAL;
        }

        rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
               (&pfvf->mbox.mbox, 0, &req->hdr);

        if (rsp->count != req->count) {
                netdev_info(pfvf->netdev,
                            "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
                mutex_unlock(&pfvf->mbox.lock);
                devm_kfree(pfvf->dev, flow_cfg->def_ent);
                return 0;
        }

        for (ent = 0; ent < rsp->count; ent++)
                flow_cfg->def_ent[ent] = rsp->entry_list[ent];

        flow_cfg->vf_vlan_offset = 0;
        flow_cfg->unicast_offset = vf_vlan_max_flows;
        flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
                                        OTX2_MAX_UNICAST_FLOWS;
        pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
        pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
        pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

        pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
        mutex_unlock(&pfvf->mbox.lock);

        /* Allocate entries for Ntuple filters */
        count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
        if (count <= 0) {
                otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
                return 0;
        }

        pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
        pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

        return 0;
}

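/* One-time flow setup at probe: allocate the flow config, the MCAM
 * entries and, if unicast filtering was granted, the shadow MAC table
 * used to track which entry each filter occupies.
 */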
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
        int err;

        pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
                                    GFP_KERNEL);
        if (!pf->flow_cfg)
                return -ENOMEM;

        INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

        err = otx2_alloc_mcam_entries(pf);
        if (err)
                return err;

        /* Check whether MCAM entries were allocated */
        if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
                return 0;

        pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
                                        * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
        if (!pf->mac_table)
                return -ENOMEM;

        return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
        otx2_destroy_mcam_flows(pf);
}

/*  On success adds mcam entry
 *  On failure enable promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
        struct otx2_flow_config *flow_cfg = pf->flow_cfg;
        struct npc_install_flow_req *req;
        int err, i;

        if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
                return -ENOMEM;

        /* don't have free mcam entries or uc list is greater than allotted */
        if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
                return -ENOMEM;

        mutex_lock(&pf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
        if (!req) {
                mutex_unlock(&pf->mbox.lock);
                return -ENOMEM;
        }

        /* pick the first free slot in the unicast region of def_ent */
        for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
                if (pf->mac_table[i].inuse)
                        continue;
                ether_addr_copy(pf->mac_table[i].addr, mac);
                pf->mac_table[i].inuse = true;
                pf->mac_table[i].mcam_entry =
                        flow_cfg->def_ent[i + flow_cfg->unicast_offset];
                req->entry = pf->mac_table[i].mcam_entry;
                break;
        }

        ether_addr_copy(req->packet.dmac, mac);
        eth_broadcast_addr((u8 *)&req->mask.dmac);
        req->features = BIT_ULL(NPC_DMAC);
        req->channel = pf->hw.rx_chan_base;
        req->intf = NIX_INTF_RX;
        req->op = NIX_RX_ACTION_DEFAULT;
        req->set_cntr = 1;

        err = otx2_sync_mbox_msg(&pf->mbox);
        mutex_unlock(&pf->mbox.lock);

        return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
        struct otx2_nic *pf = netdev_priv(netdev);

        return otx2_do_add_macfilter(pf, mac);
}

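/* Look up the MCAM entry backing 'mac' in the shadow table. On a hit
 * the slot is also released (inuse cleared), so this is a
 * find-and-remove.
 */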
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
                                       int *mcam_entry)
{
        int i;

        for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
                if (!pf->mac_table[i].inuse)
                        continue;

                if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
                        *mcam_entry = pf->mac_table[i].mcam_entry;
                        pf->mac_table[i].inuse = false;
                        return true;
                }
        }
        return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
        struct otx2_nic *pf = netdev_priv(netdev);
        struct npc_delete_flow_req *req;
        int err, mcam_entry;

        /* check whether an mcam entry exists for the given mac */
        if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
                return 0;

        mutex_lock(&pf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
        if (!req) {
                mutex_unlock(&pf->mbox.lock);
                return -ENOMEM;
        }
        req->entry = mcam_entry;
        /* Send message to AF */
        err = otx2_sync_mbox_msg(&pf->mbox);
        mutex_unlock(&pf->mbox.lock);

        return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
        struct otx2_flow *iter;

        list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
                if (iter->location == location)
                        return iter;
        }

        return NULL;
}

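/* Insert 'flow' into flow_list, keeping the list sorted by ascending
 * ethtool rule location.
 */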
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
        struct list_head *head = &pfvf->flow_cfg->flow_list;
        struct otx2_flow *iter;

        list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
                if (iter->location > flow->location)
                        break;
                head = &iter->list;
        }

        list_add(&flow->list, head);
}

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
                  u32 location)
{
        struct otx2_flow *iter;

        if (location >= pfvf->flow_cfg->ntuple_max_flows)
                return -EINVAL;

        list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
                if (iter->location == location) {
                        nfc->fs = iter->flow_spec;
                        nfc->rss_context = iter->rss_ctx_id;
                        return 0;
                }
        }

        return -ENOENT;
}

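/* ethtool get-all-rules handler: walk rule locations until
 * nfc->rule_cnt entries have been collected, tolerating the -ENOENT
 * gaps between installed rules.
 */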
int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
                       u32 *rule_locs)
{
        u32 rule_cnt = nfc->rule_cnt;
        u32 location = 0;
        int idx = 0;
        int err = 0;

        nfc->data = pfvf->flow_cfg->ntuple_max_flows;
        while ((!err || err == -ENOENT) && idx < rule_cnt) {
                err = otx2_get_flow(pfvf, nfc, location);
                if (!err)
                        rule_locs[idx++] = location;
                location++;
        }
        nfc->rule_cnt = rule_cnt;

        return err;
}

static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
                                  struct npc_install_flow_req *req,
                                  u32 flow_type)
{
        struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
        struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
        struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
        struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
        struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
        struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
        struct flow_msg *pmask = &req->mask;
        struct flow_msg *pkt = &req->packet;

        switch (flow_type) {
        case IP_USER_FLOW:
                if (ipv4_usr_mask->ip4src) {
                        memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
                               sizeof(pkt->ip4src));
                        memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
                               sizeof(pmask->ip4src));
                        req->features |= BIT_ULL(NPC_SIP_IPV4);
                }
                if (ipv4_usr_mask->ip4dst) {
                        memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
                               sizeof(pkt->ip4dst));
                        memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
                               sizeof(pmask->ip4dst));
                        req->features |= BIT_ULL(NPC_DIP_IPV4);
                }
                if (ipv4_usr_mask->tos) {
                        pkt->tos = ipv4_usr_hdr->tos;
                        pmask->tos = ipv4_usr_mask->tos;
                        req->features |= BIT_ULL(NPC_TOS);
                }
                if (ipv4_usr_mask->proto) {
                        switch (ipv4_usr_hdr->proto) {
                        case IPPROTO_ICMP:
                                req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
                                break;
                        case IPPROTO_TCP:
                                req->features |= BIT_ULL(NPC_IPPROTO_TCP);
                                break;
                        case IPPROTO_UDP:
                                req->features |= BIT_ULL(NPC_IPPROTO_UDP);
                                break;
                        case IPPROTO_SCTP:
                                req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
                                break;
                        case IPPROTO_AH:
                                req->features |= BIT_ULL(NPC_IPPROTO_AH);
                                break;
                        case IPPROTO_ESP:
                                req->features |= BIT_ULL(NPC_IPPROTO_ESP);
                                break;
                        default:
                                return -EOPNOTSUPP;
                        }
                }
                pkt->etype = cpu_to_be16(ETH_P_IP);
                pmask->etype = cpu_to_be16(0xFFFF);
                req->features |= BIT_ULL(NPC_ETYPE);
                break;
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
        case SCTP_V4_FLOW:
                pkt->etype = cpu_to_be16(ETH_P_IP);
                pmask->etype = cpu_to_be16(0xFFFF);
                req->features |= BIT_ULL(NPC_ETYPE);
                if (ipv4_l4_mask->ip4src) {
                        memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
                               sizeof(pkt->ip4src));
                        memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
                               sizeof(pmask->ip4src));
                        req->features |= BIT_ULL(NPC_SIP_IPV4);
                }
                if (ipv4_l4_mask->ip4dst) {
                        memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
                               sizeof(pkt->ip4dst));
                        memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
                               sizeof(pmask->ip4dst));
                        req->features |= BIT_ULL(NPC_DIP_IPV4);
                }
                if (ipv4_l4_mask->tos) {
                        pkt->tos = ipv4_l4_hdr->tos;
                        pmask->tos = ipv4_l4_mask->tos;
                        req->features |= BIT_ULL(NPC_TOS);
                }
                if (ipv4_l4_mask->psrc) {
                        memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
                               sizeof(pkt->sport));
                        memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
                               sizeof(pmask->sport));
                        if (flow_type == UDP_V4_FLOW)
                                req->features |= BIT_ULL(NPC_SPORT_UDP);
                        else if (flow_type == TCP_V4_FLOW)
                                req->features |= BIT_ULL(NPC_SPORT_TCP);
                        else
                                req->features |= BIT_ULL(NPC_SPORT_SCTP);
                }
                if (ipv4_l4_mask->pdst) {
                        memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
                               sizeof(pkt->dport));
                        memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
                               sizeof(pmask->dport));
                        if (flow_type == UDP_V4_FLOW)
                                req->features |= BIT_ULL(NPC_DPORT_UDP);
                        else if (flow_type == TCP_V4_FLOW)
                                req->features |= BIT_ULL(NPC_DPORT_TCP);
                        else
                                req->features |= BIT_ULL(NPC_DPORT_SCTP);
                }
                if (flow_type == UDP_V4_FLOW)
                        req->features |= BIT_ULL(NPC_IPPROTO_UDP);
                else if (flow_type == TCP_V4_FLOW)
                        req->features |= BIT_ULL(NPC_IPPROTO_TCP);
                else
                        req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
                break;
        case AH_V4_FLOW:
        case ESP_V4_FLOW:
                pkt->etype = cpu_to_be16(ETH_P_IP);
                pmask->etype = cpu_to_be16(0xFFFF);
                req->features |= BIT_ULL(NPC_ETYPE);
                if (ah_esp_mask->ip4src) {
                        memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
                               sizeof(pkt->ip4src));
                        memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
                               sizeof(pmask->ip4src));
                        req->features |= BIT_ULL(NPC_SIP_IPV4);
                }
                if (ah_esp_mask->ip4dst) {
                        memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
                               sizeof(pkt->ip4dst));
                        memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
                               sizeof(pmask->ip4dst));
                        req->features |= BIT_ULL(NPC_DIP_IPV4);
                }
                if (ah_esp_mask->tos) {
                        pkt->tos = ah_esp_hdr->tos;
                        pmask->tos = ah_esp_mask->tos;
                        req->features |= BIT_ULL(NPC_TOS);
                }

                /* NPC profile doesn't extract AH/ESP header fields */
                if (ah_esp_mask->spi & ah_esp_hdr->spi)
                        return -EOPNOTSUPP;

                if (flow_type == AH_V4_FLOW)
                        req->features |= BIT_ULL(NPC_IPPROTO_AH);
                else
                        req->features |= BIT_ULL(NPC_IPPROTO_ESP);
                break;
        default:
                break;
        }

        return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
                                  struct npc_install_flow_req *req,
                                  u32 flow_type)
{
        struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
        struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
        struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
        struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
        struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
        struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
        struct flow_msg *pmask = &req->mask;
        struct flow_msg *pkt = &req->packet;

        switch (flow_type) {
        case IPV6_USER_FLOW:
                if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
                        memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
                               sizeof(pkt->ip6src));
                        memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
                               sizeof(pmask->ip6src));
                        req->features |= BIT_ULL(NPC_SIP_IPV6);
                }
                if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
                        memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
                               sizeof(pkt->ip6dst));
                        memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
                               sizeof(pmask->ip6dst));
                        req->features |= BIT_ULL(NPC_DIP_IPV6);
                }
                pkt->etype = cpu_to_be16(ETH_P_IPV6);
                pmask->etype = cpu_to_be16(0xFFFF);
                req->features |= BIT_ULL(NPC_ETYPE);
                break;
        case TCP_V6_FLOW:
        case UDP_V6_FLOW:
        case SCTP_V6_FLOW:
                pkt->etype = cpu_to_be16(ETH_P_IPV6);
                pmask->etype = cpu_to_be16(0xFFFF);
                req->features |= BIT_ULL(NPC_ETYPE);
                if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
                        memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
                               sizeof(pkt->ip6src));
                        memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
                               sizeof(pmask->ip6src));
                        req->features |= BIT_ULL(NPC_SIP_IPV6);
                }
                if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
                        memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
                               sizeof(pkt->ip6dst));
                        memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
                               sizeof(pmask->ip6dst));
                        req->features |= BIT_ULL(NPC_DIP_IPV6);
                }
                if (ipv6_l4_mask->psrc) {
                        memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
                               sizeof(pkt->sport));
                        memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
                               sizeof(pmask->sport));
                        if (flow_type == UDP_V6_FLOW)
                                req->features |= BIT_ULL(NPC_SPORT_UDP);
                        else if (flow_type == TCP_V6_FLOW)
                                req->features |= BIT_ULL(NPC_SPORT_TCP);
                        else
                                req->features |= BIT_ULL(NPC_SPORT_SCTP);
                }
                if (ipv6_l4_mask->pdst) {
                        memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
                               sizeof(pkt->dport));
                        memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
                               sizeof(pmask->dport));
                        if (flow_type == UDP_V6_FLOW)
                                req->features |= BIT_ULL(NPC_DPORT_UDP);
                        else if (flow_type == TCP_V6_FLOW)
                                req->features |= BIT_ULL(NPC_DPORT_TCP);
                        else
                                req->features |= BIT_ULL(NPC_DPORT_SCTP);
                }
                if (flow_type == UDP_V6_FLOW)
                        req->features |= BIT_ULL(NPC_IPPROTO_UDP);
                else if (flow_type == TCP_V6_FLOW)
                        req->features |= BIT_ULL(NPC_IPPROTO_TCP);
                else
                        req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
                break;
        case AH_V6_FLOW:
        case ESP_V6_FLOW:
                pkt->etype = cpu_to_be16(ETH_P_IPV6);
                pmask->etype = cpu_to_be16(0xFFFF);
                req->features |= BIT_ULL(NPC_ETYPE);
                if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
                        memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
                               sizeof(pkt->ip6src));
                        memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
                               sizeof(pmask->ip6src));
                        req->features |= BIT_ULL(NPC_SIP_IPV6);
                }
                if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
                        memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
                               sizeof(pkt->ip6dst));
                        memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
                               sizeof(pmask->ip6dst));
                        req->features |= BIT_ULL(NPC_DIP_IPV6);
                }

                /* NPC profile doesn't extract AH/ESP header fields */
                if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
                    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
                        return -EOPNOTSUPP;

                if (flow_type == AH_V6_FLOW)
                        req->features |= BIT_ULL(NPC_IPPROTO_AH);
                else
                        req->features |= BIT_ULL(NPC_IPPROTO_ESP);
                break;
        default:
                break;
        }

        return 0;
}

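/* Translate an ethtool flow spec into an NPC install request: fill
 * req->packet/req->mask and set a feature bit for every field that is
 * actually matched. Specs that match nothing are rejected with
 * -EOPNOTSUPP.
 */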
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
                              struct npc_install_flow_req *req)
{
        struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
        struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
        struct flow_msg *pmask = &req->mask;
        struct flow_msg *pkt = &req->packet;
        u32 flow_type;
        int ret;

        flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
        switch (flow_type) {
        /* bits not set in mask are don't care */
        case ETHER_FLOW:
                if (!is_zero_ether_addr(eth_mask->h_source)) {
                        ether_addr_copy(pkt->smac, eth_hdr->h_source);
                        ether_addr_copy(pmask->smac, eth_mask->h_source);
                        req->features |= BIT_ULL(NPC_SMAC);
                }
                if (!is_zero_ether_addr(eth_mask->h_dest)) {
                        ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
                        ether_addr_copy(pmask->dmac, eth_mask->h_dest);
                        req->features |= BIT_ULL(NPC_DMAC);
                }
                if (eth_mask->h_proto) {
                        memcpy(&pkt->etype, &eth_hdr->h_proto,
                               sizeof(pkt->etype));
                        memcpy(&pmask->etype, &eth_mask->h_proto,
                               sizeof(pmask->etype));
                        req->features |= BIT_ULL(NPC_ETYPE);
                }
                break;
        case IP_USER_FLOW:
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
        case SCTP_V4_FLOW:
        case AH_V4_FLOW:
        case ESP_V4_FLOW:
                ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
                if (ret)
                        return ret;
                break;
        case IPV6_USER_FLOW:
        case TCP_V6_FLOW:
        case UDP_V6_FLOW:
        case SCTP_V6_FLOW:
        case AH_V6_FLOW:
        case ESP_V6_FLOW:
                ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
                if (ret)
                        return ret;
                break;
        default:
                return -EOPNOTSUPP;
        }
        if (fsp->flow_type & FLOW_EXT) {
                if (fsp->m_ext.vlan_etype)
                        return -EINVAL;
                if (fsp->m_ext.vlan_tci) {
                        if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
                                return -EINVAL;
                        if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
                                return -EINVAL;

                        memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
                               sizeof(pkt->vlan_tci));
                        memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
                               sizeof(pmask->vlan_tci));
                        req->features |= BIT_ULL(NPC_OUTER_VID);
                }

                /* Neither drop nor direct to queue: use the action from the
                 * default entry
                 */
                if (fsp->m_ext.data[1] &&
                    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
                        req->op = NIX_RX_ACTION_DEFAULT;
        }

        if (fsp->flow_type & FLOW_MAC_EXT &&
            !is_zero_ether_addr(fsp->m_ext.h_dest)) {
                ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
                ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
                req->features |= BIT_ULL(NPC_DMAC);
        }

        if (!req->features)
                return -EOPNOTSUPP;

        return 0;
}

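/* Build and send the NPC install-flow mailbox message for 'flow',
 * mapping the ethtool ring_cookie to a drop, RSS or unicast-queue
 * action, possibly steered to a VF.
 */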
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
        u64 ring_cookie = flow->flow_spec.ring_cookie;
        struct npc_install_flow_req *req;
        int err, vf = 0;

        mutex_lock(&pfvf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        err = otx2_prepare_flow_request(&flow->flow_spec, req);
        if (err) {
                /* free the msg allocated above */
                otx2_mbox_reset(&pfvf->mbox.mbox, 0);
                mutex_unlock(&pfvf->mbox.lock);
                return err;
        }

        req->entry = flow->entry;
        req->intf = NIX_INTF_RX;
        req->set_cntr = 1;
        req->channel = pfvf->hw.rx_chan_base;
        if (ring_cookie == RX_CLS_FLOW_DISC) {
                req->op = NIX_RX_ACTIONOP_DROP;
        } else {
                /* change to unicast only if the default entry's action was
                 * not requested by the user
                 */
                if (flow->flow_spec.flow_type & FLOW_RSS) {
                        req->op = NIX_RX_ACTIONOP_RSS;
                        req->index = flow->rss_ctx_id;
                } else {
                        req->op = NIX_RX_ACTIONOP_UCAST;
                        req->index = ethtool_get_flow_spec_ring(ring_cookie);
                }
                vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
                if (vf > pci_num_vf(pfvf->pdev)) {
                        mutex_unlock(&pfvf->mbox.lock);
                        return -EINVAL;
                }
        }

        /* ethtool ring_cookie encodes (VF + 1) for VF destinations */
        if (vf) {
                req->vf = vf;
                flow->is_vf = true;
                flow->vf = vf;
        }

        /* Send message to AF */
        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);
        return err;
}

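/* ethtool insert-rule handler: validate the spec, reuse or allocate
 * the otx2_flow at fsp->location, install it via the AF and track it
 * in the sorted flow list.
 */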
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct ethtool_rx_flow_spec *fsp = &nfc->fs;
        struct otx2_flow *flow;
        bool new = false;
        u32 ring;
        int err;

        ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
        if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
                return -ENOMEM;

        if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
                return -EINVAL;

        if (fsp->location >= flow_cfg->ntuple_max_flows)
                return -EINVAL;

        flow = otx2_find_flow(pfvf, fsp->location);
        if (!flow) {
                flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
                if (!flow)
                        return -ENOMEM;
                flow->location = fsp->location;
                flow->entry = flow_cfg->flow_ent[flow->location];
                new = true;
        }
        /* struct copy */
        flow->flow_spec = *fsp;

        if (fsp->flow_type & FLOW_RSS)
                flow->rss_ctx_id = nfc->rss_context;

        err = otx2_add_flow_msg(pfvf, flow);
        if (err) {
                if (new)
                        kfree(flow);
                return err;
        }

        /* add the newly installed flow to the list */
        if (new) {
                otx2_add_flow_to_list(pfvf, flow);
                flow_cfg->nr_flows++;
        }

        return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
        struct npc_delete_flow_req *req;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        req->entry = entry;
        if (all)
                req->all = 1;

        /* Send message to AF */
        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);
        return err;
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct otx2_flow *flow;
        int err;

        if (location >= flow_cfg->ntuple_max_flows)
                return -EINVAL;

        flow = otx2_find_flow(pfvf, location);
        if (!flow)
                return -ENOENT;

        err = otx2_remove_flow_msg(pfvf, flow->entry, false);
        if (err)
                return err;

        list_del(&flow->list);
        kfree(flow);
        flow_cfg->nr_flows--;

        return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
        struct otx2_flow *flow, *tmp;
        int err;

        list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
                if (flow->rss_ctx_id != ctx_id)
                        continue;
                err = otx2_remove_flow(pfvf, flow->location);
                if (err)
                        netdev_warn(pfvf->netdev,
                                    "Can't delete the rule %d associated with this rss group err:%d",
                                    flow->location, err);
        }
}

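/* Delete every installed ntuple rule with one ranged delete-flow
 * request spanning the first to last ntuple MCAM entry, then drop the
 * driver-side flow objects.
 */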
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct npc_delete_flow_req *req;
        struct otx2_flow *iter, *tmp;
        int err;

        if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
                return 0;

        mutex_lock(&pfvf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        req->start = flow_cfg->flow_ent[0];
        req->end   = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);

        list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
                list_del(&iter->list);
                kfree(iter);
                flow_cfg->nr_flows--;
        }
        return err;
}

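/* Full teardown: delete all installed flows, free the driver-side flow
 * objects, then ask the AF to free all MCAM entries owned by this PF.
 */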
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct npc_mcam_free_entry_req *req;
        struct otx2_flow *iter, *tmp;
        int err;

        if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
                return 0;

        /* remove all flows */
        err = otx2_remove_flow_msg(pfvf, 0, true);
        if (err)
                return err;

        list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
                list_del(&iter->list);
                kfree(iter);
                flow_cfg->nr_flows--;
        }

        mutex_lock(&pfvf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        req->all = 1;
        /* Send message to AF to free MCAM entries */
        err = otx2_sync_mbox_msg(&pfvf->mbox);
        if (err) {
                mutex_unlock(&pfvf->mbox.lock);
                return err;
        }

        pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
        mutex_unlock(&pfvf->mbox.lock);

        return 0;
}

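/* Install the RX VLAN offload rule in the reserved rx_vlan def_ent
 * slot: match the interface DMAC plus an outer VLAN, take the default
 * RX action and mark packets with VTAG0 type so the VTAG config in
 * otx2_enable_rxvlan() can strip the tag.
 */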
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct npc_install_flow_req *req;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
        req->intf = NIX_INTF_RX;
        ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
        eth_broadcast_addr((u8 *)&req->mask.dmac);
        req->channel = pfvf->hw.rx_chan_base;
        req->op = NIX_RX_ACTION_DEFAULT;
        req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
        req->vtag0_valid = true;
        req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

        /* Send message to AF */
        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);
        return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct npc_delete_flow_req *req;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
        /* Send message to AF */
        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);
        return err;
}

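/* Toggle RX VLAN stripping: install or delete the offload flow, then
 * program the NIX VTAG0 strip/capture config to match.
 */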
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
        struct nix_vtag_config *req;
        struct mbox_msghdr *rsp_hdr;
        int err;

        /* Don't have enough mcam entries */
        if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
                return -ENOMEM;

        if (enable) {
                err = otx2_install_rxvlan_offload_flow(pf);
                if (err)
                        return err;
        } else {
                err = otx2_delete_rxvlan_offload_flow(pf);
                if (err)
                        return err;
        }

        mutex_lock(&pf->mbox.lock);
        req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
        if (!req) {
                mutex_unlock(&pf->mbox.lock);
                return -ENOMEM;
        }

        /* config strip, capture and size */
        req->vtag_size = VTAGSIZE_T4;
        req->cfg_type = 1; /* rx vlan cfg */
        req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
        req->rx.strip_vtag = enable;
        req->rx.capture_vtag = enable;

        err = otx2_sync_mbox_msg(&pf->mbox);
        if (err) {
                mutex_unlock(&pf->mbox.lock);
                return err;
        }

        rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
        if (IS_ERR(rsp_hdr)) {
                mutex_unlock(&pf->mbox.lock);
                return PTR_ERR(rsp_hdr);
        }

        mutex_unlock(&pf->mbox.lock);
        return rsp_hdr->rc;
}