net: stmmac: xgmac: fix a typo of register name in DPP safety handling

drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

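/* Build the catch-all entry that is programmed as the last Flexible RX
 * Parser instruction: it matches nothing (match_en == 0), accepts the
 * frame (af == 1) and terminates the instruction list (is_last).
 */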
static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
        memset(entry, 0, sizeof(*entry));
        entry->in_use = true;
        entry->is_last = true;
        entry->is_frag = false;
        entry->prio = ~0x0;
        entry->handle = 0;
        entry->val.match_data = 0x0;
        entry->val.match_en = 0x0;
        entry->val.af = 1;
        entry->val.dma_ch_no = 0x0;
}

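/* Look up a parser entry by u32 handle. With @free set, fall back to
 * claiming the first unused slot (resetting its HW values); otherwise
 * only return an existing, non-fragment entry whose handle matches.
 */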
static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
                                             struct tc_cls_u32_offload *cls,
                                             bool free)
{
        struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
        u32 loc = cls->knode.handle;
        int i;

        for (i = 0; i < priv->tc_entries_max; i++) {
                entry = &priv->tc_entries[i];
                if (!entry->in_use && !first && free)
                        first = entry;
                if ((entry->handle == loc) && !free && !entry->is_frag)
                        dup = entry;
        }

        if (dup)
                return dup;
        if (first) {
                first->handle = loc;
                first->in_use = true;

                /* Reset HW values */
                memset(&first->val, 0, sizeof(first->val));
        }

        return first;
}

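/* Translate the gact actions attached to the u32 knode into the entry's
 * accept (af) or reject (rf) flag. For matches split across two words,
 * the verdict is stored in the trailing fragment entry (@frag).
 */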
static int tc_fill_actions(struct stmmac_tc_entry *entry,
                           struct stmmac_tc_entry *frag,
                           struct tc_cls_u32_offload *cls)
{
        struct stmmac_tc_entry *action_entry = entry;
        const struct tc_action *act;
        struct tcf_exts *exts;
        int i;

        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
        if (frag)
                action_entry = frag;

        tcf_exts_for_each_action(i, act, exts) {
                /* Accept */
                if (is_tcf_gact_ok(act)) {
                        action_entry->val.af = 1;
                        break;
                }
                /* Drop */
                if (is_tcf_gact_shot(act)) {
                        action_entry->val.rf = 1;
                        break;
                }

                /* Unsupported */
                return -EINVAL;
        }

        return 0;
}

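/* Convert a u32 match into one or two 32-bit parser instructions. The HW
 * compares 32-bit words at word-aligned frame offsets, so a match at an
 * unaligned offset must be split across two consecutive words. As an
 * illustration: for frame offset 15, real_off = 3 and rem = 3, so the low
 * byte of the value is matched in bits 31..24 of word 3 and the remaining
 * three bytes in bits 23..0 of word 4.
 */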
static int tc_fill_entry(struct stmmac_priv *priv,
                         struct tc_cls_u32_offload *cls)
{
        struct stmmac_tc_entry *entry, *frag = NULL;
        struct tc_u32_sel *sel = cls->knode.sel;
        u32 off, data, mask, real_off, rem;
        u32 prio = cls->common.prio << 16;
        int ret;

        /* Only 1 match per entry */
        if (sel->nkeys <= 0 || sel->nkeys > 1)
                return -EINVAL;

        off = sel->keys[0].off << sel->offshift;
        data = sel->keys[0].val;
        mask = sel->keys[0].mask;

        switch (ntohs(cls->common.protocol)) {
        case ETH_P_ALL:
                break;
        case ETH_P_IP:
                off += ETH_HLEN;
                break;
        default:
                return -EINVAL;
        }

        if (off > priv->tc_off_max)
                return -EINVAL;

        real_off = off / 4;
        rem = off % 4;

        entry = tc_find_entry(priv, cls, true);
        if (!entry)
                return -EINVAL;

        if (rem) {
                frag = tc_find_entry(priv, cls, true);
                if (!frag) {
                        ret = -EINVAL;
                        goto err_unuse;
                }

                entry->frag_ptr = frag;
                entry->val.match_en = (mask << (rem * 8)) &
                        GENMASK(31, rem * 8);
                entry->val.match_data = (data << (rem * 8)) &
                        GENMASK(31, rem * 8);
                entry->val.frame_offset = real_off;
                entry->prio = prio;

                frag->val.match_en = (mask >> (rem * 8)) &
                        GENMASK(rem * 8 - 1, 0);
                frag->val.match_data = (data >> (rem * 8)) &
                        GENMASK(rem * 8 - 1, 0);
                frag->val.frame_offset = real_off + 1;
                frag->prio = prio;
                frag->is_frag = true;
        } else {
                entry->frag_ptr = NULL;
                entry->val.match_en = mask;
                entry->val.match_data = data;
                entry->val.frame_offset = real_off;
                entry->prio = prio;
        }

        ret = tc_fill_actions(entry, frag, cls);
        if (ret)
                goto err_unuse;

        return 0;

err_unuse:
        if (frag)
                frag->in_use = false;
        entry->in_use = false;
        return ret;
}

static void tc_unfill_entry(struct stmmac_priv *priv,
                            struct tc_cls_u32_offload *cls)
{
        struct stmmac_tc_entry *entry;

        entry = tc_find_entry(priv, cls, false);
        if (!entry)
                return;

        entry->in_use = false;
        if (entry->frag_ptr) {
                entry = entry->frag_ptr;
                entry->is_frag = false;
                entry->in_use = false;
        }
}

static int tc_config_knode(struct stmmac_priv *priv,
                           struct tc_cls_u32_offload *cls)
{
        int ret;

        ret = tc_fill_entry(priv, cls);
        if (ret)
                return ret;

        ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
                        priv->tc_entries_max);
        if (ret)
                goto err_unfill;

        return 0;

err_unfill:
        tc_unfill_entry(priv, cls);
        return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
                           struct tc_cls_u32_offload *cls)
{
        /* Set entry and fragments as not used */
        tc_unfill_entry(priv, cls);

        return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
                                 priv->tc_entries_max);
}

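/* Entry point for TC_SETUP_CLSU32: add, replace or delete a u32 knode,
 * rewriting the whole Flexible RX Parser instruction table through
 * stmmac_rxp_config() on every change.
 */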
static int tc_setup_cls_u32(struct stmmac_priv *priv,
                            struct tc_cls_u32_offload *cls)
{
        switch (cls->command) {
        case TC_CLSU32_REPLACE_KNODE:
                tc_unfill_entry(priv, cls);
                fallthrough;
        case TC_CLSU32_NEW_KNODE:
                return tc_config_knode(priv, cls);
        case TC_CLSU32_DELETE_KNODE:
                return tc_delete_knode(priv, cls);
        default:
                return -EOPNOTSUPP;
        }
}

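/* Reserve the static RFS table: 8 VLAN-priority entries plus one entry
 * each for LLDP and PTP (1588) EtherType steering.
 */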
static int tc_rfs_init(struct stmmac_priv *priv)
{
        int i;

        priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
        priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
        priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;

        for (i = 0; i < STMMAC_RFS_T_MAX; i++)
                priv->rfs_entries_total += priv->rfs_entries_max[i];

        priv->rfs_entries = devm_kcalloc(priv->device,
                                         priv->rfs_entries_total,
                                         sizeof(*priv->rfs_entries),
                                         GFP_KERNEL);
        if (!priv->rfs_entries)
                return -ENOMEM;

        dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
                 priv->rfs_entries_total);

        return 0;
}

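/* Probe-time setup: size the L3/L4 flow table from the l3l4fnum HW
 * feature, allocate the RFS and FPE state, then size the Flexible RX
 * Parser from frpbs (maximum match offset) and frpes (instruction count),
 * reserving the last slot for the catch-all pass entry.
 */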
static int tc_init(struct stmmac_priv *priv)
{
        struct dma_features *dma_cap = &priv->dma_cap;
        unsigned int count;
        int ret, i;

        if (dma_cap->l3l4fnum) {
                priv->flow_entries_max = dma_cap->l3l4fnum;
                priv->flow_entries = devm_kcalloc(priv->device,
                                                  dma_cap->l3l4fnum,
                                                  sizeof(*priv->flow_entries),
                                                  GFP_KERNEL);
                if (!priv->flow_entries)
                        return -ENOMEM;

                for (i = 0; i < priv->flow_entries_max; i++)
                        priv->flow_entries[i].idx = i;

                dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
                         priv->flow_entries_max);
        }

        ret = tc_rfs_init(priv);
        if (ret)
                return ret;

        if (!priv->plat->fpe_cfg) {
                priv->plat->fpe_cfg = devm_kzalloc(priv->device,
                                                   sizeof(*priv->plat->fpe_cfg),
                                                   GFP_KERNEL);
                if (!priv->plat->fpe_cfg)
                        return -ENOMEM;
        } else {
                memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
        }

        /* Fail silently as we can still use remaining features, e.g. CBS */
        if (!dma_cap->frpsel)
                return 0;

        switch (dma_cap->frpbs) {
        case 0x0:
                priv->tc_off_max = 64;
                break;
        case 0x1:
                priv->tc_off_max = 128;
                break;
        case 0x2:
                priv->tc_off_max = 256;
                break;
        default:
                return -EINVAL;
        }

        switch (dma_cap->frpes) {
        case 0x0:
                count = 64;
                break;
        case 0x1:
                count = 128;
                break;
        case 0x2:
                count = 256;
                break;
        default:
                return -EINVAL;
        }

        /* Reserve one last filter which lets all pass */
        priv->tc_entries_max = count;
        priv->tc_entries = devm_kcalloc(priv->device,
                        count, sizeof(*priv->tc_entries), GFP_KERNEL);
        if (!priv->tc_entries)
                return -ENOMEM;

        tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

        dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
                        priv->tc_entries_max, priv->tc_off_max);

        return 0;
}

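/* Offload a CBS qdisc onto an AVB-capable TX queue: switch the queue
 * between DCB and AVB mode and rescale the credit parameters into the
 * units the MTL registers expect (slopes scaled by port transmit rate
 * and speed divider, credits by 1024 * 8).
 *
 * Illustrative user-space usage, assuming an mqprio root with handle
 * 100: and a hypothetical interface eth0:
 *
 *	tc qdisc replace dev eth0 parent 100:2 cbs idleslope 20000 \
 *		sendslope -980000 hicredit 30 locredit -1470 offload 1
 */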
static int tc_setup_cbs(struct stmmac_priv *priv,
                        struct tc_cbs_qopt_offload *qopt)
{
        u32 tx_queues_count = priv->plat->tx_queues_to_use;
        u32 queue = qopt->queue;
        u32 ptr, speed_div;
        u32 mode_to_use;
        u64 value;
        int ret;

        /* Queue 0 is not AVB capable */
        if (queue <= 0 || queue >= tx_queues_count)
                return -EINVAL;
        if (!priv->dma_cap.av)
                return -EOPNOTSUPP;

        /* Port Transmit Rate and Speed Divider */
        switch (priv->speed) {
        case SPEED_10000:
                ptr = 32;
                speed_div = 10000000;
                break;
        case SPEED_5000:
                ptr = 32;
                speed_div = 5000000;
                break;
        case SPEED_2500:
                ptr = 8;
                speed_div = 2500000;
                break;
        case SPEED_1000:
                ptr = 8;
                speed_div = 1000000;
                break;
        case SPEED_100:
                ptr = 4;
                speed_div = 100000;
                break;
        default:
                return -EOPNOTSUPP;
        }

        mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
        if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
                ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
                if (ret)
                        return ret;

                priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
        } else if (!qopt->enable) {
                ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
                                       MTL_QUEUE_DCB);
                if (ret)
                        return ret;

                priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
        }

        /* Final adjustments for HW */
        value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
        priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

        value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
        priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

        value = qopt->hicredit * 1024ll * 8;
        priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

        value = qopt->locredit * 1024ll * 8;
        priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

        ret = stmmac_config_cbs(priv, priv->hw,
                                priv->plat->tx_queues_cfg[queue].send_slope,
                                priv->plat->tx_queues_cfg[queue].idle_slope,
                                priv->plat->tx_queues_cfg[queue].high_credit,
                                priv->plat->tx_queues_cfg[queue].low_credit,
                                queue);
        if (ret)
                return ret;

        dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
                        queue, qopt->sendslope, qopt->idleslope,
                        qopt->hicredit, qopt->locredit);
        return 0;
}

static int tc_parse_flow_actions(struct stmmac_priv *priv,
                                 struct flow_action *action,
                                 struct stmmac_flow_entry *entry,
                                 struct netlink_ext_ack *extack)
{
        struct flow_action_entry *act;
        int i;

        if (!flow_action_has_entries(action))
                return -EINVAL;

        if (!flow_action_basic_hw_stats_check(action, extack))
                return -EOPNOTSUPP;

        flow_action_for_each(i, act, action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                        entry->action |= STMMAC_FLOW_ACTION_DROP;
                        return 0;
                default:
                        break;
                }
        }

        /* Nothing to do, maybe inverse filter? */
        return 0;
}

#define ETHER_TYPE_FULL_MASK	cpu_to_be16(~0)

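/* The parsers below each translate one flower match (IP protocol, IPv4
 * addresses, TCP/UDP ports) into the corresponding L3/L4 filter
 * registers. Illustrative usage (hypothetical interface and values):
 *
 *	tc filter add dev eth0 ingress protocol ip flower ip_proto udp \
 *		dst_port 319 action drop
 */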
static int tc_add_basic_flow(struct stmmac_priv *priv,
                             struct flow_cls_offload *cls,
                             struct stmmac_flow_entry *entry)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic match;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
                return -EINVAL;

        flow_rule_match_basic(rule, &match);

        entry->ip_proto = match.key->ip_proto;
        return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
                           struct flow_cls_offload *cls,
                           struct stmmac_flow_entry *entry)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
        struct flow_match_ipv4_addrs match;
        u32 hw_match;
        int ret;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
                return -EINVAL;

        flow_rule_match_ipv4_addrs(rule, &match);
        hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
        if (hw_match) {
                ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
                                              false, true, inv, hw_match);
                if (ret)
                        return ret;
        }

        hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
        if (hw_match) {
                ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
                                              false, false, inv, hw_match);
                if (ret)
                        return ret;
        }

        return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
                             struct flow_cls_offload *cls,
                             struct stmmac_flow_entry *entry)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
        struct flow_match_ports match;
        u32 hw_match;
        bool is_udp;
        int ret;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
                return -EINVAL;

        switch (entry->ip_proto) {
        case IPPROTO_TCP:
                is_udp = false;
                break;
        case IPPROTO_UDP:
                is_udp = true;
                break;
        default:
                return -EINVAL;
        }

        flow_rule_match_ports(rule, &match);

        hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
        if (hw_match) {
                ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
                                              is_udp, true, inv, hw_match);
                if (ret)
                        return ret;
        }

        hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
        if (hw_match) {
                ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
                                              is_udp, false, inv, hw_match);
                if (ret)
                        return ret;
        }

        entry->is_l4 = true;
        return 0;
}

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
                                              struct flow_cls_offload *cls,
                                              bool get_free)
{
        int i;

        for (i = 0; i < priv->flow_entries_max; i++) {
                struct stmmac_flow_entry *entry = &priv->flow_entries[i];

                if (entry->cookie == cls->cookie)
                        return entry;
                if (get_free && !entry->in_use)
                        return entry;
        }

        return NULL;
}

static struct {
        int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
                  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
        { .fn = tc_add_basic_flow },
        { .fn = tc_add_ip4_flow },
        { .fn = tc_add_ports_flow },
};

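/* Program an L3/L4 flow: reuse the entry with a matching cookie or claim
 * a free one, parse the actions, then run every parser; the entry is kept
 * only if at least one parser managed to program a filter.
 */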
static int tc_add_flow(struct stmmac_priv *priv,
                       struct flow_cls_offload *cls)
{
        struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        int i, ret;

        if (!entry) {
                entry = tc_find_flow(priv, cls, true);
                if (!entry)
                        return -ENOENT;
        }

        ret = tc_parse_flow_actions(priv, &rule->action, entry,
                                    cls->common.extack);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
                ret = tc_flow_parsers[i].fn(priv, cls, entry);
                if (!ret)
                        entry->in_use = true;
        }

        if (!entry->in_use)
                return -EINVAL;

        entry->cookie = cls->cookie;
        return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
                       struct flow_cls_offload *cls)
{
        struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
        int ret;

        if (!entry || !entry->in_use)
                return -ENOENT;

        if (entry->is_l4) {
                ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
                                              false, false, false, 0);
        } else {
                ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
                                              false, false, false, 0);
        }

        entry->in_use = false;
        entry->cookie = 0;
        entry->is_l4 = false;
        return ret;
}

static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
                                            struct flow_cls_offload *cls,
                                            bool get_free)
{
        int i;

        for (i = 0; i < priv->rfs_entries_total; i++) {
                struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];

                if (entry->cookie == cls->cookie)
                        return entry;
                if (get_free && !entry->in_use)
                        return entry;
        }

        return NULL;
}

#define VLAN_PRIO_FULL_MASK (0x07)

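/* Steer frames by VLAN priority: a fully-masked PCP value is mapped onto
 * the RX queue backing the requested traffic class. Illustrative usage
 * (hypothetical interface):
 *
 *	tc filter add dev eth0 ingress protocol 802.1Q flower \
 *		vlan_prio 5 hw_tc 2
 */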
static int tc_add_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
        struct flow_match_vlan match;

        if (!entry) {
                entry = tc_find_rfs(priv, cls, true);
                if (!entry)
                        return -ENOENT;
        }

        if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
            priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
                return -ENOENT;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
                return -EINVAL;

        if (tc < 0) {
                netdev_err(priv->dev, "Invalid traffic class\n");
                return -EINVAL;
        }

        flow_rule_match_vlan(rule, &match);

        if (match.mask->vlan_priority) {
                u32 prio;

                if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
                        netdev_err(priv->dev, "Only full mask is supported for VLAN priority\n");
                        return -EINVAL;
                }

                prio = BIT(match.key->vlan_priority);
                stmmac_rx_queue_prio(priv, priv->hw, prio, tc);

                entry->in_use = true;
                entry->cookie = cls->cookie;
                entry->tc = tc;
                entry->type = STMMAC_RFS_T_VLAN;
                priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
        }

        return 0;
}

static int tc_del_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

        if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
                return -ENOENT;

        stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);

        entry->in_use = false;
        entry->cookie = 0;
        entry->tc = 0;
        entry->type = 0;

        priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;

        return 0;
}

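/* Route frames by EtherType: LLDP (0x88cc) and PTP (0x88f7) frames can
 * be steered to the RX queue of a traffic class via the DCBCP/PTP queue
 * routing registers. Illustrative usage (hypothetical interface):
 *
 *	tc filter add dev eth0 ingress protocol 0x88f7 flower hw_tc 1
 */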
static int tc_add_ethtype_flow(struct stmmac_priv *priv,
                               struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
        struct flow_match_basic match;

        if (!entry) {
                entry = tc_find_rfs(priv, cls, true);
                if (!entry)
                        return -ENOENT;
        }

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
                return -EINVAL;

        if (tc < 0) {
                netdev_err(priv->dev, "Invalid traffic class\n");
                return -EINVAL;
        }

        flow_rule_match_basic(rule, &match);

        if (match.mask->n_proto) {
                u16 etype = ntohs(match.key->n_proto);

                if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
                        netdev_err(priv->dev, "Only full mask is supported for EthType filter\n");
                        return -EINVAL;
                }
                switch (etype) {
                case ETH_P_LLDP:
                        if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
                            priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
                                return -ENOENT;

                        entry->type = STMMAC_RFS_T_LLDP;
                        priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;

                        stmmac_rx_queue_routing(priv, priv->hw,
                                                PACKET_DCBCPQ, tc);
                        break;
                case ETH_P_1588:
                        if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
                            priv->rfs_entries_max[STMMAC_RFS_T_1588])
                                return -ENOENT;

                        entry->type = STMMAC_RFS_T_1588;
                        priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;

                        stmmac_rx_queue_routing(priv, priv->hw,
                                                PACKET_PTPQ, tc);
                        break;
                default:
                        netdev_err(priv->dev, "EthType(0x%x) is not supported\n", etype);
                        return -EINVAL;
                }

                entry->in_use = true;
                entry->cookie = cls->cookie;
                entry->tc = tc;
                entry->etype = etype;

                return 0;
        }

        return -EINVAL;
}

static int tc_del_ethtype_flow(struct stmmac_priv *priv,
                               struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

        if (!entry || !entry->in_use ||
            entry->type < STMMAC_RFS_T_LLDP ||
            entry->type > STMMAC_RFS_T_1588)
                return -ENOENT;

        switch (entry->etype) {
        case ETH_P_LLDP:
                stmmac_rx_queue_routing(priv, priv->hw,
                                        PACKET_DCBCPQ, 0);
                priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
                break;
        case ETH_P_1588:
                stmmac_rx_queue_routing(priv, priv->hw,
                                        PACKET_PTPQ, 0);
                priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
                break;
        default:
                netdev_err(priv->dev, "EthType(0x%x) is not supported\n",
                           entry->etype);
                return -EINVAL;
        }

        entry->in_use = false;
        entry->cookie = 0;
        entry->tc = 0;
        entry->etype = 0;
        entry->type = 0;

        return 0;
}

static int tc_add_flow_cls(struct stmmac_priv *priv,
                           struct flow_cls_offload *cls)
{
        int ret;

        ret = tc_add_flow(priv, cls);
        if (!ret)
                return ret;

        ret = tc_add_ethtype_flow(priv, cls);
        if (!ret)
                return ret;

        return tc_add_vlan_flow(priv, cls);
}

static int tc_del_flow_cls(struct stmmac_priv *priv,
                           struct flow_cls_offload *cls)
{
        int ret;

        ret = tc_del_flow(priv, cls);
        if (!ret)
                return ret;

        ret = tc_del_ethtype_flow(priv, cls);
        if (!ret)
                return ret;

        return tc_del_vlan_flow(priv, cls);
}

static int tc_setup_cls(struct stmmac_priv *priv,
                        struct flow_cls_offload *cls)
{
        int ret = 0;

        /* When RSS is enabled, the filtering will be bypassed */
        if (priv->rss.enable)
                return -EBUSY;

        switch (cls->command) {
        case FLOW_CLS_REPLACE:
                ret = tc_add_flow_cls(priv, cls);
                break;
        case FLOW_CLS_DESTROY:
                ret = tc_del_flow_cls(priv, cls);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return ret;
}

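/* Return a base time that lies in the future: if @old_base_time already
 * does, it is used as-is; otherwise it is advanced by whole cycles. For
 * example, with old_base_time = 1000 ns, current_time = 3500 ns and
 * cycle_time = 1000 ns, n = 2 and the result is 1000 + 3 * 1000 = 4000 ns.
 */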
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
                                           ktime_t current_time,
                                           u64 cycle_time)
{
        struct timespec64 time;

        if (ktime_after(old_base_time, current_time)) {
                time = ktime_to_timespec64(old_base_time);
        } else {
                s64 n;
                ktime_t base_time;

                n = div64_s64(ktime_sub_ns(current_time, old_base_time),
                              cycle_time);
                base_time = ktime_add_ns(old_base_time,
                                         (n + 1) * cycle_time);

                time = ktime_to_timespec64(base_time);
        }

        return time;
}

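/* Offload a taprio schedule into the EST gate control list. Each GCL word
 * packs the interval into the low @wid bits and the gate mask above them;
 * set-and-hold/set-and-release entries additionally drive gate bit 0 and
 * request frame preemption (FPE). Illustrative user-space usage
 * (hypothetical interface and schedule):
 *
 *	tc qdisc replace dev eth0 parent root taprio num_tc 2 \
 *		map 0 1 queues 1@0 1@1 base-time 1000000000 \
 *		sched-entry S 01 300000 sched-entry S 02 700000 \
 *		flags 0x2
 */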
static int tc_setup_taprio(struct stmmac_priv *priv,
                           struct tc_taprio_qopt_offload *qopt)
{
        u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
        struct plat_stmmacenet_data *plat = priv->plat;
        struct timespec64 time, current_time, qopt_time;
        ktime_t current_time_ns;
        bool fpe = false;
        int i, ret = 0;
        u64 ctr;

        if (qopt->base_time < 0)
                return -ERANGE;

        if (!priv->dma_cap.estsel)
                return -EOPNOTSUPP;

        switch (wid) {
        case 0x1:
                wid = 16;
                break;
        case 0x2:
                wid = 20;
                break;
        case 0x3:
                wid = 24;
                break;
        default:
                return -EOPNOTSUPP;
        }

        switch (dep) {
        case 0x1:
                dep = 64;
                break;
        case 0x2:
                dep = 128;
                break;
        case 0x3:
                dep = 256;
                break;
        case 0x4:
                dep = 512;
                break;
        case 0x5:
                dep = 1024;
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (qopt->cmd == TAPRIO_CMD_DESTROY)
                goto disable;
        else if (qopt->cmd != TAPRIO_CMD_REPLACE)
                return -EOPNOTSUPP;

        if (qopt->num_entries >= dep)
                return -EINVAL;
        if (!qopt->cycle_time)
                return -ERANGE;

        if (!plat->est) {
                plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
                                         GFP_KERNEL);
                if (!plat->est)
                        return -ENOMEM;

                mutex_init(&priv->plat->est->lock);
        } else {
                memset(plat->est, 0, sizeof(*plat->est));
        }

        size = qopt->num_entries;

        mutex_lock(&priv->plat->est->lock);
        priv->plat->est->gcl_size = size;
        priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
        mutex_unlock(&priv->plat->est->lock);

        for (i = 0; i < size; i++) {
                s64 delta_ns = qopt->entries[i].interval;
                u32 gates = qopt->entries[i].gate_mask;

                if (delta_ns > GENMASK(wid, 0))
                        return -ERANGE;
                if (gates > GENMASK(31 - wid, 0))
                        return -ERANGE;

                switch (qopt->entries[i].command) {
                case TC_TAPRIO_CMD_SET_GATES:
                        if (fpe)
                                return -EINVAL;
                        break;
                case TC_TAPRIO_CMD_SET_AND_HOLD:
                        gates |= BIT(0);
                        fpe = true;
                        break;
                case TC_TAPRIO_CMD_SET_AND_RELEASE:
                        gates &= ~BIT(0);
                        fpe = true;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                priv->plat->est->gcl[i] = delta_ns | (gates << wid);
        }

        mutex_lock(&priv->plat->est->lock);
        /* Adjust for real system time */
        priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
        current_time_ns = timespec64_to_ktime(current_time);
        time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
                                        qopt->cycle_time);

        priv->plat->est->btr[0] = (u32)time.tv_nsec;
        priv->plat->est->btr[1] = (u32)time.tv_sec;

        qopt_time = ktime_to_timespec64(qopt->base_time);
        priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
        priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;

        ctr = qopt->cycle_time;
        priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
        priv->plat->est->ctr[1] = (u32)ctr;

        if (fpe && !priv->dma_cap.fpesel) {
                mutex_unlock(&priv->plat->est->lock);
                return -EOPNOTSUPP;
        }

        /* Actual FPE register configuration will be done after the FPE
         * handshake succeeds.
         */
        priv->plat->fpe_cfg->enable = fpe;

        ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
                                   priv->plat->clk_ptp_rate);
        mutex_unlock(&priv->plat->est->lock);
        if (ret) {
                netdev_err(priv->dev, "failed to configure EST\n");
                goto disable;
        }

        netdev_info(priv->dev, "configured EST\n");

        if (fpe) {
                stmmac_fpe_handshake(priv, true);
                netdev_info(priv->dev, "start FPE handshake\n");
        }

        return 0;

disable:
        if (priv->plat->est) {
                mutex_lock(&priv->plat->est->lock);
                priv->plat->est->enable = false;
                stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
                                     priv->plat->clk_ptp_rate);
                mutex_unlock(&priv->plat->est->lock);
        }

        priv->plat->fpe_cfg->enable = false;
        stmmac_fpe_configure(priv, priv->ioaddr,
                             priv->plat->fpe_cfg,
                             priv->plat->tx_queues_to_use,
                             priv->plat->rx_queues_to_use,
                             false);
        netdev_info(priv->dev, "disabled FPE\n");

        stmmac_fpe_handshake(priv, false);
        netdev_info(priv->dev, "stop FPE handshake\n");

        return ret;
}

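/* Enable or disable TBS (time-based scheduling) on a TX queue on behalf
 * of the ETF qdisc. Illustrative usage (hypothetical interface, mqprio
 * root with handle 100:):
 *
 *	tc qdisc replace dev eth0 parent 100:1 etf clockid CLOCK_TAI \
 *		delta 500000 offload
 */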
static int tc_setup_etf(struct stmmac_priv *priv,
                        struct tc_etf_qopt_offload *qopt)
{
        if (!priv->dma_cap.tbssel)
                return -EOPNOTSUPP;
        if (qopt->queue >= priv->plat->tx_queues_to_use)
                return -EINVAL;
        if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
                return -EINVAL;

        if (qopt->enable)
                priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
        else
                priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

        netdev_info(priv->dev, "%s ETF for Queue %d\n",
                    qopt->enable ? "enabled" : "disabled", qopt->queue);
        return 0;
}

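/* Report static qdisc capabilities: only taprio queries these today, and
 * the answer is that gate masks refer to TX queues, not traffic classes.
 */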
static int tc_query_caps(struct stmmac_priv *priv,
                         struct tc_query_caps_base *base)
{
        switch (base->type) {
        case TC_SETUP_QDISC_TAPRIO: {
                struct tc_taprio_caps *caps = base->caps;

                if (!priv->dma_cap.estsel)
                        return -EOPNOTSUPP;

                caps->gate_mask_per_txq = true;

                return 0;
        }
        default:
                return -EOPNOTSUPP;
        }
}

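/* TC callbacks for the dwmac 5.10+ cores; presumably wired up through the
 * HW interface table (hwif.c), as with the other stmmac ops structures.
 */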
const struct stmmac_tc_ops dwmac510_tc_ops = {
        .init = tc_init,
        .setup_cls_u32 = tc_setup_cls_u32,
        .setup_cbs = tc_setup_cbs,
        .setup_cls = tc_setup_cls,
        .setup_taprio = tc_setup_taprio,
        .setup_etf = tc_setup_etf,
        .query_caps = tc_query_caps,
};