// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
	memset(entry, 0, sizeof(*entry));
	entry->in_use = true;
	entry->is_last = true;
	entry->is_frag = false;
	entry->prio = ~0x0;
	entry->handle = 0;
	entry->val.match_data = 0x0;
	entry->val.match_en = 0x0;
	entry->val.af = 1;
	entry->val.dma_ch_no = 0x0;
}
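
/*
 * With match_data/match_en both zero this entry matches every frame, and
 * val.af = 1 flags it as "accept". tc_init() installs it as the last entry
 * in the table, so frames that hit no user-installed filter still pass.
 */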

static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
					     struct tc_cls_u32_offload *cls,
					     bool free)
{
	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
	u32 loc = cls->knode.handle;
	int i;

	for (i = 0; i < priv->tc_entries_max; i++) {
		entry = &priv->tc_entries[i];
		if (!entry->in_use && !first && free)
			first = entry;
		if ((entry->handle == loc) && !free && !entry->is_frag)
			dup = entry;
	}

	if (dup)
		return dup;
	if (first) {
		first->handle = loc;
		first->in_use = true;

		/* Reset HW values */
		memset(&first->val, 0, sizeof(first->val));
	}

	return first;
}

static int tc_fill_actions(struct stmmac_tc_entry *entry,
			   struct stmmac_tc_entry *frag,
			   struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *action_entry = entry;
	const struct tc_action *act;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;
	/* For split matches, apply the action on the trailing fragment */
	if (frag)
		action_entry = frag;

	tcf_exts_for_each_action(i, act, exts) {
		/* Accept */
		if (is_tcf_gact_ok(act)) {
			action_entry->val.af = 1;
			break;
		}
		/* Drop */
		if (is_tcf_gact_shot(act)) {
			action_entry->val.rf = 1;
			break;
		}

		/* Unsupported */
		return -EINVAL;
	}

	return 0;
}
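
/*
 * Mapping (illustrative): a u32 rule with "action pass" sets the Accept
 * Frame (af) bit of the Rx parser entry, while "action drop" sets the
 * Reject Frame (rf) bit; any other gact verdict fails with -EINVAL.
 */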

static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio << 16;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;
	rem = off % 4;

	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

	if (rem) {
		/* Unaligned match: split it across two consecutive words */
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}
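
/*
 * Worked example: a 32-bit key at byte offset 6 yields real_off = 1 and
 * rem = 2. The low half of mask/data is shifted into bits 31:16 of frame
 * word 1 (bytes 6-7), and the high half lands in bits 15:0 of word 2
 * (bytes 8-9) via the frag entry that carries the second half of the match.
 */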

static void tc_unfill_entry(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry;

	entry = tc_find_entry(priv, cls, false);
	if (!entry)
		return;

	entry->in_use = false;
	if (entry->frag_ptr) {
		entry = entry->frag_ptr;
		entry->is_frag = false;
		entry->in_use = false;
	}
}

static int tc_config_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	ret = tc_fill_entry(priv, cls);
	if (ret)
		return ret;

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				priv->tc_entries_max);
	if (ret)
		goto err_unfill;

	return 0;

err_unfill:
	tc_unfill_entry(priv, cls);
	return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	/* Set entry and fragments as not used */
	tc_unfill_entry(priv, cls);

	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				 priv->tc_entries_max);
}

static int tc_setup_cls_u32(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_REPLACE_KNODE:
		tc_unfill_entry(priv, cls);
		fallthrough;
	case TC_CLSU32_NEW_KNODE:
		return tc_config_knode(priv, cls);
	case TC_CLSU32_DELETE_KNODE:
		return tc_delete_knode(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}
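
/*
 * Typical usage (illustrative): offloading a u32 drop rule into the Rx
 * parser on a hypothetical eth0:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip u32 \
 *      match u32 0x00110000 0x00ff0000 at 8 action drop
 *
 * TC_CLSU32_NEW_KNODE then reaches tc_config_knode() above.
 */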

static int tc_init(struct stmmac_priv *priv)
{
	struct dma_features *dma_cap = &priv->dma_cap;
	unsigned int count;
	int i;

	if (dma_cap->l3l4fnum) {
		priv->flow_entries_max = dma_cap->l3l4fnum;
		priv->flow_entries = devm_kcalloc(priv->device,
						  dma_cap->l3l4fnum,
						  sizeof(*priv->flow_entries),
						  GFP_KERNEL);
		if (!priv->flow_entries)
			return -ENOMEM;

		for (i = 0; i < priv->flow_entries_max; i++)
			priv->flow_entries[i].idx = i;

		dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
			 priv->flow_entries_max);
	}

	/* Fail silently as we can still use remaining features, e.g. CBS */
	if (!dma_cap->frpsel)
		return 0;

	/* HW-reported buffer-size encoding -> maximum match offset in bytes */
	switch (dma_cap->frpbs) {
	case 0x0:
		priv->tc_off_max = 64;
		break;
	case 0x1:
		priv->tc_off_max = 128;
		break;
	case 0x2:
		priv->tc_off_max = 256;
		break;
	default:
		return -EINVAL;
	}

	/* HW-reported encoding -> number of parser entries */
	switch (dma_cap->frpes) {
	case 0x0:
		count = 64;
		break;
	case 0x1:
		count = 128;
		break;
	case 0x2:
		count = 256;
		break;
	default:
		return -EINVAL;
	}

	/* Reserve one last filter which lets all pass */
	priv->tc_entries_max = count;
	priv->tc_entries = devm_kcalloc(priv->device,
					count, sizeof(*priv->tc_entries),
					GFP_KERNEL);
	if (!priv->tc_entries)
		return -ENOMEM;

	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
		 priv->tc_entries_max, priv->tc_off_max);
	return 0;
}

static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue = qopt->queue;
	u32 ptr, speed_div;
	u32 mode_to_use;
	u64 value;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;
	if (!priv->dma_cap.av)
		return -EOPNOTSUPP;

	/* Port Transmit Rate and Speed Divider */
	switch (priv->speed) {
	case SPEED_10000: ptr = 32; speed_div = 10000000; break;
	case SPEED_5000:  ptr = 32; speed_div = 5000000;  break;
	case SPEED_2500:  ptr = 8;  speed_div = 2500000;  break;
	case SPEED_1000:  ptr = 8;  speed_div = 1000000;  break;
	case SPEED_100:   ptr = 4;  speed_div = 100000;   break;
	default:
		return -EOPNOTSUPP;
	}

	/* Move the queue to AVB mode on enable, back to DCB on disable */
	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
				       MTL_QUEUE_DCB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
	}

	/* Final adjustments for HW */
	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);
	return 0;
}
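
/*
 * Example (1 Gbps link, so ptr = 8 and speed_div = 1000000): an idleslope
 * of 100000 kbit/s from tc-cbs scales to 100000 * 1024 * 8 / 1000000 = 819
 * in the hardware's 1024-based credit units, and hicredit = 100 (bytes)
 * becomes 100 * 1024 * 8 = 819200.
 */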

static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;
	if (!flow_action_basic_hw_stats_check(action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		default:
			break;
		}
	}

	/* Nothing to do, maybe inverse filter ? */
	return 0;
}

static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic match;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);
	entry->ip_proto = match.key->ip_proto;
	return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 hw_match;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);
	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, hw_match);
		if (ret)
			return ret;
	}

	return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 hw_match;
	bool is_udp;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	switch (entry->ip_proto) {
	case IPPROTO_TCP:
		is_udp = false;
		break;
	case IPPROTO_UDP:
		is_udp = true;
		break;
	default:
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);

	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, false, inv, hw_match);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}
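
/*
 * Typical usage (illustrative), assuming a NIC named eth0 with
 * hw-tc-offload enabled:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *      ip_proto udp dst_port 5001 skip_sw action drop
 *
 * tc_add_basic_flow() records ip_proto, then tc_add_ports_flow()
 * programs the L4 filter with is_udp = true.
 */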

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		struct stmmac_flow_entry *entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && (entry->in_use == false))
			return entry;
	}

	return NULL;
}

static struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};
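
/*
 * The parsers run in array order: tc_add_basic_flow() must come first so
 * that entry->ip_proto is populated before tc_add_ports_flow() decides
 * between the TCP and UDP L4 filter types.
 */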

static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry,
				    cls->common.extack);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret)
			entry->in_use = true;
	}

	if (!entry->in_use)
		return -ENOENT;

	entry->cookie = cls->cookie;
	return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	} else {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}

static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* When RSS is enabled, the filtering will be bypassed */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
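
/*
 * FLOW_CLS_REPLACE covers both add and update: tc_add_flow() first looks
 * the rule up by cookie and only claims a free entry when no existing one
 * matches.
 */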

static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct plat_stmmacenet_data *plat = priv->plat;
	struct timespec64 time, current_time;
	ktime_t current_time_ns;
	bool fpe = false;
	int i, ret = 0;
	u64 ctr;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;

	/* Decode the HW-reported interval width and GCL depth */
	switch (wid) {
	case 0x1: wid = 16; break;
	case 0x2: wid = 20; break;
	case 0x3: wid = 24; break;
	default:
		return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1: dep = 64;   break;
	case 0x2: dep = 128;  break;
	case 0x3: dep = 256;  break;
	case 0x4: dep = 512;  break;
	case 0x5: dep = 1024; break;
	default:
		return -EOPNOTSUPP;
	}

	if (!qopt->enable)
		goto disable;
	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->base_time)
		return -ERANGE;
	if (!qopt->cycle_time)
		return -ERANGE;

	if (!plat->est) {
		plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
					 GFP_KERNEL);
		if (!plat->est)
			return -ENOMEM;
	} else {
		memset(plat->est, 0, sizeof(*plat->est));
	}

	size = qopt->num_entries;

	priv->plat->est->gcl_size = size;
	priv->plat->est->enable = qopt->enable;

	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			if (fpe)
				return -EINVAL;
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			fpe = true;
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			fpe = true;
			break;
		default:
			return -EOPNOTSUPP;
		}

		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
	}

	/* Adjust for real system time */
	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
	current_time_ns = timespec64_to_ktime(current_time);
	if (ktime_after(qopt->base_time, current_time_ns)) {
		time = ktime_to_timespec64(qopt->base_time);
	} else {
		ktime_t base_time;
		s64 n;

		/* Roll base_time forward a whole number of cycles */
		n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
			      qopt->cycle_time);
		base_time = ktime_add_ns(qopt->base_time,
					 (n + 1) * qopt->cycle_time);

		time = ktime_to_timespec64(base_time);
	}

	priv->plat->est->btr[0] = (u32)time.tv_nsec;
	priv->plat->est->btr[1] = (u32)time.tv_sec;

	ctr = qopt->cycle_time;
	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->plat->est->ctr[1] = (u32)ctr;

	if (fpe && !priv->dma_cap.fpesel)
		return -EOPNOTSUPP;

	ret = stmmac_fpe_configure(priv, priv->ioaddr,
				   priv->plat->tx_queues_to_use,
				   priv->plat->rx_queues_to_use, fpe);
	if (ret && fpe) {
		netdev_err(priv->dev, "failed to enable Frame Preemption\n");
		return ret;
	}

	ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	netdev_info(priv->dev, "configured EST\n");
	return 0;

disable:
	priv->plat->est->enable = false;
	stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
			     priv->plat->clk_ptp_rate);
	return ret;
}
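
/*
 * Example GCL encoding: with wid = 24, an entry with a 500000 ns interval
 * and gate_mask 0x3 is stored as 500000 | (0x3 << 24). For the base time,
 * if base_time lies in the past, it is advanced by (n + 1) cycles so the
 * schedule starts on the next cycle boundary after "now".
 */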

static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}
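
/*
 * Typical usage (illustrative): attaching an offloaded etf qdisc to a Tx
 * queue, e.g. "tc qdisc add dev eth0 parent <root>:2 etf clockid
 * CLOCK_TAI delta 300000 offload", lands here with qopt->enable = true
 * and sets STMMAC_TBS_EN for that queue.
 */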

const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
	.setup_taprio = tc_setup_taprio,
	.setup_etf = tc_setup_etf,
};