1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
3 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/of_platform.h>
12 #include <linux/of_mdio.h>
13 #include <linux/of_net.h>
15 #include <linux/if_arp.h>
16 #include <linux/if_vlan.h>
17 #include <linux/icmp.h>
19 #include <linux/ipv6.h>
20 #include <linux/udp.h>
21 #include <linux/tcp.h>
22 #include <linux/net.h>
23 #include <linux/skbuff.h>
24 #include <linux/etherdevice.h>
25 #include <linux/if_ether.h>
26 #include <linux/highmem.h>
27 #include <linux/percpu.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/sort.h>
30 #include <linux/phy_fixed.h>
31 #include <linux/bpf.h>
32 #include <linux/bpf_trace.h>
33 #include <soc/fsl/bman.h>
34 #include <soc/fsl/qman.h>
36 #include "fman_port.h"
40 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
41 * using trace events only need to #include "dpaa_eth_trace.h"
43 #define CREATE_TRACE_POINTS
44 #include "dpaa_eth_trace.h"
46 static int debug = -1;
47 module_param(debug, int, 0444);
48 MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
50 static u16 tx_timeout = 1000;
51 module_param(tx_timeout, ushort, 0444);
52 MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
54 #define FM_FD_STAT_RX_ERRORS \
55 (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
56 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
57 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
58 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
59 FM_FD_ERR_PRS_HDR_ERR)
61 #define FM_FD_STAT_TX_ERRORS \
62 (FM_FD_ERR_UNSUPPORTED_FORMAT | \
63 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
65 #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
66 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
67 NETIF_MSG_IFDOWN | NETIF_MSG_HW)
69 #define DPAA_INGRESS_CS_THRESHOLD 0x10000000
70 /* Ingress congestion threshold on FMan ports
71 * The size in bytes of the ingress tail-drop threshold on FMan ports.
72 * Traffic piling up above this value will be rejected by QMan and discarded
76 /* Size in bytes of the FQ taildrop threshold */
77 #define DPAA_FQ_TD 0x200000
79 #define DPAA_CS_THRESHOLD_1G 0x06000000
80 /* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
81 * The size in bytes of the egress Congestion State notification threshold on
82 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
83 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
84 * and the larger the frame size, the more acute the problem.
85 * So we have to find a balance between these factors:
86 * - avoiding the device staying congested for a prolonged time (risking
87 * that the netdev watchdog fires - see also the tx_timeout module param);
88 * - affecting performance of protocols such as TCP, which otherwise
89 * behave well under the congestion notification mechanism;
90 * - preventing the Tx cores from tightly-looping (as if the congestion
91 * threshold was too low to be effective);
92 * - running out of memory if the CS threshold is set too high.
95 #define DPAA_CS_THRESHOLD_10G 0x10000000
96 /* The size in bytes of the egress Congestion State notification threshold on
97 * 10G ports, range 0x1000 .. 0x10000000
100 /* Largest value that the FQD's OAL field can hold */
101 #define FSL_QMAN_MAX_OAL 127
103 /* Default alignment for start of data in an Rx FD */
104 #ifdef CONFIG_DPAA_ERRATUM_A050385
105 /* aligning data start to 64 avoids DMA transaction splits, unless the buffer
106 * is crossing a 4k page boundary
108 #define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
109 /* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
110 * crossings; also, all SG fragments except the last must have a size multiple
111 * of 256 to avoid DMA transaction splits
113 #define DPAA_A050385_ALIGN 256
114 #define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
115 DPAA_A050385_ALIGN : 16)
117 #define DPAA_FD_DATA_ALIGNMENT 16
118 #define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
121 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
122 #define DPAA_SGT_SIZE 256
124 /* Values for the L3R field of the FM Parse Results
126 /* L3 Type field: First IP Present IPv4 */
127 #define FM_L3_PARSE_RESULT_IPV4 0x8000
128 /* L3 Type field: First IP Present IPv6 */
129 #define FM_L3_PARSE_RESULT_IPV6 0x4000
130 /* Values for the L4R field of the FM Parse Results */
131 /* L4 Type field: UDP */
132 #define FM_L4_PARSE_RESULT_UDP 0x40
133 /* L4 Type field: TCP */
134 #define FM_L4_PARSE_RESULT_TCP 0x20
136 /* FD status field indicating whether the FM Parser has attempted to validate
137 * the L4 csum of the frame.
138 * Note that having this bit set doesn't necessarily imply that the checksum
139 * is valid. One would have to check the parse results to find that out.
141 #define FM_FD_STAT_L4CV 0x00000004
143 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
144 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
146 #define FSL_DPAA_BPID_INV 0xff
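/* Per-CPU Rx buffer pool targets: dpaa_eth_refill_bpool() tops the pool back
 * up towards FSL_DPAA_ETH_MAX_BUF_COUNT once the per-CPU count drops below
 * FSL_DPAA_ETH_REFILL_THRESHOLD.
 */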
147 #define FSL_DPAA_ETH_MAX_BUF_COUNT 128
148 #define FSL_DPAA_ETH_REFILL_THRESHOLD 80
150 #define DPAA_TX_PRIV_DATA_SIZE 16
151 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
152 #define DPAA_TIME_STAMP_SIZE 8
153 #define DPAA_HASH_RESULTS_SIZE 8
154 #define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
155 + DPAA_HASH_RESULTS_SIZE)
156 #define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
157 XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
158 #ifdef CONFIG_DPAA_ERRATUM_A050385
159 #define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
160 #define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
161 DPAA_RX_PRIV_DATA_A050385_SIZE : \
162 DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
164 #define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
167 #define DPAA_ETH_PCD_RXQ_NUM 128
169 #define DPAA_ENQUEUE_RETRIES 100000
171 enum port_type {RX, TX};
174 struct dpaa_fq *tx_defq;
175 struct dpaa_fq *tx_errq;
176 struct dpaa_fq *rx_defq;
177 struct dpaa_fq *rx_errq;
178 struct dpaa_fq *rx_pcdq;
181 /* All the dpa bps in use at any moment */
182 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
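/* Each Rx buffer is backed by a page from dev_alloc_pages(); DPAA_BP_RAW_SIZE
 * bytes of it are DMA-mapped, and dpaa_bp_size() derives the usable data size.
 */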
184 #define DPAA_BP_RAW_SIZE 4096
186 #ifdef CONFIG_DPAA_ERRATUM_A050385
187 #define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
188 ~(DPAA_A050385_ALIGN - 1))
190 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
193 static int dpaa_max_frm;
195 static int dpaa_rx_extra_headroom;
197 #define dpaa_get_max_mtu() \
198 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
200 static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
202 static int dpaa_netdev_init(struct net_device *net_dev,
203 const struct net_device_ops *dpaa_ops,
206 struct dpaa_priv *priv = netdev_priv(net_dev);
207 struct device *dev = net_dev->dev.parent;
208 struct mac_device *mac_dev = priv->mac_dev;
209 struct dpaa_percpu_priv *percpu_priv;
213 /* Although we access another CPU's private data here,
214 * we do it at initialization, so it is safe
216 for_each_possible_cpu(i) {
217 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
218 percpu_priv->net_dev = net_dev;
221 net_dev->netdev_ops = dpaa_ops;
222 mac_addr = mac_dev->addr;
224 net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
225 net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
227 net_dev->min_mtu = ETH_MIN_MTU;
228 net_dev->max_mtu = dpaa_get_max_mtu();
230 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
231 NETIF_F_LLTX | NETIF_F_RXHASH);
233 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
234 /* The kernel enables GSO automatically if we declare NETIF_F_SG.
235 * For conformity, we'll still declare GSO explicitly.
237 net_dev->features |= NETIF_F_GSO;
238 net_dev->features |= NETIF_F_RXCSUM;
240 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
241 /* we do not want shared skbs on TX */
242 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
244 net_dev->features |= net_dev->hw_features;
245 net_dev->vlan_features = net_dev->features;
247 net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
248 NETDEV_XDP_ACT_REDIRECT |
249 NETDEV_XDP_ACT_NDO_XMIT;
251 if (is_valid_ether_addr(mac_addr)) {
252 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
253 eth_hw_addr_set(net_dev, mac_addr);
255 eth_hw_addr_random(net_dev);
256 err = mac_dev->change_addr(mac_dev->fman_mac,
257 (const enet_addr_t *)net_dev->dev_addr);
259 dev_err(dev, "Failed to set random MAC address\n");
262 dev_info(dev, "Using random MAC address: %pM\n",
266 net_dev->ethtool_ops = &dpaa_ethtool_ops;
268 net_dev->needed_headroom = priv->tx_headroom;
269 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
271 /* The rest of the config is filled in by the mac device already */
272 mac_dev->phylink_config.dev = &net_dev->dev;
273 mac_dev->phylink_config.type = PHYLINK_NETDEV;
274 mac_dev->update_speed = dpaa_eth_cgr_set_speed;
275 mac_dev->phylink = phylink_create(&mac_dev->phylink_config,
276 dev_fwnode(mac_dev->dev),
278 mac_dev->phylink_ops);
279 if (IS_ERR(mac_dev->phylink)) {
280 err = PTR_ERR(mac_dev->phylink);
281 dev_err_probe(dev, err, "Could not create phylink\n");
285 /* start without the RUNNING flag, phylib controls it later */
286 netif_carrier_off(net_dev);
288 err = register_netdev(net_dev);
290 dev_err(dev, "register_netdev() = %d\n", err);
291 phylink_destroy(mac_dev->phylink);
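/* Stop the interface: quiesce the Tx queues, stop phylink, disable the MAC,
 * then the FMan ports, and finally detach the PHY.
 */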
298 static int dpaa_stop(struct net_device *net_dev)
300 struct mac_device *mac_dev;
301 struct dpaa_priv *priv;
304 priv = netdev_priv(net_dev);
305 mac_dev = priv->mac_dev;
307 netif_tx_stop_all_queues(net_dev);
308 /* Allow the Fman (Tx) port to process in-flight frames before we
309 * try switching it off.
313 phylink_stop(mac_dev->phylink);
314 mac_dev->disable(mac_dev->fman_mac);
316 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
317 error = fman_port_disable(mac_dev->port[i]);
322 phylink_disconnect_phy(mac_dev->phylink);
323 net_dev->phydev = NULL;
330 static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
332 struct dpaa_percpu_priv *percpu_priv;
333 const struct dpaa_priv *priv;
335 priv = netdev_priv(net_dev);
336 percpu_priv = this_cpu_ptr(priv->percpu_priv);
338 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
339 jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
341 percpu_priv->stats.tx_errors++;
344 /* Calculates the statistics for the given device by adding the statistics
345 * collected by each CPU.
347 static void dpaa_get_stats64(struct net_device *net_dev,
348 struct rtnl_link_stats64 *s)
350 int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
351 struct dpaa_priv *priv = netdev_priv(net_dev);
352 struct dpaa_percpu_priv *percpu_priv;
353 u64 *netstats = (u64 *)s;
357 for_each_possible_cpu(i) {
358 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
360 cpustats = (u64 *)&percpu_priv->stats;
362 /* add stats from all CPUs */
363 for (j = 0; j < numstats; j++)
364 netstats[j] += cpustats[j];
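/* Handle mqprio offload requests: map each requested traffic class onto its
 * own group of DPAA_TC_TXQ_NUM hardware Tx queues.
 */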
368 static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
371 struct dpaa_priv *priv = netdev_priv(net_dev);
372 struct tc_mqprio_qopt *mqprio = type_data;
376 if (type != TC_SETUP_QDISC_MQPRIO)
379 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
380 num_tc = mqprio->num_tc;
382 if (num_tc == priv->num_tc)
386 netdev_reset_tc(net_dev);
390 if (num_tc > DPAA_TC_NUM) {
391 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
396 netdev_set_num_tc(net_dev, num_tc);
398 for (i = 0; i < num_tc; i++)
399 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
400 i * DPAA_TC_TXQ_NUM);
403 priv->num_tc = num_tc ? : 1;
404 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
408 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
410 struct dpaa_eth_data *eth_data;
411 struct device *dpaa_dev;
412 struct mac_device *mac_dev;
414 dpaa_dev = &pdev->dev;
415 eth_data = dpaa_dev->platform_data;
417 dev_err(dpaa_dev, "eth_data missing\n");
418 return ERR_PTR(-ENODEV);
420 mac_dev = eth_data->mac_dev;
422 dev_err(dpaa_dev, "mac_dev missing\n");
423 return ERR_PTR(-EINVAL);
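/* Update the software MAC address, then program it into the MAC; if the
 * hardware update fails, revert to the previous address.
 */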
429 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
431 const struct dpaa_priv *priv;
432 struct mac_device *mac_dev;
433 struct sockaddr old_addr;
436 priv = netdev_priv(net_dev);
438 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
440 err = eth_mac_addr(net_dev, addr);
442 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
446 mac_dev = priv->mac_dev;
448 err = mac_dev->change_addr(mac_dev->fman_mac,
449 (const enet_addr_t *)net_dev->dev_addr);
451 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
453 /* reverting to previous address */
454 eth_mac_addr(net_dev, &old_addr);
462 static void dpaa_set_rx_mode(struct net_device *net_dev)
464 const struct dpaa_priv *priv;
467 priv = netdev_priv(net_dev);
469 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
470 priv->mac_dev->promisc = !priv->mac_dev->promisc;
471 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
472 priv->mac_dev->promisc);
474 netif_err(priv, drv, net_dev,
475 "mac_dev->set_promisc() = %d\n",
479 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
480 priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
481 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
482 priv->mac_dev->allmulti);
484 netif_err(priv, drv, net_dev,
485 "mac_dev->set_allmulti() = %d\n",
489 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
491 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
495 static struct dpaa_bp *dpaa_bpid2pool(int bpid)
497 if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
500 return dpaa_bp_array[bpid];
503 /* checks if this bpool is already allocated */
504 static bool dpaa_bpid2pool_use(int bpid)
506 if (dpaa_bpid2pool(bpid)) {
507 refcount_inc(&dpaa_bp_array[bpid]->refs);
514 /* called only once per bpid by dpaa_bp_alloc_pool() */
515 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
517 dpaa_bp_array[bpid] = dpaa_bp;
518 refcount_set(&dpaa_bp->refs, 1);
521 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
525 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
526 pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
531 /* If the pool is already specified, we only create one per bpid */
532 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
533 dpaa_bpid2pool_use(dpaa_bp->bpid))
536 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
537 dpaa_bp->pool = bman_new_pool();
538 if (!dpaa_bp->pool) {
539 pr_err("%s: bman_new_pool() failed\n",
544 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
547 if (dpaa_bp->seed_cb) {
548 err = dpaa_bp->seed_cb(dpaa_bp);
550 goto pool_seed_failed;
553 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
558 pr_err("%s: pool seeding failed\n", __func__);
559 bman_free_pool(dpaa_bp->pool);
564 /* remove and free all the buffers from the given buffer pool */
565 static void dpaa_bp_drain(struct dpaa_bp *bp)
571 struct bm_buffer bmb[8];
574 ret = bman_acquire(bp->pool, bmb, num);
577 /* we have fewer than 8 buffers left;
578 * drain them one by one
584 /* Pool is fully drained */
590 for (i = 0; i < num; i++)
591 bp->free_buf_cb(bp, &bmb[i]);
595 static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
597 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
599 /* The mapping between bpid and dpaa_bp is done very late in the
600 * allocation procedure; if something failed before the mapping, the bp
601 * was never configured, so there is nothing to undo below
606 if (!refcount_dec_and_test(&bp->refs))
612 dpaa_bp_array[bp->bpid] = NULL;
613 bman_free_pool(bp->pool);
616 static void dpaa_bps_free(struct dpaa_priv *priv)
618 dpaa_bp_free(priv->dpaa_bp);
621 /* Use multiple WQs for FQ assignment:
622 * - Tx Confirmation queues go to WQ1.
623 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
624 * to be scheduled, in case there are many more FQs in WQ6).
625 * - Rx Default goes to WQ6.
626 * - Tx queues go to different WQs depending on their priority. Equal
627 * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
628 * WQ0 (highest priority).
629 * This ensures that Tx-confirmed buffers are released in a timely manner. In particular,
630 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
631 * are greatly outnumbered by other FQs in the system, while
632 * dequeue scheduling is round-robin.
634 static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
636 switch (fq->fq_type) {
637 case FQ_TYPE_TX_CONFIRM:
638 case FQ_TYPE_TX_CONF_MQ:
641 case FQ_TYPE_RX_ERROR:
642 case FQ_TYPE_TX_ERROR:
645 case FQ_TYPE_RX_DEFAULT:
650 switch (idx / DPAA_TC_TXQ_NUM) {
652 /* Low priority (best effort) */
656 /* Medium priority */
664 /* Very high priority */
668 WARN(1, "Too many TX FQs: more than %d!\n",
673 WARN(1, "Invalid FQ type %d for FQID %d!\n",
674 fq->fq_type, fq->fqid);
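/* Allocate @count dpaa_fq descriptors of the given type, give them FQIDs
 * starting at @start (0 means dynamically allocated FQIDs), add them to
 * @list and assign each a work queue.
 */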
678 static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
679 u32 start, u32 count,
680 struct list_head *list,
681 enum dpaa_fq_type fq_type)
683 struct dpaa_fq *dpaa_fq;
686 dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
691 for (i = 0; i < count; i++) {
692 dpaa_fq[i].fq_type = fq_type;
693 dpaa_fq[i].fqid = start ? start + i : 0;
694 list_add_tail(&dpaa_fq[i].list, list);
697 for (i = 0; i < count; i++)
698 dpaa_assign_wq(dpaa_fq + i, i);
703 static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
704 struct fm_port_fqs *port_fqs)
706 struct dpaa_fq *dpaa_fq;
707 u32 fq_base, fq_base_aligned, i;
709 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
711 goto fq_alloc_failed;
713 port_fqs->rx_errq = &dpaa_fq[0];
715 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
717 goto fq_alloc_failed;
719 port_fqs->rx_defq = &dpaa_fq[0];
721 /* The PCD FQID range needs to be aligned for correct operation */
722 if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
723 goto fq_alloc_failed;
725 fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
727 for (i = fq_base; i < fq_base_aligned; i++)
728 qman_release_fqid(i);
730 for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
731 i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
732 qman_release_fqid(i);
734 dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
735 list, FQ_TYPE_RX_PCD);
737 goto fq_alloc_failed;
739 port_fqs->rx_pcdq = &dpaa_fq[0];
741 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
742 goto fq_alloc_failed;
744 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
746 goto fq_alloc_failed;
748 port_fqs->tx_errq = &dpaa_fq[0];
750 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
752 goto fq_alloc_failed;
754 port_fqs->tx_defq = &dpaa_fq[0];
756 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
757 goto fq_alloc_failed;
762 dev_err(dev, "dpaa_fq_alloc() failed\n");
766 static u32 rx_pool_channel;
767 static DEFINE_SPINLOCK(rx_pool_channel_init);
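/* Lazily allocate a single QMan pool channel, shared by all DPAA interfaces,
 * to carry Rx traffic; the spinlock guards the one-time allocation.
 */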
769 static int dpaa_get_channel(void)
771 spin_lock(&rx_pool_channel_init);
772 if (!rx_pool_channel) {
776 ret = qman_alloc_pool(&pool);
779 rx_pool_channel = pool;
781 spin_unlock(&rx_pool_channel_init);
782 if (!rx_pool_channel)
784 return rx_pool_channel;
787 static void dpaa_release_channel(void)
789 qman_release_pool(rx_pool_channel);
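/* Have the affine QMan portal of every online CPU also dequeue from the pool
 * channel carrying this interface's Rx traffic.
 */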
792 static void dpaa_eth_add_channel(u16 channel, struct device *dev)
794 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
795 const cpumask_t *cpus = qman_affine_cpus();
796 struct qman_portal *portal;
799 for_each_cpu_and(cpu, cpus, cpu_online_mask) {
800 portal = qman_get_affine_portal(cpu);
801 qman_p_static_dequeue_add(portal, pool);
802 qman_start_using_portal(portal, dev);
806 /* Congestion group state change notification callback.
807 * Stops the device's egress queues while they are congested and
808 * wakes them upon exiting congested state.
809 * Also updates some CGR-related stats.
811 static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
814 struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
815 struct dpaa_priv, cgr_data.cgr);
818 priv->cgr_data.congestion_start_jiffies = jiffies;
819 netif_tx_stop_all_queues(priv->net_dev);
820 priv->cgr_data.cgr_congested_count++;
822 priv->cgr_data.congested_jiffies +=
823 (jiffies - priv->cgr_data.congestion_start_jiffies);
824 netif_tx_wake_all_queues(priv->net_dev);
828 static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
830 struct qm_mcc_initcgr initcgr;
834 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
836 if (netif_msg_drv(priv))
837 pr_err("%s: Error %d allocating CGR ID\n",
841 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
843 /* Enable Congestion State Change Notifications and CS taildrop */
844 memset(&initcgr, 0, sizeof(initcgr));
845 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
846 initcgr.cgr.cscn_en = QM_CGR_EN;
848 /* Set different thresholds based on the configured MAC speed.
849 * This may become suboptimal if the MAC is reconfigured at another
850 * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up
853 if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD)
854 cs_th = DPAA_CS_THRESHOLD_10G;
856 cs_th = DPAA_CS_THRESHOLD_1G;
857 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
859 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
860 initcgr.cgr.cstd_en = QM_CGR_EN;
862 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
865 if (netif_msg_drv(priv))
866 pr_err("%s: Error %d creating CGR with ID %d\n",
867 __func__, err, priv->cgr_data.cgr.cgrid);
868 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
871 if (netif_msg_drv(priv))
872 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
873 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
874 priv->cgr_data.cgr.chan);
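/* Re-program the egress CGR congestion-state threshold when the link speed
 * changes; called through mac_dev->update_speed from the MAC's link_up path.
 */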
880 static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
882 struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev);
883 struct dpaa_priv *priv = netdev_priv(net_dev);
884 struct qm_mcc_initcgr opts = { };
888 opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
891 cs_th = DPAA_CS_THRESHOLD_10G;
895 cs_th = DPAA_CS_THRESHOLD_1G;
898 qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
900 err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
902 netdev_err(net_dev, "could not update speed: %d\n", err);
905 static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
907 const struct qman_fq *template)
909 fq->fq_base = *template;
910 fq->net_dev = priv->net_dev;
912 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
913 fq->channel = priv->channel;
916 static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
918 struct fman_port *port,
919 const struct qman_fq *template)
921 fq->fq_base = *template;
922 fq->net_dev = priv->net_dev;
925 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
926 fq->channel = (u16)fman_port_get_qman_channel_id(port);
928 fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
932 static void dpaa_fq_setup(struct dpaa_priv *priv,
933 const struct dpaa_fq_cbs *fq_cbs,
934 struct fman_port *tx_port)
936 int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
937 const cpumask_t *affine_cpus = qman_affine_cpus();
938 u16 channels[NR_CPUS];
941 for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
942 channels[num_portals++] = qman_affine_channel(cpu);
944 if (num_portals == 0)
945 dev_err(priv->net_dev->dev.parent,
946 "No Qman software (affine) channels found\n");
948 /* Initialize each FQ in the list */
949 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
950 switch (fq->fq_type) {
951 case FQ_TYPE_RX_DEFAULT:
952 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
954 case FQ_TYPE_RX_ERROR:
955 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
960 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
961 fq->channel = channels[portal_cnt++ % num_portals];
964 dpaa_setup_egress(priv, fq, tx_port,
965 &fq_cbs->egress_ern);
966 /* If we have more Tx queues than the number of cores,
967 * just ignore the extra ones.
969 if (egress_cnt < DPAA_ETH_TXQ_NUM)
970 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
972 case FQ_TYPE_TX_CONF_MQ:
973 priv->conf_fqs[conf_cnt++] = &fq->fq_base;
975 case FQ_TYPE_TX_CONFIRM:
976 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
978 case FQ_TYPE_TX_ERROR:
979 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
982 dev_warn(priv->net_dev->dev.parent,
983 "Unknown FQ type detected!\n");
988 /* Make sure all CPUs receive a corresponding Tx queue. */
989 while (egress_cnt < DPAA_ETH_TXQ_NUM) {
990 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
991 if (fq->fq_type != FQ_TYPE_TX)
993 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
994 if (egress_cnt == DPAA_ETH_TXQ_NUM)
1000 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
1001 struct qman_fq *tx_fq)
1005 for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
1006 if (priv->egress_fqs[i] == tx_fq)
1012 static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
1014 const struct dpaa_priv *priv;
1015 struct qman_fq *confq = NULL;
1016 struct qm_mcc_initfq initfq;
1022 priv = netdev_priv(dpaa_fq->net_dev);
1023 dev = dpaa_fq->net_dev->dev.parent;
1025 if (dpaa_fq->fqid == 0)
1026 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
1028 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
1030 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
1032 dev_err(dev, "qman_create_fq() failed\n");
1035 fq = &dpaa_fq->fq_base;
1037 if (dpaa_fq->init) {
1038 memset(&initfq, 0, sizeof(initfq));
1040 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
1041 /* Note: we may get to keep an empty FQ in cache */
1042 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
1044 /* Try to reduce the number of portal interrupts for
1045 * Tx Confirmation FQs.
1047 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
1048 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
1051 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1053 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
1055 /* Put all egress queues in a congestion group of their own.
1056 * Sensu stricto, the Tx confirmation queues are Rx FQs,
1057 * rather than Tx - but they nonetheless account for the
1058 * memory footprint on behalf of egress traffic. We therefore
1059 * place them in the netdev's CGR, along with the Tx FQs.
1061 if (dpaa_fq->fq_type == FQ_TYPE_TX ||
1062 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
1063 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
1064 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1065 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1066 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
1067 /* Set a fixed overhead accounting, in an attempt to
1068 * reduce the impact of fixed-size skb shells and the
1069 * driver's needed headroom on system memory. This is
1070 * especially the case when the egress traffic is
1071 * composed of small datagrams.
1072 * Unfortunately, QMan's OAL value is capped to an
1073 * insufficient value, but even that is better than
1074 * no overhead accounting at all.
1076 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1077 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1078 qm_fqd_set_oal(&initfq.fqd,
1079 min(sizeof(struct sk_buff) +
1081 (size_t)FSL_QMAN_MAX_OAL));
1085 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1086 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1087 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1090 if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1091 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1093 confq = priv->conf_fqs[queue_id];
1096 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1097 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
1098 * A2V=1 (contextA A2 field is valid)
1099 * A0V=1 (contextA A0 field is valid)
1100 * B0V=1 (contextB field is valid)
1101 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
1102 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
1104 qm_fqd_context_a_set64(&initfq.fqd,
1105 0x1e00000080000000ULL);
1109 /* Put all the ingress queues in our "ingress CGR". */
1110 if (priv->use_ingress_cgr &&
1111 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1112 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
1113 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
1114 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1115 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1116 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1117 /* Set a fixed overhead accounting, just like for the
1120 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1121 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1122 qm_fqd_set_oal(&initfq.fqd,
1123 min(sizeof(struct sk_buff) +
1125 (size_t)FSL_QMAN_MAX_OAL));
1128 /* Initialization common to all ingress queues */
1129 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1130 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1131 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1132 QM_FQCTRL_CTXASTASHING);
1133 initfq.fqd.context_a.stashing.exclusive =
1134 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1135 QM_STASHING_EXCL_ANNOTATION;
1136 qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1137 DIV_ROUND_UP(sizeof(struct qman_fq),
1141 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1143 dev_err(dev, "qman_init_fq(%u) = %d\n",
1144 qman_fq_fqid(fq), err);
1145 qman_destroy_fq(fq);
1150 dpaa_fq->fqid = qman_fq_fqid(fq);
1152 if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1153 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) {
1154 err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
1157 dev_err(dev, "xdp_rxq_info_reg() = %d\n", err);
1161 err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq,
1162 MEM_TYPE_PAGE_ORDER0, NULL);
1164 dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n",
1166 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
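/* Tear down a single FQ: retire it and take it out of service in QMan,
 * unregister its XDP Rx queue info if one was registered, then destroy the
 * FQ and remove it from the driver's list.
 */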
1174 static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1176 const struct dpaa_priv *priv;
1177 struct dpaa_fq *dpaa_fq;
1182 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1183 priv = netdev_priv(dpaa_fq->net_dev);
1185 if (dpaa_fq->init) {
1186 err = qman_retire_fq(fq, NULL);
1187 if (err < 0 && netif_msg_drv(priv))
1188 dev_err(dev, "qman_retire_fq(%u) = %d\n",
1189 qman_fq_fqid(fq), err);
1191 error = qman_oos_fq(fq);
1192 if (error < 0 && netif_msg_drv(priv)) {
1193 dev_err(dev, "qman_oos_fq(%u) = %d\n",
1194 qman_fq_fqid(fq), error);
1200 if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1201 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) &&
1202 xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq))
1203 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
1205 qman_destroy_fq(fq);
1206 list_del(&dpaa_fq->list);
1211 static int dpaa_fq_free(struct device *dev, struct list_head *list)
1213 struct dpaa_fq *dpaa_fq, *tmp;
1217 list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1218 error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1219 if (error < 0 && err >= 0)
1226 static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1227 struct dpaa_fq *defq,
1228 struct dpaa_buffer_layout *buf_layout)
1230 struct fman_buffer_prefix_content buf_prefix_content;
1231 struct fman_port_params params;
1234 memset(¶ms, 0, sizeof(params));
1235 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1237 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1238 buf_prefix_content.pass_prs_result = true;
1239 buf_prefix_content.pass_hash_result = true;
1240 buf_prefix_content.pass_time_stamp = true;
1241 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1243 params.specific_params.non_rx_params.err_fqid = errq->fqid;
1244 params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1246 err = fman_port_config(port, ¶ms);
1248 pr_err("%s: fman_port_config failed\n", __func__);
1252 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1254 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1259 err = fman_port_init(port);
1261 pr_err("%s: fm_port_init failed\n", __func__);
1266 static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
1267 struct dpaa_fq *errq,
1268 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
1269 struct dpaa_buffer_layout *buf_layout)
1271 struct fman_buffer_prefix_content buf_prefix_content;
1272 struct fman_port_rx_params *rx_p;
1273 struct fman_port_params params;
1276 memset(¶ms, 0, sizeof(params));
1277 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1279 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1280 buf_prefix_content.pass_prs_result = true;
1281 buf_prefix_content.pass_hash_result = true;
1282 buf_prefix_content.pass_time_stamp = true;
1283 buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
1285 rx_p = ¶ms.specific_params.rx_params;
1286 rx_p->err_fqid = errq->fqid;
1287 rx_p->dflt_fqid = defq->fqid;
1289 rx_p->pcd_base_fqid = pcdq->fqid;
1290 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
1293 rx_p->ext_buf_pools.num_of_pools_used = 1;
1294 rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
1295 rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
1297 err = fman_port_config(port, ¶ms);
1299 pr_err("%s: fman_port_config failed\n", __func__);
1303 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1305 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1310 err = fman_port_init(port);
1312 pr_err("%s: fm_port_init failed\n", __func__);
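/* Configure and initialize the FMan Tx and Rx ports backing this interface,
 * wiring in the default/error FQs and the buffer layout.
 */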
1317 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1319 struct fm_port_fqs *port_fqs,
1320 struct dpaa_buffer_layout *buf_layout,
1323 struct fman_port *rxport = mac_dev->port[RX];
1324 struct fman_port *txport = mac_dev->port[TX];
1327 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1328 port_fqs->tx_defq, &buf_layout[TX]);
1332 err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
1333 port_fqs->rx_defq, port_fqs->rx_pcdq,
1339 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1340 struct bm_buffer *bmb, int cnt)
1344 err = bman_release(dpaa_bp->pool, bmb, cnt);
1345 /* Should never occur, address anyway to avoid leaking the buffers */
1346 if (WARN_ON(err) && dpaa_bp->free_buf_cb)
1348 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
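/* Walk an S/G table and hand all the buffers it references back to BMan,
 * releasing them in batches of up to DPAA_BUFF_RELEASE_MAX entries that
 * belong to the same buffer pool.
 */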
1353 static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1355 struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1356 struct dpaa_bp *dpaa_bp;
1359 memset(bmb, 0, sizeof(bmb));
1362 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1368 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1370 bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1373 } while (j < ARRAY_SIZE(bmb) &&
1374 !qm_sg_entry_is_final(&sgt[i - 1]) &&
1375 sgt[i - 1].bpid == sgt[i].bpid);
1377 dpaa_bman_release(dpaa_bp, bmb, j);
1378 } while (!qm_sg_entry_is_final(&sgt[i - 1]));
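/* Return a frame descriptor's buffer(s) to BMan: for S/G frames, release all
 * SGT members first, then re-map the SGT buffer and release it as well.
 */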
1381 static void dpaa_fd_release(const struct net_device *net_dev,
1382 const struct qm_fd *fd)
1384 struct qm_sg_entry *sgt;
1385 struct dpaa_bp *dpaa_bp;
1386 struct bm_buffer bmb;
1391 bm_buffer_set64(&bmb, qm_fd_addr(fd));
1393 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1397 if (qm_fd_get_format(fd) == qm_fd_sg) {
1398 vaddr = phys_to_virt(qm_fd_addr(fd));
1399 sgt = vaddr + qm_fd_get_offset(fd);
1401 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
1402 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1404 dpaa_release_sgt_members(sgt);
1406 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
1407 virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
1409 if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
1410 netdev_err(net_dev, "DMA mapping failed\n");
1413 bm_buffer_set64(&bmb, addr);
1416 dpaa_bman_release(dpaa_bp, &bmb, 1);
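/* Account an enqueue rejection (ERN) in the per-CPU counters, keyed by the
 * QMan rejection code.
 */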
1419 static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1420 const union qm_mr_entry *msg)
1422 switch (msg->ern.rc & QM_MR_RC_MASK) {
1423 case QM_MR_RC_CGR_TAILDROP:
1424 percpu_priv->ern_cnt.cg_tdrop++;
1427 percpu_priv->ern_cnt.wred++;
1429 case QM_MR_RC_ERROR:
1430 percpu_priv->ern_cnt.err_cond++;
1432 case QM_MR_RC_ORPWINDOW_EARLY:
1433 percpu_priv->ern_cnt.early_window++;
1435 case QM_MR_RC_ORPWINDOW_LATE:
1436 percpu_priv->ern_cnt.late_window++;
1438 case QM_MR_RC_FQ_TAILDROP:
1439 percpu_priv->ern_cnt.fq_tdrop++;
1441 case QM_MR_RC_ORPWINDOW_RETIRED:
1442 percpu_priv->ern_cnt.fq_retired++;
1444 case QM_MR_RC_ORP_ZERO:
1445 percpu_priv->ern_cnt.orp_zero++;
1450 /* Turn on HW checksum computation for this outgoing frame.
1451 * If the current protocol is not something we support in this regard
1452 * (or if the stack has already computed the SW checksum), we do nothing.
1454 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1457 * Note that this function may modify the fd->cmd field and the skb data buffer
1458 * (the Parse Results area).
1460 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1461 struct sk_buff *skb,
1463 void *parse_results)
1465 struct fman_prs_result *parse_result;
1466 u16 ethertype = ntohs(skb->protocol);
1467 struct ipv6hdr *ipv6h = NULL;
1472 if (skb->ip_summed != CHECKSUM_PARTIAL)
1475 /* Note: L3 csum seems to be already computed in sw, but we can't choose
1476 * L4 alone from the FM configuration anyway.
1479 /* Fill in some fields of the Parse Results array, so the FMan
1480 * can find them as if they came from the FMan Parser.
1482 parse_result = (struct fman_prs_result *)parse_results;
1484 /* If we're dealing with VLAN, get the real Ethernet type */
1485 if (ethertype == ETH_P_8021Q) {
1486 /* We can't always assume the MAC header is set correctly
1487 * by the stack, so reset to beginning of skb->data
1489 skb_reset_mac_header(skb);
1490 ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1493 /* Fill in the relevant L3 parse result fields
1494 * and read the L4 protocol type
1496 switch (ethertype) {
1498 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1501 l4_proto = iph->protocol;
1504 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1505 ipv6h = ipv6_hdr(skb);
1507 l4_proto = ipv6h->nexthdr;
1510 /* We shouldn't even be here */
1511 if (net_ratelimit())
1512 netif_alert(priv, tx_err, priv->net_dev,
1513 "Can't compute HW csum for L3 proto 0x%x\n",
1514 ntohs(skb->protocol));
1519 /* Fill in the relevant L4 parse result fields */
1522 parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1525 parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1528 if (net_ratelimit())
1529 netif_alert(priv, tx_err, priv->net_dev,
1530 "Can't compute HW csum for L4 proto 0x%x\n",
1536 /* At index 0 is IPOffset_1 as defined in the Parse Results */
1537 parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1538 parse_result->l4_off = (u8)skb_transport_offset(skb);
1540 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1541 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1543 /* On P1023 and similar platforms, fd->cmd interpretation could
1544 * be disabled by setting the CONTEXT_A bit ICMD. Currently this bit
1545 * is not set, so we do not need to check; in the future, if/when
1546 * using context_a, we will need to check this bit
1553 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1555 struct net_device *net_dev = dpaa_bp->priv->net_dev;
1556 struct bm_buffer bmb[8];
1561 for (i = 0; i < 8; i++) {
1562 p = dev_alloc_pages(0);
1564 netdev_err(net_dev, "dev_alloc_pages() failed\n");
1565 goto release_previous_buffs;
1568 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
1569 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1570 if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
1572 netdev_err(net_dev, "DMA map failed\n");
1573 goto release_previous_buffs;
1577 bm_buffer_set64(&bmb[i], addr);
1581 return dpaa_bman_release(dpaa_bp, bmb, i);
1583 release_previous_buffs:
1584 WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1586 bm_buffer_set64(&bmb[i], 0);
1587 /* Avoid releasing a completely null buffer; bman_release() requires
1588 * at least one buffer.
1596 static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1600 /* Give each CPU an allotment of "config_count" buffers */
1601 for_each_possible_cpu(i) {
1602 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1605 /* Although we access another CPU's counters here,
1606 * we do it at boot time, so it is safe
1608 for (j = 0; j < dpaa_bp->config_count; j += 8)
1609 *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1614 /* Add buffers (pages) for Rx processing whenever the bpool count falls below
1617 static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1619 int count = *countptr;
1622 if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1624 new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1625 if (unlikely(!new_bufs)) {
1626 /* Avoid looping forever if we've temporarily
1627 * run out of memory. We'll try again at the
1633 } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1636 if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1643 static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1645 struct dpaa_bp *dpaa_bp;
1648 dpaa_bp = priv->dpaa_bp;
1651 countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1653 return dpaa_eth_refill_bpool(dpaa_bp, countptr);
1656 /* Cleanup function for outgoing frame descriptors that were built on Tx path,
1657 * either contiguous frames or scatter/gather ones.
1658 * Skb freeing is not handled here.
1660 * This function may be called on error paths in the Tx function, so guard
1661 * against cases when not all FD-relevant fields were filled in. To avoid
1662 * reading an invalid transmission timestamp on the error paths, set ts to
1665 * Return the skb backpointer, since for S/G frames the buffer containing it
1668 * No skb backpointer is set when transmitting XDP frames. Cleanup the buffer
1669 * and return NULL in this case.
1671 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1672 const struct qm_fd *fd, bool ts)
1674 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1675 struct device *dev = priv->net_dev->dev.parent;
1676 struct skb_shared_hwtstamps shhwtstamps;
1677 dma_addr_t addr = qm_fd_addr(fd);
1678 void *vaddr = phys_to_virt(addr);
1679 const struct qm_sg_entry *sgt;
1680 struct dpaa_eth_swbp *swbp;
1681 struct sk_buff *skb;
1685 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1686 dma_unmap_page(priv->tx_dma_dev, addr,
1687 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1690 /* The SGT buffer was allocated on the Tx path with dev_alloc_pages() */
1693 sgt = vaddr + qm_fd_get_offset(fd);
1695 /* sgt[0] is from lowmem, was dma_map_single()-ed */
1696 dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
1697 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1699 /* remaining pages were mapped with skb_frag_dma_map() */
1700 for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
1701 !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
1702 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1704 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
1705 qm_sg_entry_get_len(&sgt[i]), dma_dir);
1708 dma_unmap_single(priv->tx_dma_dev, addr,
1709 qm_fd_get_offset(fd) + qm_fd_get_length(fd),
1713 swbp = (struct dpaa_eth_swbp *)vaddr;
1716 /* No skb backpointer is set when running XDP. An xdp_frame
1717 * backpointer is saved instead.
1720 xdp_return_frame(swbp->xdpf);
1724 /* DMA unmapping is required before accessing the HW provided info */
1725 if (ts && priv->tx_tstamp &&
1726 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1727 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1729 if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
1731 shhwtstamps.hwtstamp = ns_to_ktime(ns);
1732 skb_tstamp_tx(skb, &shhwtstamps);
1734 dev_warn(dev, "fman_port_get_tstamp failed!\n");
1738 if (qm_fd_get_format(fd) == qm_fd_sg)
1739 /* Free the page that we allocated on Tx for the SGT */
1740 free_pages((unsigned long)vaddr, 0);
1745 static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1747 /* The parser has run and performed L4 checksum validation.
1748 * We know there were no parser errors (and implicitly no
1749 * L4 csum error), otherwise we wouldn't be here.
1751 if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1752 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1753 return CHECKSUM_UNNECESSARY;
1755 /* We're here because either the parser didn't run or the L4 checksum
1756 * was not verified. This may include the case of a UDP frame with
1757 * checksum zero or an L4 proto other than TCP/UDP
1759 return CHECKSUM_NONE;
1762 #define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
1764 /* Build a linear skb around the received buffer.
1765 * We are guaranteed there is enough room at the end of the data buffer to
1766 * accommodate the shared info area of the skb.
1768 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1769 const struct qm_fd *fd)
1771 ssize_t fd_off = qm_fd_get_offset(fd);
1772 dma_addr_t addr = qm_fd_addr(fd);
1773 struct dpaa_bp *dpaa_bp;
1774 struct sk_buff *skb;
1777 vaddr = phys_to_virt(addr);
1778 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1780 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1784 skb = build_skb(vaddr, dpaa_bp->size +
1785 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1786 if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
1788 skb_reserve(skb, fd_off);
1789 skb_put(skb, qm_fd_get_length(fd));
1791 skb->ip_summed = rx_csum_offload(priv, fd);
1796 free_pages((unsigned long)vaddr, 0);
1800 /* Build an skb with the data of the first S/G entry in the linear portion and
1801 * the rest of the frame as skb fragments.
1803 * The page fragment holding the S/G Table is recycled here.
1805 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1806 const struct qm_fd *fd)
1808 ssize_t fd_off = qm_fd_get_offset(fd);
1809 dma_addr_t addr = qm_fd_addr(fd);
1810 const struct qm_sg_entry *sgt;
1811 struct page *page, *head_page;
1812 struct dpaa_bp *dpaa_bp;
1813 void *vaddr, *sg_vaddr;
1814 int frag_off, frag_len;
1815 struct sk_buff *skb;
1822 vaddr = phys_to_virt(addr);
1823 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1825 /* Iterate through the SGT entries and add data buffers to the skb */
1826 sgt = vaddr + fd_off;
1828 for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1829 /* Extension bit is not supported */
1830 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1832 sg_addr = qm_sg_addr(&sgt[i]);
1833 sg_vaddr = phys_to_virt(sg_addr);
1834 WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
1836 dma_unmap_page(priv->rx_dma_dev, sg_addr,
1837 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1839 /* We may use multiple Rx pools */
1840 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1845 sz = dpaa_bp->size +
1846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1847 skb = build_skb(sg_vaddr, sz);
1851 skb->ip_summed = rx_csum_offload(priv, fd);
1853 /* Make sure forwarded skbs will have enough space
1854 * on Tx, if extra headers are added.
1856 WARN_ON(fd_off != priv->rx_headroom);
1857 skb_reserve(skb, fd_off);
1858 skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1860 /* Not the first S/G entry; all data from buffer will
1861 * be added in an skb fragment; fragment index is offset
1862 * by one since first S/G entry was incorporated in the
1863 * linear part of the skb.
1865 * Caution: 'page' may be a tail page.
1867 page = virt_to_page(sg_vaddr);
1868 head_page = virt_to_head_page(sg_vaddr);
1870 /* Compute offset in (possibly tail) page */
1871 page_offset = ((unsigned long)sg_vaddr &
1873 (page_address(page) - page_address(head_page));
1874 /* page_offset only refers to the beginning of sgt[i];
1875 * but the buffer itself may have an internal offset.
1877 frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1878 frag_len = qm_sg_entry_get_len(&sgt[i]);
1879 /* skb_add_rx_frag() does no checking on the page; if
1880 * we pass it a tail page, we'll end up with
1881 * bad page accounting and eventually with segfaults.
1883 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1884 frag_len, dpaa_bp->size);
1887 /* Update the pool count for the current {cpu x bpool} */
1888 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1891 if (qm_sg_entry_is_final(&sgt[i]))
1894 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1896 /* free the SG table buffer */
1897 free_pages((unsigned long)vaddr, 0);
1902 /* free all the SG entries */
1903 for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
1904 sg_addr = qm_sg_addr(&sgt[j]);
1905 sg_vaddr = phys_to_virt(sg_addr);
1906 /* all pages 0..i were unmapped */
1908 dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
1909 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1910 free_pages((unsigned long)sg_vaddr, 0);
1911 /* counters 0..i-1 were decremented */
1913 dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
1915 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1920 if (qm_sg_entry_is_final(&sgt[j]))
1923 /* free the SGT fragment */
1924 free_pages((unsigned long)vaddr, 0);
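/* Build a contiguous frame descriptor around a linear skb; the tx_headroom
 * in front of skb->data carries the software back-pointer and the parse
 * results used for Tx checksum offload.
 */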
1929 static int skb_to_contig_fd(struct dpaa_priv *priv,
1930 struct sk_buff *skb, struct qm_fd *fd,
1933 struct net_device *net_dev = priv->net_dev;
1934 enum dma_data_direction dma_dir;
1935 struct dpaa_eth_swbp *swbp;
1936 unsigned char *buff_start;
1940 /* We are guaranteed to have at least tx_headroom bytes
1941 * available, so just use that for offset.
1943 fd->bpid = FSL_DPAA_BPID_INV;
1944 buff_start = skb->data - priv->tx_headroom;
1945 dma_dir = DMA_TO_DEVICE;
1947 swbp = (struct dpaa_eth_swbp *)buff_start;
1950 /* Enable L3/L4 hardware checksum computation.
1952 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1953 * need to write into the skb.
1955 err = dpaa_enable_tx_csum(priv, skb, fd,
1956 buff_start + DPAA_TX_PRIV_DATA_SIZE);
1957 if (unlikely(err < 0)) {
1958 if (net_ratelimit())
1959 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1964 /* Fill in the rest of the FD fields */
1965 qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1966 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1968 /* Map the entire buffer size that may be seen by FMan, but no more */
1969 addr = dma_map_single(priv->tx_dma_dev, buff_start,
1970 priv->tx_headroom + skb->len, dma_dir);
1971 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
1972 if (net_ratelimit())
1973 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1976 qm_fd_addr_set64(fd, addr);
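/* Build a scatter/gather frame descriptor for a nonlinear skb: the S/G table
 * lives in a newly allocated page, entry 0 maps the skb linear part and the
 * following entries map the page fragments.
 */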
1981 static int skb_to_sg_fd(struct dpaa_priv *priv,
1982 struct sk_buff *skb, struct qm_fd *fd)
1984 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1985 const int nr_frags = skb_shinfo(skb)->nr_frags;
1986 struct net_device *net_dev = priv->net_dev;
1987 struct dpaa_eth_swbp *swbp;
1988 struct qm_sg_entry *sgt;
1996 /* get a page to store the SGTable */
1997 p = dev_alloc_pages(0);
1999 netdev_err(net_dev, "dev_alloc_pages() failed\n");
2002 buff_start = page_address(p);
2004 /* Enable L3/L4 hardware checksum computation.
2006 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
2007 * need to write into the skb.
2009 err = dpaa_enable_tx_csum(priv, skb, fd,
2010 buff_start + DPAA_TX_PRIV_DATA_SIZE);
2011 if (unlikely(err < 0)) {
2012 if (net_ratelimit())
2013 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
2018 /* SGT[0] is used by the linear part */
2019 sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
2020 frag_len = skb_headlen(skb);
2021 qm_sg_entry_set_len(&sgt[0], frag_len);
2022 sgt[0].bpid = FSL_DPAA_BPID_INV;
2024 addr = dma_map_single(priv->tx_dma_dev, skb->data,
2025 skb_headlen(skb), dma_dir);
2026 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2027 netdev_err(priv->net_dev, "DMA mapping failed\n");
2029 goto sg0_map_failed;
2031 qm_sg_entry_set64(&sgt[0], addr);
2033 /* populate the rest of SGT entries */
2034 for (i = 0; i < nr_frags; i++) {
2035 frag = &skb_shinfo(skb)->frags[i];
2036 frag_len = skb_frag_size(frag);
2037 WARN_ON(!skb_frag_page(frag));
2038 addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
2040 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2041 netdev_err(priv->net_dev, "DMA mapping failed\n");
2046 qm_sg_entry_set_len(&sgt[i + 1], frag_len);
2047 sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
2048 sgt[i + 1].offset = 0;
2050 /* keep the offset in the address */
2051 qm_sg_entry_set64(&sgt[i + 1], addr);
2054 /* Set the final bit in the last used entry of the SGT */
2055 qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
2057 /* set fd offset to priv->tx_headroom */
2058 qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
2060 /* DMA map the SGT page */
2061 swbp = (struct dpaa_eth_swbp *)buff_start;
2064 addr = dma_map_page(priv->tx_dma_dev, p, 0,
2065 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
2066 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2067 netdev_err(priv->net_dev, "DMA mapping failed\n");
2069 goto sgt_map_failed;
2072 fd->bpid = FSL_DPAA_BPID_INV;
2073 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2074 qm_fd_addr_set64(fd, addr);
2080 for (j = 0; j < i; j++)
2081 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
2082 qm_sg_entry_get_len(&sgt[j]), dma_dir);
2085 free_pages((unsigned long)buff_start, 0);
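/* Enqueue the frame descriptor on the egress FQ selected by @queue, retrying
 * up to DPAA_ENQUEUE_RETRIES times if the enqueue does not immediately
 * succeed. For frames not backed by a buffer pool (bpid == FSL_DPAA_BPID_INV)
 * the matching Tx confirmation FQID is stored in fd->cmd.
 */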
2090 static inline int dpaa_xmit(struct dpaa_priv *priv,
2091 struct rtnl_link_stats64 *percpu_stats,
2095 struct qman_fq *egress_fq;
2098 egress_fq = priv->egress_fqs[queue];
2099 if (fd->bpid == FSL_DPAA_BPID_INV)
2100 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
2102 /* Trace this Tx fd */
2103 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2105 for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
2106 err = qman_enqueue(egress_fq, fd);
2111 if (unlikely(err < 0)) {
2112 percpu_stats->tx_fifo_errors++;
2116 percpu_stats->tx_packets++;
2117 percpu_stats->tx_bytes += qm_fd_get_length(fd);
2122 #ifdef CONFIG_DPAA_ERRATUM_A050385
2123 static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
2125 struct dpaa_priv *priv = netdev_priv(net_dev);
2126 struct sk_buff *new_skb, *skb = *s;
2127 unsigned char *start, i;
2129 /* check linear buffer alignment */
2130 if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
2133 /* linear buffers just need to have an aligned start */
2134 if (!skb_is_nonlinear(skb))
2137 /* linear data size for nonlinear skbs needs to be aligned */
2138 if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
2141 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2142 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2144 /* all fragments need to have aligned start addresses */
2145 if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
2148 /* all but last fragment need to have aligned sizes */
2149 if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
2150 (i < skb_shinfo(skb)->nr_frags - 1))
2157 /* copy all the skb content into a new linear buffer */
2158 new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
2163 /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
2164 skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
2166 /* Workaround for DPAA_A050385 requires data start to be aligned */
2167 start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
2168 if (start - new_skb->data)
2169 skb_reserve(new_skb, start - new_skb->data);
2171 skb_put(new_skb, skb->len);
2172 skb_copy_bits(skb, 0, new_skb->data, skb->len);
2173 skb_copy_header(new_skb, skb);
2174 new_skb->dev = skb->dev;
2176 /* Copy relevant timestamp info from the old skb to the new */
2177 if (priv->tx_tstamp) {
2178 skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
2179 skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
2180 skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
2182 skb_set_owner_w(new_skb, skb->sk);
2185 /* We move the headroom when we align it so we have to reset the
2186 * network and transport header offsets relative to the new data
2187 * pointer. The checksum offload relies on these offsets.
2189 skb_set_network_header(new_skb, skb_network_offset(skb));
2190 skb_set_transport_header(new_skb, skb_transport_offset(skb));
2198 static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
2199 struct xdp_frame **init_xdpf)
2201 struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
2202 void *new_buff, *aligned_data;
2207 /* Check the data alignment and make sure the headroom is large
2208 * enough to store the xdpf backpointer. Use an aligned headroom
2211 * Due to alignment constraints, we give XDP access to the full 256
2212 * byte frame headroom. If the XDP program uses all of it, copy the
2213 * data to a new buffer and make room for storing the backpointer.
2215 if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
2216 xdpf->headroom >= priv->tx_headroom) {
2217 xdpf->headroom = priv->tx_headroom;
2221 /* Try to move the data inside the buffer just enough to align it and
2222 * store the xdpf backpointer. If the available headroom isn't large
2223 * enough, resort to allocating a new buffer and copying the data.
2225 aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
2226 data_shift = xdpf->data - aligned_data;
2228 /* The XDP frame's headroom needs to be large enough to accommodate
2229 * shifting the data as well as storing the xdpf backpointer.
2231 if (xdpf->headroom >= data_shift + priv->tx_headroom) {
2232 memmove(aligned_data, xdpf->data, xdpf->len);
2233 xdpf->data = aligned_data;
2234 xdpf->headroom = priv->tx_headroom;
2238 /* The new xdp_frame is stored in the new buffer. Reserve enough space
2239 * in the headroom for storing it along with the driver's private
2240 * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
2241 * guarantee the data's alignment in the buffer.
2243 headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
2244 DPAA_FD_DATA_ALIGNMENT);
2246 /* Ensure the extended headroom and data don't overflow the buffer,
2247 * while maintaining the mandatory tailroom.
2249 if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
2250 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
2253 p = dev_alloc_pages(0);
2257 /* Copy the data to the new buffer at a properly aligned offset */
2258 new_buff = page_address(p);
2259 memcpy(new_buff + headroom, xdpf->data, xdpf->len);
2261 /* Create an XDP frame around the new buffer in a similar fashion
2262 * to xdp_convert_buff_to_frame.
2264 new_xdpf = new_buff;
2265 new_xdpf->data = new_buff + headroom;
2266 new_xdpf->len = xdpf->len;
2267 new_xdpf->headroom = priv->tx_headroom;
2268 new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
2269 new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
2271 /* Release the initial buffer */
2272 xdp_return_frame_rx_napi(xdpf);
2274 *init_xdpf = new_xdpf;
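/* Main transmit entry point (ndo_start_xmit). The skb head is un-cloned so
 * the backpointer can be stored in the headroom, skbs with more fragments
 * than FMan supports are linearized, the A050385 workaround is applied when
 * the erratum is present, and the skb is then converted into either a
 * contiguous or a scatter/gather frame descriptor and enqueued via
 * dpaa_xmit(). On any failure the FD is cleaned up and tx_errors is bumped.
 */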
2280 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2282 const int queue_mapping = skb_get_queue_mapping(skb);
2283 bool nonlinear = skb_is_nonlinear(skb);
2284 struct rtnl_link_stats64 *percpu_stats;
2285 struct dpaa_percpu_priv *percpu_priv;
2286 struct netdev_queue *txq;
2287 struct dpaa_priv *priv;
2292 priv = netdev_priv(net_dev);
2293 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2294 percpu_stats = &percpu_priv->stats;
2296 qm_fd_clear_fd(&fd);
2299 /* We're going to store the skb backpointer at the beginning
2300 * of the data buffer, so we need a privately owned skb
2302 * We've made sure skb is not shared in dev->priv_flags,
2303 * we need to verify the skb head is not cloned
2305 if (skb_cow_head(skb, priv->tx_headroom))
2308 WARN_ON(skb_is_nonlinear(skb));
2311 /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
2312 * make sure we don't feed FMan with more fragments than it supports.
2314 if (unlikely(nonlinear &&
2315 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
2316 /* If the egress skb contains more fragments than we support
2317 * we have no choice but to linearize it ourselves.
2319 if (__skb_linearize(skb))
2322 nonlinear = skb_is_nonlinear(skb);
2325 #ifdef CONFIG_DPAA_ERRATUM_A050385
2326 if (unlikely(fman_has_errata_a050385())) {
2327 if (dpaa_a050385_wa_skb(net_dev, &skb))
2329 nonlinear = skb_is_nonlinear(skb);
2334 /* Just create a S/G fd based on the skb */
2335 err = skb_to_sg_fd(priv, skb, &fd);
2336 percpu_priv->tx_frag_skbuffs++;
2338 /* Create a contig FD from this skb */
2339 err = skb_to_contig_fd(priv, skb, &fd, &offset);
2341 if (unlikely(err < 0))
2342 goto skb_to_fd_failed;
2344 txq = netdev_get_tx_queue(net_dev, queue_mapping);
2346 /* LLTX requires us to do our own update of trans_start */
2347 txq_trans_cond_update(txq);
2349 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2350 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
2351 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2354 if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2355 return NETDEV_TX_OK;
2357 dpaa_cleanup_tx_fd(priv, &fd, false);
2360 percpu_stats->tx_errors++;
2362 return NETDEV_TX_OK;
2365 static void dpaa_rx_error(struct net_device *net_dev,
2366 const struct dpaa_priv *priv,
2367 struct dpaa_percpu_priv *percpu_priv,
2368 const struct qm_fd *fd,
2371 if (net_ratelimit())
2372 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2373 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2375 percpu_priv->stats.rx_errors++;
2377 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2378 percpu_priv->rx_errors.dme++;
2379 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2380 percpu_priv->rx_errors.fpe++;
2381 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2382 percpu_priv->rx_errors.fse++;
2383 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2384 percpu_priv->rx_errors.phe++;
2386 dpaa_fd_release(net_dev, fd);
2389 static void dpaa_tx_error(struct net_device *net_dev,
2390 const struct dpaa_priv *priv,
2391 struct dpaa_percpu_priv *percpu_priv,
2392 const struct qm_fd *fd,
2395 struct sk_buff *skb;
2397 if (net_ratelimit())
2398 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2399 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2401 percpu_priv->stats.tx_errors++;
2403 skb = dpaa_cleanup_tx_fd(priv, fd, false);
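/* NAPI poll callback: dequeue up to 'budget' entries from the affine QMan
 * portal's DQRR, complete any pending XDP redirects, and re-arm the DQRR
 * interrupt source once the budget is not exhausted (or when the interface
 * is going down).
 */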
2407 static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2409 struct dpaa_napi_portal *np =
2410 container_of(napi, struct dpaa_napi_portal, napi);
2415 cleaned = qman_p_poll_dqrr(np->p, budget);
2417 if (np->xdp_act & XDP_REDIRECT)
2420 if (cleaned < budget) {
2421 napi_complete_done(napi, cleaned);
2422 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2423 } else if (np->down) {
2424 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2430 static void dpaa_tx_conf(struct net_device *net_dev,
2431 const struct dpaa_priv *priv,
2432 struct dpaa_percpu_priv *percpu_priv,
2433 const struct qm_fd *fd,
2436 struct sk_buff *skb;
2438 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2439 if (net_ratelimit())
2440 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2441 be32_to_cpu(fd->status) &
2442 FM_FD_STAT_TX_ERRORS);
2444 percpu_priv->stats.tx_errors++;
2447 percpu_priv->tx_confirm++;
2449 skb = dpaa_cleanup_tx_fd(priv, fd, true);
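/* Helper used by the DQRR callbacks: when the sched_napi flag asks for it,
 * the portal's DQRR interrupt source is removed and NAPI is scheduled on
 * this CPU; a nonzero return tells the caller to stop dequeuing
 * (qman_cb_dqrr_stop).
 */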
2454 static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2455 struct qman_portal *portal, bool sched_napi)
2458 /* Disable QMan IRQ and invoke NAPI */
2459 qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2461 percpu_priv->np.p = portal;
2462 napi_schedule(&percpu_priv->np.napi);
2463 percpu_priv->in_interrupt++;
2469 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2471 const struct qm_dqrr_entry *dq,
2474 struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2475 struct dpaa_percpu_priv *percpu_priv;
2476 struct net_device *net_dev;
2477 struct dpaa_bp *dpaa_bp;
2478 struct dpaa_priv *priv;
2480 net_dev = dpaa_fq->net_dev;
2481 priv = netdev_priv(net_dev);
2482 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2484 return qman_cb_dqrr_consume;
2486 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2488 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2489 return qman_cb_dqrr_stop;
2491 dpaa_eth_refill_bpools(priv);
2492 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2494 return qman_cb_dqrr_consume;
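/* Transmit a single XDP frame: the xdp_frame pointer is stashed in the
 * software backpointer area at the start of the buffer so it can be
 * recovered on Tx confirmation, a contiguous FD is built around the frame,
 * and the buffer is DMA-mapped and enqueued on the current CPU's Tx queue.
 * The A050385 workaround is applied first when the erratum is present.
 */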
2497 static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
2498 struct xdp_frame *xdpf)
2500 struct dpaa_priv *priv = netdev_priv(net_dev);
2501 struct rtnl_link_stats64 *percpu_stats;
2502 struct dpaa_percpu_priv *percpu_priv;
2503 struct dpaa_eth_swbp *swbp;
2504 struct netdev_queue *txq;
2510 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2511 percpu_stats = &percpu_priv->stats;
2513 #ifdef CONFIG_DPAA_ERRATUM_A050385
2514 if (unlikely(fman_has_errata_a050385())) {
2515 if (dpaa_a050385_wa_xdpf(priv, &xdpf)) {
2522 if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) {
2527 buff_start = xdpf->data - xdpf->headroom;
2529 /* Leave the skb backpointer slot at the start of the buffer empty.
2530 * Save the XDP frame for easy cleanup on confirmation.
2532 swbp = (struct dpaa_eth_swbp *)buff_start;
2536 qm_fd_clear_fd(&fd);
2537 fd.bpid = FSL_DPAA_BPID_INV;
2538 fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2539 qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len);
2541 addr = dma_map_single(priv->tx_dma_dev, buff_start,
2542 xdpf->headroom + xdpf->len,
2544 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2549 qm_fd_addr_set64(&fd, addr);
2551 /* Bump the trans_start */
2552 txq = netdev_get_tx_queue(net_dev, smp_processor_id());
2553 txq_trans_cond_update(txq);
2555 err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
2557 dma_unmap_single(priv->tx_dma_dev, addr,
2558 qm_fd_get_offset(&fd) + qm_fd_get_length(&fd),
2566 percpu_stats->tx_errors++;
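/* Run the attached XDP program on a received contiguous frame. XDP_PASS
 * updates the FD offset/length to reflect any data adjustments made by the
 * program; XDP_TX retransmits the buffer through dpaa_xdp_xmit_frame();
 * XDP_REDIRECT hands the buffer to xdp_do_redirect(); anything else (or a
 * failure on the above paths) drops the frame and frees the page.
 */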
2570 static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
2571 struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
2573 ssize_t fd_off = qm_fd_get_offset(fd);
2574 struct bpf_prog *xdp_prog;
2575 struct xdp_frame *xdpf;
2576 struct xdp_buff xdp;
2580 xdp_prog = READ_ONCE(priv->xdp_prog);
2584 xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
2586 xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
2587 XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
2589 /* We reserve a fixed headroom of 256 bytes under the erratum and we
2590 * offer it all to XDP programs to use. If no room is left for the
2591 * xdpf backpointer on TX, we will need to copy the data.
2592 * Disable metadata support since data realignments might be required
2593 * and the information can be lost.
2595 #ifdef CONFIG_DPAA_ERRATUM_A050385
2596 if (unlikely(fman_has_errata_a050385())) {
2597 xdp_set_data_meta_invalid(&xdp);
2598 xdp.data_hard_start = vaddr;
2599 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2603 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
2605 /* Update the length and the offset of the FD */
2606 qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
2610 #ifdef CONFIG_DPAA_ERRATUM_A050385
2611 *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 :
2612 xdp.data - xdp.data_meta;
2614 *xdp_meta_len = xdp.data - xdp.data_meta;
2618 /* We can access the full headroom when sending the frame
2621 xdp.data_hard_start = vaddr;
2622 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2623 xdpf = xdp_convert_buff_to_frame(&xdp);
2624 if (unlikely(!xdpf)) {
2625 free_pages((unsigned long)vaddr, 0);
2629 if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
2630 xdp_return_frame_rx_napi(xdpf);
2634 /* Allow redirect to use the full headroom */
2635 xdp.data_hard_start = vaddr;
2636 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2638 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
2640 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2641 free_pages((unsigned long)vaddr, 0);
2645 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
2648 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2651 /* Free the buffer */
2652 free_pages((unsigned long)vaddr, 0);
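/* Default Rx DQRR callback: the heart of the receive path. After scheduling
 * NAPI if needed and refilling the buffer pools, the frame's buffer is
 * unmapped, the timestamp and hash are extracted from the headroom, XDP is
 * run on contiguous frames, and surviving frames are converted to skbs and
 * handed to the stack via netif_receive_skb().
 */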
2659 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2661 const struct qm_dqrr_entry *dq,
2664 bool ts_valid = false, hash_valid = false;
2665 struct skb_shared_hwtstamps *shhwtstamps;
2666 unsigned int skb_len, xdp_meta_len = 0;
2667 struct rtnl_link_stats64 *percpu_stats;
2668 struct dpaa_percpu_priv *percpu_priv;
2669 const struct qm_fd *fd = &dq->fd;
2670 dma_addr_t addr = qm_fd_addr(fd);
2671 struct dpaa_napi_portal *np;
2672 enum qm_fd_format fd_format;
2673 struct net_device *net_dev;
2674 u32 fd_status, hash_offset;
2675 struct qm_sg_entry *sgt;
2676 struct dpaa_bp *dpaa_bp;
2677 struct dpaa_fq *dpaa_fq;
2678 struct dpaa_priv *priv;
2679 struct sk_buff *skb;
2686 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2687 fd_status = be32_to_cpu(fd->status);
2688 fd_format = qm_fd_get_format(fd);
2689 net_dev = dpaa_fq->net_dev;
2690 priv = netdev_priv(net_dev);
2691 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2693 return qman_cb_dqrr_consume;
2695 /* Trace the Rx fd */
2696 trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2698 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2699 percpu_stats = &percpu_priv->stats;
2700 np = &percpu_priv->np;
2702 if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
2703 return qman_cb_dqrr_stop;
2705 /* Make sure we didn't run out of buffers */
2706 if (unlikely(dpaa_eth_refill_bpools(priv))) {
2707 /* Unable to refill the buffer pool due to insufficient
2708 * system memory. Just release the frame back into the pool,
2709 * otherwise we'll soon end up with an empty buffer pool.
2711 dpaa_fd_release(net_dev, &dq->fd);
2712 return qman_cb_dqrr_consume;
2715 if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
2716 if (net_ratelimit())
2717 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2718 fd_status & FM_FD_STAT_RX_ERRORS);
2720 percpu_stats->rx_errors++;
2721 dpaa_fd_release(net_dev, fd);
2722 return qman_cb_dqrr_consume;
2725 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
2728 /* prefetch the first 64 bytes of the frame or the SGT start */
2729 vaddr = phys_to_virt(addr);
2730 prefetch(vaddr + qm_fd_get_offset(fd));
2732 /* The only FD types that we may receive are contig and S/G */
2733 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2735 /* Account for either the contig buffer or the SGT buffer (depending on
2736 * which case we were in) having been removed from the pool.
2738 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2741 /* Extract the timestamp stored in the headroom before running XDP */
2742 if (priv->rx_tstamp) {
2743 if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
2746 WARN_ONCE(1, "fman_port_get_tstamp failed!\n");
2749 /* Extract the hash stored in the headroom before running XDP */
2750 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
2751 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
2753 hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
2757 if (likely(fd_format == qm_fd_contig)) {
2758 xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
2759 dpaa_fq, &xdp_meta_len);
2760 np->xdp_act |= xdp_act;
2761 if (xdp_act != XDP_PASS) {
2762 percpu_stats->rx_packets++;
2763 percpu_stats->rx_bytes += qm_fd_get_length(fd);
2764 return qman_cb_dqrr_consume;
2766 skb = contig_fd_to_skb(priv, fd);
2768 /* XDP doesn't support S/G frames. Return the fragments to the
2769 * buffer pool and release the SGT.
2771 if (READ_ONCE(priv->xdp_prog)) {
2772 WARN_ONCE(1, "S/G frames not supported under XDP\n");
2773 sgt = vaddr + qm_fd_get_offset(fd);
2774 dpaa_release_sgt_members(sgt);
2775 free_pages((unsigned long)vaddr, 0);
2776 return qman_cb_dqrr_consume;
2778 skb = sg_fd_to_skb(priv, fd);
2781 return qman_cb_dqrr_consume;
2784 skb_metadata_set(skb, xdp_meta_len);
2786 /* Set the previously extracted timestamp */
2788 shhwtstamps = skb_hwtstamps(skb);
2789 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2790 shhwtstamps->hwtstamp = ns_to_ktime(ns);
2793 skb->protocol = eth_type_trans(skb, net_dev);
2795 /* Set the previously extracted hash */
2797 enum pkt_hash_types type;
2799 /* if L4 exists, it was used in the hash generation */
2800 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
2801 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
2802 skb_set_hash(skb, hash, type);
2807 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
2808 percpu_stats->rx_dropped++;
2809 return qman_cb_dqrr_consume;
2812 percpu_stats->rx_packets++;
2813 percpu_stats->rx_bytes += skb_len;
2815 return qman_cb_dqrr_consume;
2818 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2820 const struct qm_dqrr_entry *dq,
2823 struct dpaa_percpu_priv *percpu_priv;
2824 struct net_device *net_dev;
2825 struct dpaa_priv *priv;
2827 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2828 priv = netdev_priv(net_dev);
2830 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2832 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2833 return qman_cb_dqrr_stop;
2835 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2837 return qman_cb_dqrr_consume;
2840 static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2842 const struct qm_dqrr_entry *dq,
2845 struct dpaa_percpu_priv *percpu_priv;
2846 struct net_device *net_dev;
2847 struct dpaa_priv *priv;
2849 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2850 priv = netdev_priv(net_dev);
2853 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2855 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2857 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2858 return qman_cb_dqrr_stop;
2860 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2862 return qman_cb_dqrr_consume;
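/* Enqueue rejection (ERN) callback for egress FQs: account the rejected
 * frame as dropped, update the per-CPU ERN counters and free the associated
 * skb.
 */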
2865 static void egress_ern(struct qman_portal *portal,
2867 const union qm_mr_entry *msg)
2869 const struct qm_fd *fd = &msg->ern.fd;
2870 struct dpaa_percpu_priv *percpu_priv;
2871 const struct dpaa_priv *priv;
2872 struct net_device *net_dev;
2873 struct sk_buff *skb;
2875 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2876 priv = netdev_priv(net_dev);
2877 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2879 percpu_priv->stats.tx_dropped++;
2880 percpu_priv->stats.tx_fifo_errors++;
2881 count_ern(percpu_priv, msg);
2883 skb = dpaa_cleanup_tx_fd(priv, fd, false);
2884 dev_kfree_skb_any(skb);
2887 static const struct dpaa_fq_cbs dpaa_fq_cbs = {
2888 .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
2889 .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
2890 .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
2891 .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
2892 .egress_ern = { .cb = { .ern = egress_ern } }
2895 static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
2897 struct dpaa_percpu_priv *percpu_priv;
2900 for_each_online_cpu(i) {
2901 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2903 percpu_priv->np.down = false;
2904 napi_enable(&percpu_priv->np.napi);
2908 static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
2910 struct dpaa_percpu_priv *percpu_priv;
2913 for_each_online_cpu(i) {
2914 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2916 percpu_priv->np.down = true;
2917 napi_disable(&percpu_priv->np.napi);
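/* ndo_open: enable NAPI on all online CPUs, connect the PHY through phylink,
 * enable the FMan Rx/Tx ports and the MAC, start phylink and finally the Tx
 * queues. Errors unwind in reverse order.
 */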
2921 static int dpaa_open(struct net_device *net_dev)
2923 struct mac_device *mac_dev;
2924 struct dpaa_priv *priv;
2927 priv = netdev_priv(net_dev);
2928 mac_dev = priv->mac_dev;
2929 dpaa_eth_napi_enable(priv);
2931 err = phylink_of_phy_connect(mac_dev->phylink,
2932 mac_dev->dev->of_node, 0);
2934 goto phy_init_failed;
2936 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
2937 err = fman_port_enable(mac_dev->port[i]);
2939 goto mac_start_failed;
2942 err = priv->mac_dev->enable(mac_dev->fman_mac);
2944 netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
2945 goto mac_start_failed;
2947 phylink_start(mac_dev->phylink);
2949 netif_tx_start_all_queues(net_dev);
2954 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2955 fman_port_disable(mac_dev->port[i]);
2956 phylink_disconnect_phy(mac_dev->phylink);
2959 dpaa_eth_napi_disable(priv);
2964 static int dpaa_eth_stop(struct net_device *net_dev)
2966 struct dpaa_priv *priv;
2969 err = dpaa_stop(net_dev);
2971 priv = netdev_priv(net_dev);
2972 dpaa_eth_napi_disable(priv);
2977 static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
2979 int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom;
2981 /* We do not support S/G fragments when XDP is enabled.
2982 * Limit the MTU in relation to the buffer size.
2984 if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) {
2985 dev_warn(priv->net_dev->dev.parent,
2986 "The maximum MTU for XDP is %d\n",
2987 max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
2994 static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
2996 struct dpaa_priv *priv = netdev_priv(net_dev);
2998 if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
3001 net_dev->mtu = new_mtu;
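/* Install or remove an XDP program. Since S/G frames are not supported in
 * XDP mode, the current MTU must fit in a single buffer; the interface is
 * stopped (if running) around the atomic program swap and reopened
 * afterwards.
 */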
3005 static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
3007 struct dpaa_priv *priv = netdev_priv(net_dev);
3008 struct bpf_prog *old_prog;
3012 /* S/G fragments are not supported in XDP-mode */
3013 if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) {
3014 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
3018 up = netif_running(net_dev);
3021 dpaa_eth_stop(net_dev);
3023 old_prog = xchg(&priv->xdp_prog, bpf->prog);
3025 bpf_prog_put(old_prog);
3028 err = dpaa_open(net_dev);
3030 NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed");
3038 static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
3040 switch (xdp->command) {
3041 case XDP_SETUP_PROG:
3042 return dpaa_setup_xdp(net_dev, xdp);
3048 static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
3049 struct xdp_frame **frames, u32 flags)
3051 struct xdp_frame *xdpf;
3054 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3057 if (!netif_running(net_dev))
3060 for (i = 0; i < n; i++) {
3062 if (dpaa_xdp_xmit_frame(net_dev, xdpf))
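/* SIOCSHWTSTAMP handler: the hardware cannot disable Rx and Tx timestamping
 * independently, so disabling a direction only clears the corresponding
 * driver flag, while enabling either direction turns on MAC timestamping.
 * When Rx timestamping is on, all frame types are timestamped and
 * HWTSTAMP_FILTER_ALL is reported back to user space.
 */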
3070 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3072 struct dpaa_priv *priv = netdev_priv(dev);
3073 struct hwtstamp_config config;
3075 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
3078 switch (config.tx_type) {
3079 case HWTSTAMP_TX_OFF:
3080 /* Couldn't disable rx/tx timestamping separately.
3083 priv->tx_tstamp = false;
3085 case HWTSTAMP_TX_ON:
3086 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3087 priv->tx_tstamp = true;
3093 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
3094 /* Couldn't disable rx/tx timestamping separately.
3097 priv->rx_tstamp = false;
3099 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3100 priv->rx_tstamp = true;
3101 /* TS is set for all frame types, not only those requested */
3102 config.rx_filter = HWTSTAMP_FILTER_ALL;
3105 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
3109 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
3112 struct dpaa_priv *priv = netdev_priv(net_dev);
3114 if (cmd == SIOCGMIIREG) {
3115 if (net_dev->phydev)
3116 return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
3120 if (cmd == SIOCSHWTSTAMP)
3121 return dpaa_ts_ioctl(net_dev, rq, cmd);
3126 static const struct net_device_ops dpaa_ops = {
3127 .ndo_open = dpaa_open,
3128 .ndo_start_xmit = dpaa_start_xmit,
3129 .ndo_stop = dpaa_eth_stop,
3130 .ndo_tx_timeout = dpaa_tx_timeout,
3131 .ndo_get_stats64 = dpaa_get_stats64,
3132 .ndo_change_carrier = fixed_phy_change_carrier,
3133 .ndo_set_mac_address = dpaa_set_mac_address,
3134 .ndo_validate_addr = eth_validate_addr,
3135 .ndo_set_rx_mode = dpaa_set_rx_mode,
3136 .ndo_eth_ioctl = dpaa_ioctl,
3137 .ndo_setup_tc = dpaa_setup_tc,
3138 .ndo_change_mtu = dpaa_change_mtu,
3139 .ndo_bpf = dpaa_xdp,
3140 .ndo_xdp_xmit = dpaa_xdp_xmit,
3143 static int dpaa_napi_add(struct net_device *net_dev)
3145 struct dpaa_priv *priv = netdev_priv(net_dev);
3146 struct dpaa_percpu_priv *percpu_priv;
3149 for_each_possible_cpu(cpu) {
3150 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3152 netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
3158 static void dpaa_napi_del(struct net_device *net_dev)
3160 struct dpaa_priv *priv = netdev_priv(net_dev);
3161 struct dpaa_percpu_priv *percpu_priv;
3164 for_each_possible_cpu(cpu) {
3165 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3167 netif_napi_del(&percpu_priv->np.napi);
3171 static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
3172 struct bm_buffer *bmb)
3174 dma_addr_t addr = bm_buf_addr(bmb);
3176 dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
3179 skb_free_frag(phys_to_virt(addr));
3182 /* Alloc the dpaa_bp struct and configure default values */
3183 static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
3185 struct dpaa_bp *dpaa_bp;
3187 dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
3189 return ERR_PTR(-ENOMEM);
3191 dpaa_bp->bpid = FSL_DPAA_BPID_INV;
3192 dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
3193 if (!dpaa_bp->percpu_count)
3194 return ERR_PTR(-ENOMEM);
3196 dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
3198 dpaa_bp->seed_cb = dpaa_bp_seed;
3199 dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
3204 /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
3205 * We won't be sending congestion notifications to FMan; for now, we just use
3206 * this CGR to generate enqueue rejections to FMan in order to drop the frames
3207 * before they reach our ingress queues and eat up memory.
3209 static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
3211 struct qm_mcc_initcgr initcgr;
3215 err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
3217 if (netif_msg_drv(priv))
3218 pr_err("Error %d allocating CGR ID\n", err);
3222 /* Enable CS TD, but disable Congestion State Change Notifications. */
3223 memset(&initcgr, 0, sizeof(initcgr));
3224 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
3225 initcgr.cgr.cscn_en = QM_CGR_EN;
3226 cs_th = DPAA_INGRESS_CS_THRESHOLD;
3227 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
3229 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
3230 initcgr.cgr.cstd_en = QM_CGR_EN;
3232 /* This CGR will be associated with the SWP affined to the current CPU.
3233 * However, we'll place all our ingress FQs in it.
3235 err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
3238 if (netif_msg_drv(priv))
3239 pr_err("Error %d creating ingress CGR with ID %d\n",
3240 err, priv->ingress_cgr.cgrid);
3241 qman_release_cgrid(priv->ingress_cgr.cgrid);
3244 if (netif_msg_drv(priv))
3245 pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
3246 priv->ingress_cgr.cgrid, priv->mac_dev->addr);
3248 priv->use_ingress_cgr = true;
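/* Compute the frame headroom for the given port type. The result covers the
 * driver private area plus the hardware annotation area (parse results,
 * hash, timestamp) and is rounded up to the port's FD data alignment; under
 * the A050385 erratum the Rx headroom is fixed to XDP_PACKET_HEADROOM
 * instead.
 */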
3254 static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
3255 enum port_type port)
3259 /* The frame headroom must accommodate:
3260 * - the driver private data area
3261 * - parse results, hash results, timestamp if selected
3262 * If either hash results or timestamps are selected, both will
3263 * be copied to/from the frame headroom, as the TS is located between PR and
3264 * HR in the IC, and the IC copy size has a granularity of 16 bytes
3265 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
3267 * Also make sure the headroom is a multiple of data_align bytes
3269 headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
3272 #ifdef CONFIG_DPAA_ERRATUM_A050385
3273 if (unlikely(fman_has_errata_a050385()))
3274 headroom = XDP_PACKET_HEADROOM;
3277 return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
3279 return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
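/* Probe one DPAA Ethernet interface. Probing defers until BMan, QMan and
 * their portals have come up, then allocates the netdev, resolves the MAC
 * device and the Rx/Tx DMA devices, sets up the buffer pool, the frame
 * queues and their pool channel, initializes the egress and ingress CGRs,
 * initializes the FMan ports and per-CPU state, adds NAPI, finishes netdev
 * initialization through dpaa_netdev_init() and creates the sysfs entries.
 */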
3283 static int dpaa_eth_probe(struct platform_device *pdev)
3285 struct net_device *net_dev = NULL;
3286 struct dpaa_bp *dpaa_bp = NULL;
3287 struct dpaa_fq *dpaa_fq, *tmp;
3288 struct dpaa_priv *priv = NULL;
3289 struct fm_port_fqs port_fqs;
3290 struct mac_device *mac_dev;
3291 int err = 0, channel;
3296 err = bman_is_probed();
3298 return -EPROBE_DEFER;
3300 dev_err(dev, "failing probe due to bman probe error\n");
3303 err = qman_is_probed();
3305 return -EPROBE_DEFER;
3307 dev_err(dev, "failing probe due to qman probe error\n");
3310 err = bman_portals_probed();
3312 return -EPROBE_DEFER;
3315 "failing probe due to bman portals probe error\n");
3318 err = qman_portals_probed();
3320 return -EPROBE_DEFER;
3323 "failing probe due to qman portals probe error\n");
3327 /* Allocate this early, so we can store relevant information in
3330 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
3332 dev_err(dev, "alloc_etherdev_mq() failed\n");
3336 /* Do this here, so we can be verbose early */
3337 SET_NETDEV_DEV(net_dev, dev->parent);
3338 dev_set_drvdata(dev, net_dev);
3340 priv = netdev_priv(net_dev);
3341 priv->net_dev = net_dev;
3343 priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
3345 mac_dev = dpaa_mac_dev_get(pdev);
3346 if (IS_ERR(mac_dev)) {
3347 netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
3348 err = PTR_ERR(mac_dev);
3352 /* Devices used for DMA mapping */
3353 priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
3354 priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
3355 err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
3357 err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
3360 netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
3364 /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
3365 * we choose conservatively and let the user explicitly set a higher
3366 * MTU via ifconfig. Otherwise, the user may end up with different MTUs in the same LAN.
3368 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
3369 * start with the maximum allowed.
3371 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
3373 netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
3376 priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
3377 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
3380 dpaa_bp = dpaa_bp_alloc(dev);
3381 if (IS_ERR(dpaa_bp)) {
3382 err = PTR_ERR(dpaa_bp);
3385 /* the raw size of the buffers used for reception */
3386 dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
3387 /* avoid runtime computations by keeping the usable size here */
3388 dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
3389 dpaa_bp->priv = priv;
3391 err = dpaa_bp_alloc_pool(dpaa_bp);
3394 priv->dpaa_bp = dpaa_bp;
3396 INIT_LIST_HEAD(&priv->dpaa_fq_list);
3398 memset(&port_fqs, 0, sizeof(port_fqs));
3400 err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
3402 dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
3406 priv->mac_dev = mac_dev;
3408 channel = dpaa_get_channel();
3410 dev_err(dev, "dpaa_get_channel() failed\n");
3415 priv->channel = (u16)channel;
3417 /* Walk the CPUs with affine portals
3418 * and add this pool channel to each one's dequeue mask.
3420 dpaa_eth_add_channel(priv->channel, &pdev->dev);
3422 dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
3424 /* Create a congestion group for this netdev, with
3425 * dynamically-allocated CGR ID.
3426 * Must be executed after probing the MAC, but before
3427 * assigning the egress FQs to the CGRs.
3429 err = dpaa_eth_cgr_init(priv);
3431 dev_err(dev, "Error initializing CGR\n");
3435 err = dpaa_ingress_cgr_init(priv);
3437 dev_err(dev, "Error initializing ingress CGR\n");
3438 goto delete_egress_cgr;
3441 /* Add the FQs to the interface, and make them active */
3442 list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
3443 err = dpaa_fq_init(dpaa_fq, false);
3448 priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
3449 priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
3451 /* All real interfaces need their ports initialized */
3452 err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
3453 &priv->buf_layout[0], dev);
3457 /* Rx traffic distribution based on keygen hashing defaults to on */
3458 priv->keygen_in_use = true;
3460 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
3461 if (!priv->percpu_priv) {
3462 dev_err(dev, "devm_alloc_percpu() failed\n");
3468 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
3470 /* Initialize NAPI */
3471 err = dpaa_napi_add(net_dev);
3473 goto delete_dpaa_napi;
3475 err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
3477 goto delete_dpaa_napi;
3479 dpaa_eth_sysfs_init(&net_dev->dev);
3481 netif_info(priv, probe, net_dev, "Probed interface %s\n",
3487 dpaa_napi_del(net_dev);
3489 dpaa_fq_free(dev, &priv->dpaa_fq_list);
3490 qman_delete_cgr_safe(&priv->ingress_cgr);
3491 qman_release_cgrid(priv->ingress_cgr.cgrid);
3493 qman_delete_cgr_safe(&priv->cgr_data.cgr);
3494 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3496 dpaa_bps_free(priv);
3498 dev_set_drvdata(dev, NULL);
3499 free_netdev(net_dev);
3504 static int dpaa_remove(struct platform_device *pdev)
3506 struct net_device *net_dev;
3507 struct dpaa_priv *priv;
3512 net_dev = dev_get_drvdata(dev);
3514 priv = netdev_priv(net_dev);
3516 dpaa_eth_sysfs_remove(dev);
3518 dev_set_drvdata(dev, NULL);
3519 unregister_netdev(net_dev);
3520 phylink_destroy(priv->mac_dev->phylink);
3522 err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
3524 qman_delete_cgr_safe(&priv->ingress_cgr);
3525 qman_release_cgrid(priv->ingress_cgr.cgrid);
3526 qman_delete_cgr_safe(&priv->cgr_data.cgr);
3527 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3529 dpaa_napi_del(net_dev);
3531 dpaa_bps_free(priv);
3533 free_netdev(net_dev);
3538 static const struct platform_device_id dpaa_devtype[] = {
3540 .name = "dpaa-ethernet",
3545 MODULE_DEVICE_TABLE(platform, dpaa_devtype);
3547 static struct platform_driver dpaa_driver = {
3549 .name = KBUILD_MODNAME,
3551 .id_table = dpaa_devtype,
3552 .probe = dpaa_eth_probe,
3553 .remove = dpaa_remove
3556 static int __init dpaa_load(void)
3560 pr_debug("FSL DPAA Ethernet driver\n");
3562 /* initialize dpaa_eth mirror values */
3563 dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
3564 dpaa_max_frm = fman_get_max_frm();
3566 err = platform_driver_register(&dpaa_driver);
3568 pr_err("Error, platform_driver_register() = %d\n", err);
3572 module_init(dpaa_load);
3574 static void __exit dpaa_unload(void)
3576 platform_driver_unregister(&dpaa_driver);
3578 /* Only one channel is used and needs to be released after all
3579 * interfaces are removed
3581 dpaa_release_channel();
3583 module_exit(dpaa_unload);
3585 MODULE_LICENSE("Dual BSD/GPL");
3586 MODULE_DESCRIPTION("FSL DPAA Ethernet driver");