From: Shenwei Wang
Date: Mon, 31 Oct 2022 18:53:50 +0000 (-0500)
Subject: net: fec: add initial XDP support

This patch adds initial XDP support to the Freescale FEC driver. It
supports the XDP_PASS, XDP_DROP and XDP_REDIRECT actions; upcoming
patches will add support for XDP_TX and the zero-copy feature.

As the patch is rather large, the code that collects the statistics has
been split out and will be submitted as a dedicated follow-up patch.

Testing was done with the xdpsock application:
  -- Native here means running the command "xdpsock -i eth0"
  -- SKB-Mode means running the command "xdpsock -S -i eth0"

The following are the test results for the XDP modes:

root@imx8qxpc0mek:~/bpf# ./xdpsock -i eth0
 sock0@eth0:0 rxdrop xdp-drv
                   pps            pkts           1.00
rx                 371347         2717794
tx                 0              0

root@imx8qxpc0mek:~/bpf# ./xdpsock -S -i eth0
 sock0@eth0:0 rxdrop xdp-skb
                   pps            pkts           1.00
rx                 202229         404528
tx                 0              0

root@imx8qxpc0mek:~/bpf# ./xdp2 eth0
proto 0:     496708 pkt/s
proto 0:     505469 pkt/s
proto 0:     505283 pkt/s
proto 0:     505443 pkt/s
proto 0:     505465 pkt/s

root@imx8qxpc0mek:~/bpf# ./xdp2 -S eth0
proto 0:          0 pkt/s
proto 17:    118778 pkt/s
proto 17:    118989 pkt/s
proto 0:          1 pkt/s
proto 17:    118987 pkt/s
proto 0:          0 pkt/s
proto 17:    118943 pkt/s
proto 17:    118976 pkt/s
proto 0:          1 pkt/s
proto 17:    119006 pkt/s
proto 0:          0 pkt/s
proto 17:    119071 pkt/s
proto 17:    119092 pkt/s

Signed-off-by: Shenwei Wang
Reported-by: kernel test robot
Link: https://lore.kernel.org/r/20221031185350.2045675-1-shenwei.wang@nxp.com
Signed-off-by: Paolo Abeni
---
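Note for readers new to XDP: the xdpsock and xdp2 samples used above live
in samples/bpf/ of the kernel tree. As a rough, hypothetical sketch (not
the actual sample source; the program name and drop policy are made up for
illustration), an XDP program of the kind the driver now runs on every
received frame looks like this:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_drop_udp(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	struct iphdr *iph;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;	/* truncated frame: let the stack decide */
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end)
		return XDP_PASS;

	return iph->protocol == IPPROTO_UDP ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";

The verdict returned here (XDP_PASS, XDP_DROP, ...) is exactly what the
new fec_enet_run_xdp() below acts upon.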
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 476e386..61e847b 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -348,7 +348,6 @@ struct bufdesc_ex {
  */
 #define FEC_ENET_XDP_HEADROOM	(XDP_PACKET_HEADROOM)
-
 #define FEC_ENET_RX_PAGES	256
 #define FEC_ENET_RX_FRSIZE	(PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
 				 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -663,6 +662,9 @@ struct fec_enet_private {
 
 	struct imx_sc_ipc *ipc_handle;
 
+	/* XDP BPF Program */
+	struct bpf_prog *xdp_prog;
+
 	u64 ethtool_stats[];
 };
 
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bfbb560..4fbdefb 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,6 +89,11 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
 #define FEC_ENET_OPD_V	0xFFF0
 #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
 
+#define FEC_ENET_XDP_PASS          0
+#define FEC_ENET_XDP_CONSUMED      BIT(0)
+#define FEC_ENET_XDP_TX            BIT(1)
+#define FEC_ENET_XDP_REDIR         BIT(2)
+
 struct fec_devinfo {
 	u32 quirks;
 };
@@ -418,13 +423,14 @@ static int
 fec_enet_create_page_pool(struct fec_enet_private *fep,
 			  struct fec_enet_priv_rx_q *rxq, int size)
 {
+	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	struct page_pool_params pp_params = {
 		.order = 0,
 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.pool_size = size,
 		.nid = dev_to_node(&fep->pdev->dev),
 		.dev = &fep->pdev->dev,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
 		.offset = FEC_ENET_XDP_HEADROOM,
 		.max_len = FEC_ENET_RX_FRSIZE,
 	};
@@ -1499,6 +1505,59 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
 }
 
+static u32
+fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
+		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+{
+	unsigned int sync, len = xdp->data_end - xdp->data;
+	u32 ret = FEC_ENET_XDP_PASS;
+	struct page *page;
+	int err;
+	u32 act;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+
+	/* Due to xdp_adjust_tail, the DMA sync for_device must cover the
+	 * maximum length the CPU may have touched.
+	 */
+	sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+	sync = max(sync, len);
+
+	switch (act) {
+	case XDP_PASS:
+		ret = FEC_ENET_XDP_PASS;
+		break;
+
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(fep->netdev, xdp, prog);
+		if (!err) {
+			ret = FEC_ENET_XDP_REDIR;
+		} else {
+			ret = FEC_ENET_XDP_CONSUMED;
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
+		}
+		break;
+
+	case XDP_TX:
+		/* not yet supported: warn and treat as drop */
+		fallthrough;
+
+	default:
+		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+		fallthrough;
+
+	case XDP_ABORTED:
+		fallthrough;	/* handle aborts by dropping packet */
+
+	case XDP_DROP:
+		ret = FEC_ENET_XDP_CONSUMED;
+		page = virt_to_head_page(xdp->data);
+		page_pool_put_page(rxq->page_pool, page, sync, true);
+		break;
+	}
+
+	return ret;
+}
+
 /* During a receive, the bd_rx.cur points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -1520,6 +1579,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	u16	vlan_tag;
 	int	index = 0;
 	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
+	struct xdp_buff xdp;
 	struct page *page;
 
 #ifdef CONFIG_M532x
@@ -1531,6 +1593,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	 * These get messed up if we get called due to a busy condition.
 	 */
 	bdp = rxq->bd.cur;
+	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
 
 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
@@ -1580,6 +1643,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		prefetch(page_address(page));
 		fec_enet_update_cbd(rxq, bdp, index);
 
+		if (xdp_prog) {
+			xdp_buff_clear_frags_flag(&xdp);
+			xdp_prepare_buff(&xdp, page_address(page),
+					 FEC_ENET_XDP_HEADROOM, pkt_len, false);
+
+			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+			xdp_result |= ret;
+			if (ret != FEC_ENET_XDP_PASS)
+				goto rx_processing_done;
+		}
+
 		/* The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
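Why the rx loop above only ORs verdicts into xdp_result instead of
flushing immediately: xdp_do_redirect() merely queues the frame on a
per-CPU bulk queue, and the flush that actually kicks the target device
is deferred to the end of the NAPI poll (next hunk). A condensed,
illustrative sketch of that pattern -- frames_left() and
fetch_next_frame() are hypothetical stand-ins for the driver's
descriptor-ring walk, not real functions:

/* hypothetical stand-ins for the descriptor-ring walk */
extern bool frames_left(struct fec_enet_priv_rx_q *rxq, int *index);
extern void fetch_next_frame(struct fec_enet_priv_rx_q *rxq,
			     struct xdp_buff *xdp, int index);

static int rx_poll_sketch(struct fec_enet_private *fep,
			  struct fec_enet_priv_rx_q *rxq,
			  struct bpf_prog *xdp_prog)
{
	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
	struct xdp_buff xdp;
	int pkt_received = 0;
	int index;

	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);

	while (frames_left(rxq, &index)) {
		fetch_next_frame(rxq, &xdp, index);
		pkt_received++;

		ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
		xdp_result |= ret;	/* remember if anything was redirected */
		if (ret != FEC_ENET_XDP_PASS)
			continue;	/* consumed: no skb is built */

		/* ... otherwise build an skb and pass it up the stack ... */
	}

	if (xdp_result & FEC_ENET_XDP_REDIR)
		xdp_do_flush_map();	/* drain queues filled by xdp_do_redirect() */

	return pkt_received;
}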
@@ -1675,6 +1749,10 @@ rx_processing_done:
 			writel(0, rxq->bd.reg_desc_active);
 	}
 	rxq->bd.cur = bdp;
+
+	if (xdp_result & FEC_ENET_XDP_REDIR)
+		xdp_do_flush_map();
+
 	return pkt_received;
 }
 
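The remaining hunks add the attach hook (ndo_bpf) and the redirect
transmit hook (ndo_xdp_xmit). For context, a hypothetical libbpf
sequence that ends up in fec_enet_bpf() below with the XDP_SETUP_PROG
command -- the object file and program name are illustrative:

#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int attach_xdp(const char *ifname)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_prog.bpf.o", NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!obj || !ifindex)
		return -1;
	if (bpf_object__load(obj))
		return -1;

	prog = bpf_object__find_program_by_name(obj, "xdp_drop_udp");
	if (!prog)
		return -1;

	/* XDP_FLAGS_DRV_MODE requests native XDP, i.e. the path added by
	 * this patch; XDP_FLAGS_SKB_MODE would select generic XDP instead.
	 */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}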
@@ -3518,6 +3596,148 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
 }
 
+static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	bool is_run = netif_running(dev);
+	struct bpf_prog *old_prog;
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		if (is_run) {
+			napi_disable(&fep->napi);
+			netif_tx_disable(dev);
+		}
+
+		old_prog = xchg(&fep->xdp_prog, bpf->prog);
+		fec_restart(dev);
+
+		if (is_run) {
+			napi_enable(&fep->napi);
+			netif_tx_start_all_queues(dev);
+		}
+
+		if (old_prog)
+			bpf_prog_put(old_prog);
+
+		return 0;
+
+	case XDP_SETUP_XSK_POOL:
+		return -EOPNOTSUPP;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int cpu)
+{
+	int index = cpu;
+
+	if (unlikely(index < 0))
+		index = 0;
+
+	while (index >= fep->num_tx_queues)
+		index -= fep->num_tx_queues;
+
+	return index;
+}
+
+static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
+				   struct fec_enet_priv_tx_q *txq,
+				   struct xdp_frame *frame)
+{
+	unsigned int index, status, estatus;
+	struct bufdesc *bdp, *last_bdp;
+	dma_addr_t dma_addr;
+	int entries_free;
+
+	entries_free = fec_enet_get_free_txdesc_num(txq);
+	if (entries_free < MAX_SKB_FRAGS + 1) {
+		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+		return -EBUSY;
+	}
+
+	/* Fill in a Tx ring entry */
+	bdp = txq->bd.cur;
+	last_bdp = bdp;
+	status = fec16_to_cpu(bdp->cbd_sc);
+	status &= ~BD_ENET_TX_STATS;
+
+	index = fec_enet_get_bd_index(bdp, &txq->bd);
+
+	dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
+				  frame->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+		return -ENOMEM;
+
+	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+	if (fep->bufdesc_ex)
+		estatus = BD_ENET_TX_INT;
+
+	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
+	bdp->cbd_datlen = cpu_to_fec16(frame->len);
+
+	if (fep->bufdesc_ex) {
+		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+		if (fep->quirks & FEC_QUIRK_HAS_AVB)
+			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+		ebdp->cbd_bdu = 0;
+		ebdp->cbd_esc = cpu_to_fec32(estatus);
+	}
+
+	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
+	txq->tx_skbuff[index] = NULL;
+
+	/* Send it on its way. Tell FEC it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+	bdp->cbd_sc = cpu_to_fec16(status);
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+
+	txq->bd.cur = bdp;
+
+	return 0;
+}
+
+static int fec_enet_xdp_xmit(struct net_device *dev,
+			     int num_frames,
+			     struct xdp_frame **frames,
+			     u32 flags)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_priv_tx_q *txq;
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	unsigned int queue;
+	int i;
+
+	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(fep->netdev, queue);
+
+	__netif_tx_lock(nq, cpu);
+
+	for (i = 0; i < num_frames; i++)
+		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+			break;
+
+	/* Make sure the updates to bdp and tx_skbuff are performed. */
+	wmb();
+
+	/* Trigger transmission start */
+	writel(0, txq->bd.reg_desc_active);
+
+	__netif_tx_unlock(nq);
+
+	/* Frames not queued here are freed by the XDP core */
+	return i;
+}
+
 static const struct net_device_ops fec_netdev_ops = {
 	.ndo_open		= fec_enet_open,
 	.ndo_stop		= fec_enet_close,
@@ -3532,6 +3752,8 @@ static const struct net_device_ops fec_netdev_ops = {
 	.ndo_poll_controller	= fec_poll_controller,
 #endif
 	.ndo_set_features	= fec_set_features,
+	.ndo_bpf		= fec_enet_bpf,
+	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
 };
 
 static const unsigned short offset_des_active_rxq[] = {
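For completeness: fec_enet_xdp_xmit() above is reached when an XDP
program on some other interface returns XDP_REDIRECT through a devmap
entry that points at the FEC. A sketch of such a program -- the map name
and layout are illustrative, and key 0 is assumed to hold the FEC's
ifindex:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_to_fec(struct xdp_md *ctx)
{
	/* The third argument doubles as the fallback action taken when
	 * the map lookup fails.
	 */
	return bpf_redirect_map(&tx_port, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";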