 static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 {
 	struct lan966x_tx *tx = &lan966x->tx;
+	struct lan966x_rx *rx = &lan966x->rx;
 	struct lan966x_tx_dcb_buf *dcb_buf;
 	struct xdp_frame_bulk bq;
 	struct lan966x_db *db;

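+		/* XDP_TX pages come from the RX page pool; recycle them
+		 * directly instead of returning them as xdp_frames.
+		 */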
 		if (dcb_buf->xdp_ndo)
 			xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
 		else
-			xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
+			page_pool_recycle_direct(rx->page_pool,
+						 dcb_buf->data.page);
 	}

 	clear = true;
 	tx->last_in_use = next_to_use;
 }

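+/* @ptr points to an xdp_frame when @len is zero (ndo_xdp_xmit path) and to
+ * the RX page when @len is the frame length (XDP_TX path).
+ */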
-int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
-			   struct xdp_frame *xdpf,
-			   struct page *page,
-			   bool dma_map)
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
 {
 	struct lan966x *lan966x = port->lan966x;
 	struct lan966x_tx_dcb_buf *next_dcb_buf;
 	struct lan966x_tx *tx = &lan966x->tx;
+	struct xdp_frame *xdpf;
 	dma_addr_t dma_addr;
+	struct page *page;
 	int next_to_use;
 	__be32 *ifh;
 	int ret = 0;

 		goto out;
 	}

+	/* Get the next buffer */
+	next_dcb_buf = &tx->dcbs_buf[next_to_use];
+
 	/* Generate new IFH */
-	if (dma_map) {
+	if (!len) {
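+		/* A zero @len means @ptr is an xdp_frame coming from
+		 * ndo_xdp_xmit, which needs its own DMA mapping.
+		 */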
+		xdpf = ptr;
+
 		if (xdpf->headroom < IFH_LEN_BYTES) {
 			ret = NETDEV_TX_OK;
 			goto out;
 		}

 			goto out;
 		}

+		next_dcb_buf->data.xdpf = xdpf;
+		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
+
 		/* Setup next dcb */
 		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
 					  xdpf->len + IFH_LEN_BYTES,
 					  dma_addr);
 	} else {
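+		/* A non-zero @len means @ptr is the RX page itself (XDP_TX).
+		 * Its buffer is already mapped by the page pool, so it only
+		 * needs a DMA sync before transmission.
+		 */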
+		page = ptr;
+
 		ifh = page_address(page) + XDP_PACKET_HEADROOM;
 		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 		lan966x_ifh_set_bypass(ifh, 1);
 		dma_addr = page_pool_get_dma_addr(page);
 		dma_sync_single_for_device(lan966x->dev,
 					   dma_addr + XDP_PACKET_HEADROOM,
-					   xdpf->len + IFH_LEN_BYTES,
+					   len + IFH_LEN_BYTES,
 					   DMA_TO_DEVICE);
+		next_dcb_buf->data.page = page;
+		next_dcb_buf->len = len + IFH_LEN_BYTES;
+
 		/* Setup next dcb */
 		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
-					  xdpf->len + IFH_LEN_BYTES,
+					  len + IFH_LEN_BYTES,
 					  dma_addr + XDP_PACKET_HEADROOM);
 	}

 	/* Fill up the buffer */
-	next_dcb_buf = &tx->dcbs_buf[next_to_use];
 	next_dcb_buf->use_skb = false;
-	next_dcb_buf->data.xdpf = xdpf;
-	next_dcb_buf->xdp_ndo = dma_map;
-	next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
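+	/* xdp_ndo separates ndo_xdp_xmit frames, which are returned on
+	 * completion, from XDP_TX pages, which are recycled to the pool.
+	 */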
+	next_dcb_buf->xdp_ndo = !len;
 	next_dcb_buf->dma_addr = dma_addr;
 	next_dcb_buf->used = true;
 	next_dcb_buf->ptp = false;

 	union {
 		struct sk_buff *skb;
 		struct xdp_frame *xdpf;
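+		/* Page used by the XDP_TX path */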
+		struct page *page;
 	} data;
 	u32 len;
 	u32 used : 1;

 int lan966x_ptp_del_traps(struct lan966x_port *port);

 int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
-int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
-			   struct xdp_frame *frame,
-			   struct page *page,
-			   bool dma_map);
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len);
 int lan966x_fdma_change_mtu(struct lan966x *lan966x);
 void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
 void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);

 		struct xdp_frame *xdpf = frames[i];
 		int err;

-		err = lan966x_fdma_xmit_xdpf(port, xdpf, NULL, true);
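+		/* A zero length selects the xdp_frame path in
+		 * lan966x_fdma_xmit_xdpf().
+		 */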
+		err = lan966x_fdma_xmit_xdpf(port, xdpf, 0);
 		if (err)
 			break;

 {
 	struct bpf_prog *xdp_prog = port->xdp_prog;
 	struct lan966x *lan966x = port->lan966x;
-	struct xdp_frame *xdpf;
 	struct xdp_buff xdp;
 	u32 act;

 	case XDP_PASS:
 		return FDMA_PASS;
 	case XDP_TX:
-		xdpf = xdp_convert_buff_to_frame(&xdp);
-		if (!xdpf)
-			return FDMA_DROP;
-
-		return lan966x_fdma_xmit_xdpf(port, xdpf, page, false) ?
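+		/* Hand over the page directly instead of converting the
+		 * buffer to an xdp_frame first.
+		 */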
+		return lan966x_fdma_xmit_xdpf(port, page,
+					      data_len - IFH_LEN_BYTES) ?
 		       FDMA_DROP : FDMA_TX;
 	case XDP_REDIRECT:
 		if (xdp_do_redirect(port->dev, &xdp, xdp_prog))