	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);
	else if (ring_is_xdp(ring))
-		page_frag_free(tx_buffer->raw_buf);
+		xdp_return_frame(tx_buffer->xdpf->data,
+				 &tx_buffer->xdpf->mem);
	else
		dev_kfree_skb_any(tx_buffer->skb);
	if (dma_unmap_len(tx_buffer, len))
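
The free path above only works if struct i40e_tx_buffer can carry the new pointer. The same conversion extends the buffer's anonymous union in i40e_txrx.h; a minimal sketch of that union as of this era of the driver (surrounding fields abbreviated, shown for orientation rather than quoted from this hunk):

	struct i40e_tx_buffer {
		struct i40e_tx_desc *next_to_watch;
		union {
			struct sk_buff *skb;	/* regular tx path */
			void *raw_buf;		/* flow-director (FD_SB) raw buffer */
			struct xdp_frame *xdpf;	/* XDP tx path, after this change */
		};
		unsigned int bytecount;
		unsigned short gso_segs;
		DEFINE_DMA_UNMAP_ADDR(dma);
		DEFINE_DMA_UNMAP_LEN(len);
		u32 tx_flags;
	};

Because xdpf overlays the old raw_buf slot, the struct does not grow; the tx_flags and ring_is_xdp() tests above decide which union member is live when freeing.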
	/* free the skb/XDP data */
	if (ring_is_xdp(tx_ring))
-		page_frag_free(tx_buf->raw_buf);
+		xdp_return_frame(tx_buf->xdpf->data, &tx_buf->xdpf->mem);
	else
		napi_consume_skb(tx_buf->skb, napi_budget);
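
Both cleanup sites now free through xdp_return_frame() instead of hard-coding page_frag_free(). The helper dispatches on the xdp_mem_info captured from the rx queue when the frame was built; a sketch of the generic types and the return helper as introduced in this API generation (reconstructed from memory of that kernel, so treat field names and dispatch cases as illustrative):

	struct xdp_mem_info {
		u32 type;	/* enum xdp_mem_type, e.g. MEM_TYPE_PAGE_SHARED */
		u32 id;
	};

	struct xdp_frame {
		void *data;
		u16 len;
		u16 headroom;
		u16 metasize;
		struct xdp_mem_info mem;	/* stays valid even on a remote CPU */
	};

	void xdp_return_frame(void *data, struct xdp_mem_info *mem)
	{
		if (mem->type == MEM_TYPE_PAGE_SHARED)
			page_frag_free(data);

		if (mem->type == MEM_TYPE_PAGE_ORDER0)
			put_page(virt_to_page(data));	/* assumes order-0 page */
	}

Since i40e registers its rx memory model as MEM_TYPE_PAGE_SHARED in this series, the call still ends in page_frag_free(data) for frames this driver received, so behavior is preserved; the gain is that frames redirected in from drivers with other memory models are now freed correctly through the same call.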
	if (!xdp_prog)
		goto xdp_out;

+	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
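
The new prefetchw() is a prefetch-for-write: on the XDP_TX and XDP_REDIRECT paths, convert_to_xdp_frame() stores the xdp_frame metadata into the top of the packet's own headroom, i.e. at xdp->data_hard_start, so warming that cacheline for write before the BPF program runs hides the cost of those stores (hence the "xdp_frame write" comment). A sketch of the helper from this era's include/net/xdp.h, reconstructed from memory and not part of this diff:

	static inline
	struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
	{
		struct xdp_frame *xdp_frame;
		int metasize;
		int headroom;

		/* assure there is headroom to store the xdp_frame */
		headroom = xdp->data - xdp->data_hard_start;
		metasize = xdp->data - xdp->data_meta;
		metasize = metasize > 0 ? metasize : 0;
		if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
			return NULL;

		/* store info in top of packet: the write prefetched above */
		xdp_frame = xdp->data_hard_start;
		xdp_frame->data = xdp->data;
		xdp_frame->len = xdp->data_end - xdp->data;
		xdp_frame->headroom = headroom - sizeof(*xdp_frame);
		xdp_frame->metasize = metasize;

		/* rxq is only valid during NAPI; snapshot its mem info */
		xdp_frame->mem = xdp->rxq->mem;

		return xdp_frame;
	}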
static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
			      struct i40e_ring *xdp_ring)
{
-	u32 size = xdp->data_end - xdp->data;
	u16 i = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
+	struct xdp_frame *xdpf;
	dma_addr_t dma;
+	u32 size;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return I40E_XDP_CONSUMED;
+
+	size = xdpf->len;

	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}

-	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+	dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return I40E_XDP_CONSUMED;

	tx_bi = &xdp_ring->tx_bi[i];
	tx_bi->bytecount = size;
	tx_bi->gso_segs = 1;
-	tx_bi->raw_buf = xdp->data;
+	tx_bi->xdpf = xdpf;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_bi, len, size);
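
Note the error handling this conversion buys for free: the xdp_frame lives in the rx page's own headroom, so when convert_to_xdp_frame() fails (not enough headroom) or the DMA mapping fails, returning I40E_XDP_CONSUMED simply lets the rx path drop and recycle the page; nothing separate was allocated that could leak. The stored tx_bi->xdpf is what the two cleanup hunks above read back at tx completion. For context, the XDP_TX case in i40e_run_xdp() that consumes this return value looks roughly like this in the era's driver (shape assumed, not part of this diff):

	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
		break;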