dma_addr_t dma = 0;
u32 size;
- tx_info->xdpf = convert_to_xdp_frame(xdp);
+ tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
size = tx_info->xdpf->len;
ena_buf = tx_info->bufs;
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
{
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return I40E_XDP_CONSUMED;
*/
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return ICE_XDP_CONSUMED;
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
int cpu;
u32 ret;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return MVNETA_XDP_DROPPED;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return false;
xdpi.frame.xdpf = xdpf;
xdpi.frame.dma_addr = dma_addr;
} else {
- /* Driver assumes that convert_to_xdp_frame returns an xdp_frame
- * that points to the same memory region as the original
- * xdp_buff. It allows to map the memory only once and to use
- * the DMA_BIDIRECTIONAL mode.
+ /* Driver assumes that xdp_convert_buff_to_frame returns
+ * an xdp_frame that points to the same memory region as
+ * the original xdp_buff, which allows the memory to be
+ * mapped only once and used in DMA_BIDIRECTIONAL mode.
*/
xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
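For illustration only (not part of this patch), a minimal sketch of how that assumption lets the page's RX-time DMA_BIDIRECTIONAL mapping be reused at transmit; dev, page and page_dma_addr are hypothetical placeholders, not mlx5 fields:
/* Sketch: the xdp_frame returned by xdp_convert_buff_to_frame() lives in
 * the headroom of the same page that backs the xdp_buff, so the mapping
 * created at RX time can be synced for TX instead of mapped a second time.
 */
static void sketch_reuse_rx_mapping(struct device *dev, struct page *page,
				    dma_addr_t page_dma_addr,
				    struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return;	/* not enough headroom; the caller would drop */

	/* Offset of the payload inside the already-mapped page. */
	dma_sync_single_for_device(dev,
				   page_dma_addr +
				   (xdpf->data - page_address(page)),
				   xdpf->len, DMA_BIDIRECTIONAL);
}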
case XDP_TX:
/* Buffer ownership passes to tx on success. */
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) {
efx_free_rx_buffers(rx_queue, rx_buf, 1);
static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
u32 ret;
if (unlikely(!xdpf))
ret = CPSW_XDP_PASS;
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto drop;
static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
- struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+ struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
if (unlikely(!frame))
return -EOVERFLOW;
static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
struct veth_xdp_tx_bq *bq)
{
- struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+ struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
if (unlikely(!frame))
return -EOVERFLOW;
break;
case XDP_TX:
stats->xdp_tx++;
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
break;
case XDP_TX:
stats->xdp_tx++;
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
/* Convert xdp_buff to xdp_frame */
static inline
-struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
+struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
struct xdp_frame *xdp_frame;
int metasize;
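For orientation (not part of this diff), a minimal sketch of the contract the helper provides, assuming a regular page-backed xdp_buff: the frame is carved out of the buffer's own headroom rather than allocated, so a NULL return means the headroom was too small, which is why every call site above treats NULL as a drop:
/* Sketch: frame metadata and packet payload share one memory region. */
static bool sketch_frame_shares_buff(struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return false;		/* headroom too small: drop */

	/* The frame struct sits at the start of the headroom ... */
	WARN_ON((void *)xdpf != xdp->data_hard_start);
	/* ... and the payload pointer still refers to the same buffer. */
	WARN_ON(xdpf->data != xdp->data);

	return true;
}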
{
struct xdp_frame *xdpf;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return -EOVERFLOW;
if (unlikely(err))
return err;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return -EOVERFLOW;