 struct virtnet_rx_stats {
 	struct virtnet_rq_stat_items rx;
-	struct {
-		unsigned int xdp_tx;
-		unsigned int xdp_tx_drops;
-	} tx;
 };
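The tx sub-struct was never receive state; it only staged XDP_TX counters so that virtnet_receive() could flush them into the send queue at the end of each poll. With the accounting moved to the transmit side (see the later hunks), the per-poll RX stats reduce to the receive items alone:

	struct virtnet_rx_stats {
		struct virtnet_rq_stat_items rx;
	};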
[...]
 #define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
[...]
 	return &vi->sq[qp];
 }
-static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
-				 struct xdp_frame *xdpf)
-{
-	struct xdp_frame *xdpf_sent;
-	struct send_queue *sq;
-	unsigned int len;
-
-	sq = virtnet_xdp_sq(vi);
-
-	/* Free up any pending old buffers before queueing new ones. */
-	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
-		xdp_return_frame(xdpf_sent);
-
-	return __virtnet_xdp_xmit_one(vi, sq, xdpf);
-}
-
 static int virtnet_xdp_xmit(struct net_device *dev,
 			    int n, struct xdp_frame **frames, u32 flags)
 {
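The helper removed above duplicated a slice of virtnet_xdp_xmit(): pick the per-CPU XDP send queue, reclaim completed frames, queue exactly one frame. It also bypassed the send-queue counters entirely. A simplified sketch of virtnet_xdp_xmit() (illustrative, not the verbatim driver code; flag validation and the XDP_XMIT_FLUSH kick are omitted) shows why callers can simply pass n = 1 instead:

	static int virtnet_xdp_xmit(struct net_device *dev,
				    int n, struct xdp_frame **frames, u32 flags)
	{
		struct virtnet_info *vi = netdev_priv(dev);
		struct send_queue *sq = virtnet_xdp_sq(vi);
		struct xdp_frame *xdpf_sent;
		unsigned int len;
		int i, drops = 0;

		/* Free up any pending old buffers, as the removed helper did. */
		while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
			xdp_return_frame(xdpf_sent);

		for (i = 0; i < n; i++) {
			if (__virtnet_xdp_xmit_one(vi, sq, frames[i])) {
				xdp_return_frame_rx_napi(frames[i]);
				drops++;
			}
		}

		/* The XDP tx accounting that used to be staged in
		 * virtnet_rx_stats now happens here, on the queue that
		 * actually transmitted. */
		u64_stats_update_begin(&sq->stats.syncp);
		sq->stats.xdp_tx += n;
		sq->stats.xdp_tx_drops += drops;
		u64_stats_update_end(&sq->stats.syncp);

		return n - drops;
	}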
[...]
 		xdpf = convert_to_xdp_frame(&xdp);
 		if (unlikely(!xdpf))
 			goto err_xdp;
-		stats->tx.xdp_tx++;
-		err = __virtnet_xdp_tx_xmit(vi, xdpf);
-		if (unlikely(err)) {
-			stats->tx.xdp_tx_drops++;
+		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+		if (unlikely(err < 0)) {
 			trace_xdp_exception(vi->dev, xdp_prog, act);
 			goto err_xdp;
 		}
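In receive_small() the error test changes along with the call: __virtnet_xdp_tx_xmit() returned 0 or a negative errno, while virtnet_xdp_xmit() follows the ndo_xdp_xmit convention and returns how many of the n frames it accepted, or a negative errno. Hence the switch to err < 0. Reading that convention into this call site (an interpretation, not stated in the hunk itself): with n = 1, a frame that is dropped inside virtnet_xdp_xmit() yields 0, which this check does not treat as an error, since the frame is already freed and counted there:

	err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
	if (err < 0) {
		/* hard failure: frame not taken, go down err_xdp */
	} else if (err == 0) {
		/* frame dropped and freed inside virtnet_xdp_xmit(),
		 * accounted as xdp_tx_drops on the send queue */
	} else {
		/* err == 1: frame queued for transmit */
	}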
[...]
 			xdpf = convert_to_xdp_frame(&xdp);
 			if (unlikely(!xdpf))
 				goto err_xdp;
-			stats->tx.xdp_tx++;
-			err = __virtnet_xdp_tx_xmit(vi, xdpf);
-			if (unlikely(err)) {
-				stats->tx.xdp_tx_drops++;
+			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+			if (unlikely(err < 0)) {
 				trace_xdp_exception(vi->dev, xdp_prog, act);
 				if (unlikely(xdp_page != page))
 					put_page(xdp_page);
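The receive_mergeable() hunk mirrors receive_small(); the extra work visible in the context is the failure cleanup. When the buffer was copied into a separate xdp_page for the XDP program, that page carries its own reference, which has to be dropped before the common error exit. Roughly (the goto that follows sits outside the quoted hunk):

	if (unlikely(err < 0)) {
		trace_xdp_exception(vi->dev, xdp_prog, act);
		if (unlikely(xdp_page != page))
			put_page(xdp_page);
		goto err_xdp;
	}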
[...]
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct virtnet_rx_stats stats = {};
-	struct send_queue *sq;
 	unsigned int len;
 	void *buf;
 	int i;
[...]
 	}
 	u64_stats_update_end(&rq->stats.syncp);
 
-	sq = virtnet_xdp_sq(vi);
-	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.xdp_tx += stats.tx.xdp_tx;
-	sq->stats.xdp_tx_drops += stats.tx.xdp_tx_drops;
-	u64_stats_update_end(&sq->stats.syncp);
-
 	return stats.rx.packets;
 }
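After this last hunk, virtnet_receive() no longer touches send-queue state at all: the end-of-poll flush of staged counters into sq->stats is gone because the counters are updated at the moment frames are queued, under the owning queue's own syncp. Readers are unaffected and keep the usual fetch/retry pattern; an illustrative consumer, assuming the plain u64_stats helpers:

	unsigned int start;
	u64 xdp_tx, xdp_tx_drops;

	do {
		start = u64_stats_fetch_begin(&sq->stats.syncp);
		xdp_tx       = sq->stats.xdp_tx;
		xdp_tx_drops = sq->stats.xdp_tx_drops;
	} while (u64_stats_fetch_retry(&sq->stats.syncp, start));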