 	return ret;
 }
 
-static int
+static void
 mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		     struct mvneta_rx_desc *rx_desc,
 		     struct mvneta_rx_queue *rxq,
 		     struct xdp_buff *xdp,
-		     struct bpf_prog *xdp_prog,
 		     struct page *page,
 		     struct mvneta_stats *stats)
 {
@@ ... @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
 	struct skb_shared_info *sinfo;
-	int ret = 0;
 
 	if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
 		len = MVNETA_MAX_RX_BUF_SIZE;
@@ ... @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	sinfo = xdp_get_shared_info_from_buff(xdp);
 	sinfo->nr_frags = 0;
 
-	if (xdp_prog)
-		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
-
 	rxq->left_size = rx_desc->data_size - len;
 	rx_desc->buf_phys_addr = 0;
-
-	return ret;
 }
 
 static void
@@ ... @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		rxq->refill_num++;
 
 		if (rx_status & MVNETA_RXD_FIRST_DESC) {
-			int err;
-
 			/* Check errors only for FIRST descriptor */
 			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
 				mvneta_rx_error(pp, rx_desc);
 				goto next;
 			}
 
-			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
-						   xdp_prog, page, &ps);
-			if (err)
-				continue;
-
 			desc_status = rx_desc->status;
+			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, page,
+					     &ps);
 		} else {
 			if (unlikely(!xdp_buf.data_hard_start))
 				continue;
@@ ... @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			goto next;
 		}
 
+		if (xdp_prog &&
+		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, &ps))
+			goto next;
+
 		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);