struct mlx4_en_rx_alloc *frags = ring->rx_info +
				(index << priv->log_rx_info);
if (ring->page_cache.index > 0) {
- frags[0] = ring->page_cache.buf[--ring->page_cache.index];
+ ring->page_cache.index--;
+ frags[0].page = ring->page_cache.buf[ring->page_cache.index].page;
+ frags[0].dma = ring->page_cache.buf[ring->page_cache.index].dma;
+ frags[0].page_offset = XDP_PACKET_HEADROOM;
rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
frags[0].page_offset);
return 0;

if (cache->index >= MLX4_EN_CACHE_SIZE)
return false;
- cache->buf[cache->index++] = *frame;
+ cache->buf[cache->index].page = frame->page;
+ cache->buf[cache->index].dma = frame->dma;
+ cache->index++;
return true;
}
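/*
 * For reference, a sketch of the reduced page-cache structure these hunks
 * now populate (assuming the declaration sits in mlx4_en.h, as in mainline):
 * each cached entry keeps only the page pointer and its DMA address, since
 * the offset and size are now the constants XDP_PACKET_HEADROOM and
 * PAGE_SIZE.
 */
struct mlx4_en_page_cache {
	u32 index;
	struct {
		struct page	*page;
		dma_addr_t	dma;
	} buf[MLX4_EN_CACHE_SIZE];
};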
int i;
for (i = 0; i < ring->page_cache.index; i++) {
- struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
-
- dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
- priv->dma_dir);
- put_page(frame->page);
+ dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(ring->page_cache.buf[i].page);
}
ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);

struct mlx4_en_rx_alloc frame = {
.page = tx_info->page,
.dma = tx_info->map0_dma,
- .page_offset = XDP_PACKET_HEADROOM,
- .page_size = PAGE_SIZE,
};
if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
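	/*
	 * When mlx4_en_rx_recycle() returns false (cache full), the caller
	 * must release the page itself. A minimal sketch of that fallback,
	 * assuming the tx_info fields used in the initializer above; see
	 * mlx4_en_recycle_tx_desc() for the authoritative body.
	 */
	dma_unmap_page(priv->ddev, tx_info->map0_dma,
		       PAGE_SIZE, priv->dma_dir);
	put_page(tx_info->page);
}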