prev->last_in_page = true;
}
-static int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
+static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
return 0;
}
-static void mlx5e_free_di_list(struct mlx5e_rq *rq)
+static void mlx5e_free_au_list(struct mlx5e_rq *rq)
{
kvfree(rq->wqe.alloc_units);
}
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, node);
+ err = mlx5e_init_au_list(rq, wq_sz, node);
if (err)
goto err_rq_frags;
}
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
err_rq_frags:
kvfree(rq->wqe.frags);
}
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
return true;
}
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, struct mlx5e_alloc_unit *au)
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
struct mlx5e_rq_stats *stats = rq->stats;
return true;
}
-static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, struct mlx5e_alloc_unit *au)
+static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
dma_addr_t addr;
return 0;
}
-static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, struct mlx5e_alloc_unit *au)
+static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
if (rq->xsk_pool)
return mlx5e_xsk_page_alloc_pool(rq, au);
}
static inline void mlx5e_page_release(struct mlx5e_rq *rq,
- struct mlx5e_alloc_unit *au,
+ union mlx5e_alloc_unit *au,
bool recycle)
{
if (rq->xsk_pool)
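/* Usage sketch, assuming the union shape above: callers never inspect the
 * union member directly; the RQ type selects it, and alloc/release stay
 * symmetric:
 *
 *	union mlx5e_alloc_unit au;
 *	int err;
 *
 *	err = mlx5e_page_alloc(rq, &au);	// fills au.xsk on XSK RQs, au.page otherwise
 *	if (unlikely(err))
 *		return err;
 *	...
 *	mlx5e_page_release(rq, &au, true);	// releases whichever member is live
 */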
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
+ union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
unsigned int truesize)
{
dma_addr_t addr = page_pool_get_dma_addr(au->page);
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
- struct mlx5e_alloc_unit *alloc_units = wi->alloc_units;
+ union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
bool no_xdp_xmit;
int i;
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
- struct mlx5e_alloc_unit au;
+ union mlx5e_alloc_unit au;
err = mlx5e_page_alloc(rq, &au);
if (unlikely(err))
while (--i >= 0) {
dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
- struct mlx5e_alloc_unit au = {
+ union mlx5e_alloc_unit au = {
.page = dma_info->page,
};
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
- struct mlx5e_alloc_unit *au = &wi->alloc_units[0];
+ union mlx5e_alloc_unit *au = &wi->alloc_units[0];
struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
hd_info = &shampo->info[index];
hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
if (hd_info->page != deleted_page) {
- struct mlx5e_alloc_unit au = {
+ union mlx5e_alloc_unit au = {
.page = hd_info->page,
};
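/* Idiom note: the SHAMPO paths above wrap a raw header page in a short-lived
 * on-stack union so the common release helper can be reused. Minimal sketch:
 *
 *	union mlx5e_alloc_unit au = {
 *		.page = hd_info->page,		// only .page is live here
 *	};
 *
 *	mlx5e_page_release(rq, &au, recycle);	// recycle flag chosen by the caller
 */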
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct mlx5e_alloc_unit *au = wi->au;
+ union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
- struct mlx5e_alloc_unit *au = wi->au;
+ union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct skb_shared_info *sinfo;
u32 frag_consumed_bytes;
static void
mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
- struct mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
+ union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
- struct mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
- struct mlx5e_alloc_unit *head_au = au;
+ union mlx5e_alloc_unit *head_au = au;
struct sk_buff *skb;
dma_addr_t addr;
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
- struct mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
u64 addr = shampo->info[header_index].addr;
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_alloc_unit au = {
+ union mlx5e_alloc_unit au = {
.page = shampo->info[header_index].page,
};
bool match = cqe->shampo.match;
struct mlx5e_rq_stats *stats = rq->stats;
struct mlx5e_rx_wqe_ll *wqe;
- struct mlx5e_alloc_unit *au;
+ union mlx5e_alloc_unit *au;
struct mlx5e_mpw_info *wi;
struct mlx5_wq_ll *wq;
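/* Follow-up sketch (assumption, enabled by the bare union being pointer-
 * sized): an alloc_units array can be reinterpreted as struct xdp_buff **
 * and filled in one call on XSK striding RQs, e.g.:
 *
 *	static_assert(sizeof(union mlx5e_alloc_unit) == sizeof(struct xdp_buff *));
 *
 *	u32 batch = xsk_buff_alloc_batch(rq->xsk_pool,
 *					 (struct xdp_buff **)wi->alloc_units,
 *					 rq->mpwqe.pages_per_wqe);
 */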