/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

struct xdp_buff_xsk {
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	struct list_head free_list_node;
};

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct net_device *netdev;
	struct list_head list; /* Protected by the RTNL_LOCK */
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 dma_pages_cnt;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
	 * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
	 * sockets share a single cq when the same netdev and queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};
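
/*
 * Illustrative sketch (not part of this header): the reason every pool keeps
 * its own dma_pages[] array is that the data path can turn a umem address
 * into a device address with a single per-pool lookup, roughly:
 *
 *	dma = pool->dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK;
 *	dma += addr & (PAGE_SIZE - 1);
 *
 * See xp_raw_get_dma() below for the real lookup; the masking shown here is
 * an assumption for illustration only.
 */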

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
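
/*
 * Control-path usage sketch (illustrative only; the call ordering below is an
 * assumption, not a contract documented in this header):
 *
 *	pool = xp_create_and_assign_umem(xs, umem);
 *	err = xp_assign_dev(pool, dev, queue_id, flags);
 *	...
 *	xp_clear_dev(pool);
 *	xp_put_pool(pool);	releases the pool once the last user is gone
 */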

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
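
/*
 * Zero-copy driver data-path sketch (illustrative only; "len" and the way a
 * driver recovers its xdp_buff_xsk from the returned xdp_buff are assumptions,
 * not something this header defines):
 *
 *	xdp = xp_alloc(pool);
 *	dma = xp_get_dma(xskb);			xskb backs the returned xdp
 *	xp_dma_sync_for_device(pool, dma, len);	before handing dma to HW
 *	... NIC writes the frame ...
 *	xp_dma_sync_for_cpu(xskb);		before the CPU reads the data
 *	... run the XDP program; xp_free(xskb) if the frame is dropped ...
 */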

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
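
/*
 * Worked example (illustrative): dma_pages[] holds page-aligned addresses,
 * e.g. 0x12340000, whose bits 0-11 are always zero. Bit 0 can therefore be
 * borrowed to record that the following umem page is also DMA-contiguous:
 *
 *	pool->dma_pages[i] = dma | XSK_NEXT_PG_CONTIG_MASK;	flag it
 *	dma = pool->dma_pages[i] & ~XSK_NEXT_PG_CONTIG_MASK;	recover the addr
 */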

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	if (pool->dma_pages_cnt) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}

	/* skb path */
	return addr + len > pool->addrs_cnt;
}
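
/*
 * Worked example (illustrative, 4 KiB pages): addr = 0xff0 and len = 0x40 give
 * (0xff0 & 0xfff) + 0x40 > 0x1000, so the descriptor spills into the next
 * page. It is then only usable if that next page was flagged contiguous in
 * dma_pages[] (DMA case), or, on the copy/skb path, if it still lies inside
 * the umem (addr + len <= pool->addrs_cnt).
 */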

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}
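
/*
 * Worked example (illustrative): in unaligned chunk mode a descriptor address
 * packs the chunk base address below XSK_UNALIGNED_BUF_OFFSET_SHIFT and the
 * offset into the chunk above it (both constants come from <linux/if_xdp.h>).
 * With a base of 0x3000 and an offset of 0x100:
 *
 *	addr = 0x3000 | (0x100ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
 *	xp_unaligned_extract_addr(addr)		returns 0x3000
 *	xp_unaligned_extract_offset(addr)	returns 0x100
 *	xp_unaligned_add_offset_to_addr(addr)	returns 0x3100
 */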

#endif /* XSK_BUFF_POOL_H_ */