/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)
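
/*
 * Example (illustrative sketch, not a declared helper): the core code
 * tests this bit to decide whether a frame that straddles a page
 * boundary still maps to physically contiguous memory. A driver-side
 * check along those lines could look like this; the function name is
 * hypothetical:
 *
 *	static bool frame_crosses_non_contig_pg(struct xdp_umem *umem,
 *						u64 addr, u32 len)
 *	{
 *		bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
 *		bool contig = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr &
 *			      XSK_NEXT_PG_CONTIG_MASK;
 *
 *		return cross_pg && !contig;
 *	}
 */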

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
 * flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */
struct xsk_map;
struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;

#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
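
/*
 * Example (illustrative sketch): a zero-copy driver's TX path typically
 * pulls descriptors with xsk_umem_consume_tx(), posts them to hardware
 * and then calls xsk_umem_consume_tx_done(); completed sends are later
 * acknowledged with xsk_umem_complete_tx(). hw_post_tx(), ring, budget
 * and nb_done stand in for driver-specific pieces:
 *
 *	struct xdp_desc desc;
 *
 *	while (budget-- && xsk_umem_consume_tx(umem, &desc))
 *		hw_post_tx(ring, xdp_umem_get_dma(umem, desc.addr),
 *			   desc.len);
 *	xsk_umem_consume_tx_done(umem);
 *
 * and once the hardware reports that nb_done sends have finished:
 *
 *	xsk_umem_complete_tx(umem, nb_done);
 */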

struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
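
/*
 * Example (illustrative sketch): with the need_wakeup feature enabled,
 * a driver flags the rings only when user space must issue a syscall
 * to make progress, saving wakeups in the common case. A hypothetical
 * RX-poll fragment, where fill_ring_ran_dry is driver state:
 *
 *	if (xsk_umem_uses_need_wakeup(umem)) {
 *		if (fill_ring_ran_dry)
 *			xsk_set_rx_need_wakeup(umem);
 *		else
 *			xsk_clear_rx_need_wakeup(umem);
 *	}
 */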

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}
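
/*
 * Example (illustrative sketch): translating the same descriptor
 * address into both mappings, assuming desc came off a ring:
 *
 *	char *va = xdp_umem_get_data(umem, desc.addr);
 *	dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);
 *
 * Both helpers first fold the upper-bits offset of unaligned mode back
 * into the address, so the same calls work in aligned and unaligned
 * chunk modes.
 */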

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
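
/*
 * Example (illustrative sketch): a hypothetical fill-ring refill loop
 * that drains parked addresses before fresh FILL ring entries;
 * hw_ring_has_space() and hw_post_rx_buffer() are driver-specific:
 *
 *	u64 addr;
 *
 *	while (hw_ring_has_space(ring) &&
 *	       xsk_umem_peek_addr_rq(umem, &addr)) {
 *		hw_post_rx_buffer(ring, xdp_umem_get_dma(umem, addr));
 *		xsk_umem_discard_addr_rq(umem);
 *	}
 *
 * At teardown, addresses still owned by the hardware can be parked for
 * the next bind with xsk_umem_fq_reuse(umem, addr).
 */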

/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16-bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
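
/*
 * Example: adding a 256 byte headroom offset to a chunk address:
 *
 *	addr = xsk_umem_adjust_offset(umem, addr, 256);
 *
 * In aligned mode this yields addr + 256. In unaligned mode it yields
 * addr + (256ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT), keeping the base
 * address in the low bits; xsk_umem_add_offset_to_addr() later folds
 * the offset back in.
 */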

#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */