Another setsockopt (XDP_TX_RING) is added to let the process allocate
a Tx queue, to which the user process can pass frames to be transmitted
by the kernel.
The mmapping of the queue is done using the XDP_PGOFF_TX_RING offset.
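For illustration, a minimal user-space sketch of setting up and mapping
the Tx ring could look like the following. This is not part of the patch:
the helper name, the ring size, and the way the mmap length is computed
(producer/consumer indices plus the descriptor array) are assumptions
for the example only.

  #include <sys/socket.h>
  #include <sys/mman.h>
  #include <linux/if_xdp.h>

  /* Hypothetical helper: xsk_fd is an AF_XDP socket, map_len is the
   * size of the region to map, which depends on the ring layout and
   * is outside the scope of this excerpt.
   */
  static void *setup_tx_ring(int xsk_fd, int entries, size_t map_len)
  {
          void *tx_map;

          /* Ask the kernel to allocate a Tx ring with 'entries' slots. */
          if (setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING,
                         &entries, sizeof(entries)))
                  return NULL;

          /* Map the Tx ring into user space at the XDP_PGOFF_TX_RING
           * offset.
           */
          tx_map = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_POPULATE, xsk_fd,
                        XDP_PGOFF_TX_RING);
          return tx_map == MAP_FAILED ? NULL : tx_map;
  }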
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
+ struct xsk_queue *tx ____cacheline_aligned_in_smp;
/* Protects multiple processes in the control path */
struct mutex mutex;
u64 rx_dropped;
/* XDP socket options */
#define XDP_RX_RING 1
+#define XDP_TX_RING 2
#define XDP_UMEM_REG 3
#define XDP_UMEM_FILL_RING 4
#define XDP_UMEM_COMPLETION_RING 5
/* Pgoff for mmaping the rings */
#define XDP_PGOFF_RX_RING 0
+#define XDP_PGOFF_TX_RING 0x80000000
#define XDP_UMEM_PGOFF_FILL_RING 0x100000000
#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000
goto out_release;
}
- if (!xs->rx) {
+ if (!xs->rx && !xs->tx) {
err = -EINVAL;
goto out_unlock;
}
switch (optname) {
case XDP_RX_RING:
+ case XDP_TX_RING:
{
struct xsk_queue **q;
int entries;
if (copy_from_user(&entries, optval, sizeof(entries)))
return -EFAULT;
mutex_lock(&xs->mutex);
- q = &xs->rx;
+ q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
err = xsk_init_queue(entries, q, false);
mutex_unlock(&xs->mutex);
return err;
if (offset == XDP_PGOFF_RX_RING) {
q = xs->rx;
+ } else if (offset == XDP_PGOFF_TX_RING) {
+ q = xs->tx;
} else {
if (!xs->umem)
return -EINVAL;
return;
xskq_destroy(xs->rx);
+ xskq_destroy(xs->tx);
xdp_put_umem(xs->umem);
sk_refcnt_debug_dec(sk);