#include <linux/mutex.h>
#include <net/sock.h>
+struct net_device;
+struct xsk_queue;
struct xdp_umem;
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
+ struct xsk_queue *rx;
+ struct net_device *dev;
struct xdp_umem *umem;
/* Protects multiple processes in the control path */
struct mutex mutex;
};

#include <linux/types.h>

/* XDP socket options */
+#define XDP_RX_RING 1
#define XDP_UMEM_REG 3
#define XDP_UMEM_FILL_RING 4
/* Pgoff for mmapping the rings */
+#define XDP_PGOFF_RX_RING 0
#define XDP_UMEM_PGOFF_FILL_RING 0x100000000
+struct xdp_desc {
+ __u32 idx;
+ __u32 len;
+ __u16 offset;
+ __u8 flags;
+ __u8 padding[5];
+};
+
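/* Example (not part of this patch): a sketch of how user space might
 * resolve an Rx descriptor to its packet data. umem_base and frame_size
 * are hypothetical names here; the real values come from the mmapped
 * umem and the frame size registered via XDP_UMEM_REG.
 */
static inline void *xdp_desc_data(void *umem_base, __u32 frame_size,
				  const struct xdp_desc *d)
{
	/* idx selects a umem frame; offset points into that frame */
	return (char *)umem_base + (__u64)d->idx * frame_size + d->offset;
}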
struct xdp_ring {
__u32 producer __attribute__((aligned(64)));
__u32 consumer __attribute__((aligned(64)));
};
+/* Used for the RX and TX queues for packets */
+struct xdp_rxtx_ring {
+ struct xdp_ring ptrs;
+ struct xdp_desc desc[0] __attribute__((aligned(64)));
+};
+
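/* Example (not part of this patch): a minimal single-consumer peek at
 * the mmapped Rx ring, assuming nentries is the power-of-two size the
 * socket was configured with. Real code must read the producer index
 * with an acquire barrier before touching the descriptor; barriers are
 * elided here for brevity.
 */
static inline int xdp_rx_ring_peek(struct xdp_rxtx_ring *ring,
				   __u32 nentries, struct xdp_desc *out)
{
	__u32 cons = ring->ptrs.consumer;

	if (cons == ring->ptrs.producer)
		return 0;		/* ring is empty */
	*out = ring->desc[cons & (nentries - 1)];
	ring->ptrs.consumer = cons + 1;	/* hand the slot back to the kernel */
	return 1;
}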
/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
struct xdp_ring ptrs;
__u32 desc[0] __attribute__((aligned(64)));
};

#include <linux/net.h>
#include <linux/netdevice.h>
#include <net/xdp_sock.h>
+#include <net/xdp.h>
#include "xsk_queue.h"
#include "xdp_umem.h"
static struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}
-static int xsk_init_queue(u32 entries, struct xsk_queue **queue)
+static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
+ bool umem_queue)
{
struct xsk_queue *q;
if (entries == 0 || *queue || !is_power_of_2(entries))
return -EINVAL;
- q = xskq_create(entries);
+ q = xskq_create(entries, umem_queue);
if (!q)
return -ENOMEM;
return -ENOPROTOOPT;
switch (optname) {
+ case XDP_RX_RING:
+ {
+ struct xsk_queue **q;
+ int entries;
+
+ if (optlen < sizeof(entries))
+ return -EINVAL;
+ if (copy_from_user(&entries, optval, sizeof(entries)))
+ return -EFAULT;
+
+ mutex_lock(&xs->mutex);
+ q = &xs->rx;
+ err = xsk_init_queue(entries, q, false);
+ mutex_unlock(&xs->mutex);
+ return err;
+ }
case XDP_UMEM_REG:
{
struct xdp_umem_reg mr;
mutex_lock(&xs->mutex);
q = &xs->umem->fq;
- err = xsk_init_queue(entries, q);
+ err = xsk_init_queue(entries, q, true);
mutex_unlock(&xs->mutex);
return err;
}
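/* Example (not part of this patch): the matching user-space calls,
 * sketched under the assumption that AF_XDP and SOL_XDP are exposed by
 * the system headers. Ring sizes must be powers of two (enforced by
 * xsk_init_queue() above), and XDP_UMEM_FILL_RING is only valid once a
 * umem has been registered with XDP_UMEM_REG.
 */
	int fd = socket(AF_XDP, SOCK_RAW, 0);
	int rx_entries = 1024, fq_entries = 1024;

	setsockopt(fd, SOL_XDP, XDP_RX_RING, &rx_entries, sizeof(rx_entries));
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_entries,
		   sizeof(fq_entries));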
unsigned long pfn;
struct page *qpg;
- if (!xs->umem)
- return -EINVAL;
+ if (offset == XDP_PGOFF_RX_RING) {
+ q = xs->rx;
+ } else {
+ if (!xs->umem)
+ return -EINVAL;
- if (offset == XDP_UMEM_PGOFF_FILL_RING)
- q = xs->umem->fq;
- else
- return -EINVAL;
+ if (offset == XDP_UMEM_PGOFF_FILL_RING)
+ q = xs->umem->fq;
+ else
+ return -EINVAL;
+ }
if (!q)
return -EINVAL;
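/* Example (not part of this patch): mapping the Rx ring from user
 * space. The length mirrors xskq_rxtx_get_ring_size() below;
 * MAP_POPULATE is an assumption borrowed from typical AF_XDP user
 * code, not a requirement, and error checking is omitted.
 */
	struct xdp_rxtx_ring *rx_ring;
	size_t len = sizeof(struct xdp_ring) +
		     rx_entries * sizeof(struct xdp_desc);

	rx_ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_POPULATE, fd, XDP_PGOFF_RX_RING);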
if (!sock_flag(sk, SOCK_DEAD))
return;
+ xskq_destroy(xs->rx);
xdp_put_umem(xs->umem);
sk_refcnt_debug_dec(sk);
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u32);
}
-struct xsk_queue *xskq_create(u32 nentries)
+static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
+{
+ return (sizeof(struct xdp_ring) +
+ q->nentries * sizeof(struct xdp_desc));
+}
+
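/* Worked example (not part of this patch): struct xdp_desc is 16 bytes
 * (4 + 4 + 2 + 1 + 5 bytes of explicit padding) and struct xdp_ring is
 * 128 bytes due to the 64-byte alignment of producer and consumer, so a
 * 1024-entry Rx ring needs 128 + 1024 * 16 = 16512 bytes before
 * get_order() rounds the allocation up to whole pages in xskq_create().
 */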
+struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
struct xsk_queue *q;
gfp_t gfp_flags;
gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
__GFP_COMP | __GFP_NORETRY;
- size = xskq_umem_get_ring_size(q);
+ size = umem_queue ? xskq_umem_get_ring_size(q) :
+ xskq_rxtx_get_ring_size(q);
q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
get_order(size));
u64 invalid_descs;
};
-struct xsk_queue *xskq_create(u32 nentries);
+struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q);
#endif /* _LINUX_XSK_QUEUE_H */