// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

static DEFINE_IDA(umem_ida);

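/* Undo the longterm pin taken by xdp_umem_pin_pages(). The pages are
 * marked dirty on release since user space may have written frame data
 * into them.
 */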
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kvfree(umem->pgs);
	umem->pgs = NULL;
}

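/* Undo the RLIMIT_MEMLOCK accounting done in xdp_umem_account_pages()
 * and drop the reference on the owning user, if one was taken.
 */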
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

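/* The pinned pages are also mapped contiguously into kernel virtual
 * address space with vmap(), so the whole umem can be addressed
 * linearly from the kernel; vunmap() removes that mapping again.
 */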
static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}

static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}

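/* Full teardown of a umem: return its id to the IDA, remove the kernel
 * mapping, unpin the pages, undo the memlock accounting and free the
 * structure.
 */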
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_free(&umem_ida, umem->id);

	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

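/* Drop a reference on the umem. When the last reference goes away the
 * umem is released, either directly or, if @defer_cleanup is set, from
 * a workqueue.
 */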
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}

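/* Pin the pages of the user memory region with a longterm pin so that
 * the umem stays resident while it is in use. A partial pin is treated
 * as an error and the already-pinned pages are released again.
 */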
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			/* Some pages were pinned, signal error. */
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kvfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

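/* Charge the umem pages against the calling user's RLIMIT_MEMLOCK.
 * Tasks with CAP_IPC_LOCK are exempt from the limit.
 */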
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

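/* Validate an XDP_UMEM_REG request and set up the umem accordingly:
 * check chunk size, flags, alignment and headroom, then account, pin
 * and map the backing pages.
 */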
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_rem;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

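/* Allocate a umem, assign it an id and register the user memory
 * described by @mr. Returns the new umem or an ERR_PTR() on failure.
 */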
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_alloc(&umem_ida, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_free(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}