#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
-#include <linux/highmem.h>
+#include <linux/vmalloc.h>
#include "xdp_umem.h"
#include "xsk_queue.h"

static void xdp_umem_unmap_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++)
-		kunmap(umem->pgs[i]);
+		if (PageHighMem(umem->pgs[i]))
+			vunmap(umem->pages[i].addr);
+}
+
+static int xdp_umem_map_pages(struct xdp_umem *umem)
+{
+	unsigned int i;
+	void *addr;
+
+	for (i = 0; i < umem->npgs; i++) {
+		if (PageHighMem(umem->pgs[i]))
+			addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
+		else
+			addr = page_address(umem->pgs[i]);
+
+		if (!addr) {
+			xdp_umem_unmap_pages(umem);
+			return -ENOMEM;
+		}
+
+		umem->pages[i].addr = addr;
+	}
+
+	return 0;
}

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
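The hunk above switches xdp_umem_unmap_pages() from kunmap() to vunmap(), and only touches pages that actually sit in high memory: lowmem pages are covered by the kernel's linear mapping, so page_address() in the new xdp_umem_map_pages() yields a usable address without creating any mapping, and there is nothing to undo for them on teardown. The apparent motivation is that kmap() is meant for short-lived mappings drawn from a small pool, while these mappings live as long as the umem itself, which is what vmap()/vunmap() in vmalloc space are for. The per-page address table exists so the rest of the code can turn a umem offset into a kernel pointer with plain arithmetic; a minimal sketch of that lookup follows (the helper name is illustrative, not taken from the patch):

	/* Illustrative only: consuming the per-page address table that
	 * xdp_umem_map_pages() fills in.  Works for lowmem pages
	 * (page_address()) and highmem pages (vmap()) alike.
	 */
	static inline char *umem_addr_to_kaddr(struct xdp_umem *umem, u64 addr)
	{
		return umem->pages[addr >> PAGE_SHIFT].addr +
		       (addr & (PAGE_SIZE - 1));
	}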

	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
-	int size_chk, err, i;
+	int size_chk, err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:

		goto out_account;
	}

-	for (i = 0; i < umem->npgs; i++)
-		umem->pages[i].addr = kmap(umem->pgs[i]);
+	err = xdp_umem_map_pages(umem);
+	if (!err)
+		return 0;

-	return 0;
+	kfree(umem->pages);

out_account:
	xdp_umem_unaccount_pages(umem);
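Read together, the last two hunks adjust the registration path (this looks like xdp_umem_reg(), given the mr-> fields): kmap() cannot fail, so the old per-page loop was followed by an unconditional return 0, whereas vmap() can return NULL, so the new code checks the result of xdp_umem_map_pages(), frees the per-page table on failure, and falls through to the existing accounting cleanup. A sketch of the resulting tail of that function, assembled from the hunks above; the trailing return err is an assumption about the surrounding code, not part of the patch:

	/* Error handling implied by the hunks: a failed mapping releases
	 * the pages[] table and then unwinds the memory accounting.
	 */
	err = xdp_umem_map_pages(umem);
	if (!err)
		return 0;	/* every page now has a kernel virtual address */

	kfree(umem->pages);

out_account:
	xdp_umem_unaccount_pages(umem);
	return err;	/* assumed: standard error return after cleanup */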