size_t size; /* Map size (bytes) */
int prot; /* IOMMU_READ/WRITE */
bool iommu_mapped;
+ bool lock_cap; /* capable(CAP_IPC_LOCK) */
struct task_struct *task;
struct rb_root pfn_list; /* Ex-user pinned pfn list */
};
return ret;
}
-static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
+static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
{
struct mm_struct *mm;
- bool is_current;
int ret;
if (!npage)
return 0;
- is_current = (task->mm == current->mm);
-
- mm = is_current ? task->mm : get_task_mm(task);
+ mm = async ? get_task_mm(dma->task) : dma->task->mm;
if (!mm)
return -ESRCH; /* process exited */
ret = down_write_killable(&mm->mmap_sem);
if (!ret) {
if (npage > 0) {
- if (lock_cap ? !*lock_cap :
- !has_capability(task, CAP_IPC_LOCK)) {
+ if (!dma->lock_cap) {
unsigned long limit;
- limit = task_rlimit(task,
+ limit = task_rlimit(dma->task,
RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (mm->locked_vm + npage > limit)
up_write(&mm->mmap_sem);
}
- if (!is_current)
+ if (async)
mmput(mm);
return ret;
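For reference, the reworked vfio_lock_acct() takes everything it needs from the vfio_dma: dma->task supplies both the RLIMIT_MEMLOCK limit and the mm to charge, and dma->lock_cap replaces the per-call capability test. A minimal sketch of the two calling styles follows; the wrapper functions are invented for illustration and are not part of the patch.

/* Synchronous style (e.g. pinning during MAP_DMA): the caller runs in the
 * owning task's context, so dma->task->mm is used directly. */
static int example_acct_sync(struct vfio_dma *dma, long npage)
{
	return vfio_lock_acct(dma, npage, false);
}

/* Asynchronous style (mdev pin/unpin, unmap via another thread): the caller
 * may not share the owning mm, so async = true has vfio_lock_acct() take
 * its own reference with get_task_mm(dma->task). */
static int example_acct_async(struct vfio_dma *dma, long npage)
{
	return vfio_lock_acct(dma, npage, true);
}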
*/
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base,
- bool lock_cap, unsigned long limit)
+ unsigned long limit)
{
unsigned long pfn = 0;
long ret, pinned = 0, lock_acct = 0;
* pages are already counted against the user.
*/
if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+ if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
put_pfn(*pfn_base, dma->prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
limit << PAGE_SHIFT);
}
if (!rsvd && !vfio_find_vpfn(dma, iova)) {
- if (!lock_cap &&
+ if (!dma->lock_cap &&
current->mm->locked_vm + lock_acct + 1 > limit) {
put_pfn(pfn, dma->prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
}
out:
- ret = vfio_lock_acct(current, lock_acct, &lock_cap);
+ ret = vfio_lock_acct(dma, lock_acct, false);
unpin_out:
if (ret) {
}
if (do_accounting)
- vfio_lock_acct(dma->task, locked - unlocked, NULL);
+ vfio_lock_acct(dma, locked - unlocked, true);
return unlocked;
}
ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
- ret = vfio_lock_acct(dma->task, 1, NULL);
+ ret = vfio_lock_acct(dma, 1, true);
if (ret) {
put_pfn(*pfn_base, dma->prot);
if (ret == -ENOMEM)
unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
if (do_accounting)
- vfio_lock_acct(dma->task, -unlocked, NULL);
+ vfio_lock_acct(dma, -unlocked, true);
return unlocked;
}
dma->iommu_mapped = false;
if (do_accounting) {
- vfio_lock_acct(dma->task, -unlocked, NULL);
+ vfio_lock_acct(dma, -unlocked, true);
return 0;
}
return unlocked;
size_t size = map_size;
long npage;
unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- bool lock_cap = capable(CAP_IPC_LOCK);
int ret = 0;
while (size) {
/* Pin a contiguous chunk of memory */
npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
- size >> PAGE_SHIFT, &pfn,
- lock_cap, limit);
+ size >> PAGE_SHIFT, &pfn, limit);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
- get_task_struct(current);
- dma->task = current;
+
+ /*
+ * We need to be able to both add to a task's locked memory and test
+ * against the locked memory limit and we need to be able to do both
+ * outside of this call path as pinning can be asynchronous via the
+ * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a
+ * task_struct and VM locked pages requires an mm_struct, however
+ * holding an indefinite mm reference is not recommended, therefore we
+ * only hold a reference to a task. We could hold a reference to
+ * current, however QEMU uses this call path through vCPU threads,
+ * which can be killed resulting in a NULL mm and failure in the unmap
+ * path when called via a different thread. Avoid this problem by
+ * using the group_leader as threads within the same group require
+ * both CLONE_THREAD and CLONE_VM and will therefore use the same
+ * mm_struct.
+ *
+ * Previously we also used the task for testing CAP_IPC_LOCK at the
+ * time of pinning and accounting, however has_capability() makes use
+ * of real_cred, a copy-on-write field, so we can't guarantee that it
+ * matches group_leader, or indeed that it won't have changed by the
+ * time it's evaluated. If a process were to call MAP_DMA with
+ * CAP_IPC_LOCK but later drop it, it doesn't make sense for it to see
+ * different results for an iommu_mapped vfio_dma vs an externally
+ * mapped one. Therefore track CAP_IPC_LOCK in vfio_dma at the
+ * time of calling MAP_DMA.
+ */
+ get_task_struct(current->group_leader);
+ dma->task = current->group_leader;
+ dma->lock_cap = capable(CAP_IPC_LOCK);
+
dma->pfn_list = RB_ROOT;
/* Insert zero-sized and grow as we map chunks of it */
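The reference taken on current->group_leader above is dropped when the vfio_dma is destroyed, and the captured task and lock_cap are all that later, possibly asynchronous, accounting relies on. Below is a rough sketch of that teardown side, with an invented function name (the existing vfio_remove_dma() path behaves essentially like this):

static void example_dma_teardown(struct vfio_dma *dma, long unlocked)
{
	/* Accounting may run from a different thread, or after the vCPU
	 * thread that issued MAP_DMA has exited, so use the async path,
	 * which resolves the mm via get_task_mm(dma->task). dma->lock_cap
	 * still reflects CAP_IPC_LOCK as of MAP_DMA, keeping the limit
	 * check consistent with the original mapping. */
	vfio_lock_acct(dma, -unlocked, true);

	/* Drop the group_leader reference taken at MAP_DMA time. */
	put_task_struct(dma->task);
	kfree(dma);
}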
struct vfio_domain *d;
struct rb_node *n;
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- bool lock_cap = capable(CAP_IPC_LOCK);
int ret;
/* Arbitrarily pick the first domain in the list for lookups */
npage = vfio_pin_pages_remote(dma, vaddr,
n >> PAGE_SHIFT,
- &pfn, lock_cap,
- limit);
+ &pfn, limit);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
if (!is_invalid_reserved_pfn(vpfn->pfn))
locked++;
}
- vfio_lock_acct(dma->task, locked - unlocked, NULL);
+ vfio_lock_acct(dma, locked - unlocked, true);
}
}