}
struct nouveau_svmm {
+ struct mmu_notifier notifier;
struct nouveau_vmm *vmm;
struct {
unsigned long start;
unsigned long limit;
} unmanaged;
struct mutex mutex;
- struct mm_struct *mm;
struct hmm_mirror mirror;
};
}
static int
-nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
+nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *update)
{
- struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+ struct nouveau_svmm *svmm =
+ container_of(mn, struct nouveau_svmm, notifier);
unsigned long start = update->start;
unsigned long limit = update->end;
SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
mutex_lock(&svmm->mutex);
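+ /* svmm->vmm is cleared under this mutex by nouveau_svmm_fini(), so a
+ * NULL vmm means teardown already ran and the invalidation can be
+ * skipped.
+ */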
+ if (unlikely(!svmm->vmm))
+ goto out;
+
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
if (start < svmm->unmanaged.start) {
nouveau_svmm_invalidate(svmm, start,
}
nouveau_svmm_invalidate(svmm, start, limit);
+
+out:
mutex_unlock(&svmm->mutex);
return 0;
}
-static void
-nouveau_svmm_release(struct hmm_mirror *mirror)
+static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
+{
+ kfree(container_of(mn, struct nouveau_svmm, notifier));
+}
+
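+/*
+ * invalidate_range_start now comes straight from the mmu_notifier, and
+ * free_notifier runs once the last notifier reference is dropped, making
+ * it the single place where the svmm allocation is released.
+ */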
+static const struct mmu_notifier_ops nouveau_mn_ops = {
+ .invalidate_range_start = nouveau_svmm_invalidate_range_start,
+ .free_notifier = nouveau_svmm_free_notifier,
+};
+
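+/*
+ * hmm_mirror still requires a sync_cpu_device_pagetables callback, so
+ * keep an empty stub; real invalidation is handled by nouveau_mn_ops.
+ */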
+static int
+nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
+ const struct mmu_notifier_range *update)
{
+ return 0;
}
-static const struct hmm_mirror_ops
-nouveau_svmm = {
+static const struct hmm_mirror_ops nouveau_svmm = {
.sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
- .release = nouveau_svmm_release,
};
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
struct nouveau_svmm *svmm = *psvmm;
if (svmm) {
hmm_mirror_unregister(&svmm->mirror);
- kfree(*psvmm);
+ mutex_lock(&svmm->mutex);
+ svmm->vmm = NULL;
+ mutex_unlock(&svmm->mutex);
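+ /* Drop the notifier reference; the kfree() of svmm happens later in
+ * nouveau_svmm_free_notifier() once all notifier users are done.
+ */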
+ mmu_notifier_put(&svmm->notifier);
*psvmm = NULL;
}
}
mutex_lock(&cli->mutex);
if (cli->svm.cli) {
ret = -EBUSY;
- goto done;
+ goto out_free;
}
/* Allocate a new GPU VMM that can support SVM (managed by the
.fault_replay = true,
}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
if (ret)
- goto done;
+ goto out_free;
- /* Enable HMM mirroring of CPU address-space to VMM. */
- svmm->mm = get_task_mm(current);
- down_write(&svmm->mm->mmap_sem);
+ down_write(&current->mm->mmap_sem);
svmm->mirror.ops = &nouveau_svmm;
- ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
- if (ret == 0) {
- cli->svm.svmm = svmm;
- cli->svm.cli = cli;
- }
- up_write(&svmm->mm->mmap_sem);
- mmput(svmm->mm);
+ ret = hmm_mirror_register(&svmm->mirror, current->mm);
+ if (ret)
+ goto out_mm_unlock;
-done:
+ svmm->notifier.ops = &nouveau_mn_ops;
+ ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
- nouveau_svmm_fini(&svmm);
+ goto out_hmm_unregister;
+ /* Note, ownership of svmm transfers to mmu_notifier */
+
+ cli->svm.svmm = svmm;
+ cli->svm.cli = cli;
+ up_write(&current->mm->mmap_sem);
mutex_unlock(&cli->mutex);
+ return 0;
+
+out_hmm_unregister:
+ hmm_mirror_unregister(&svmm->mirror);
+out_mm_unlock:
+ up_write(&current->mm->mmap_sem);
+out_free:
+ mutex_unlock(&cli->mutex);
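+ /* On these error paths the notifier was never registered, so svmm is
+ * still owned here and freed directly.
+ */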
+ kfree(svmm);
return ret;
}
ret = hmm_range_register(range, &svmm->mirror);
if (ret) {
- up_read(&svmm->mm->mmap_sem);
+ up_read(&svmm->notifier.mm->mmap_sem);
return (int)ret;
}
if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- up_read(&svmm->mm->mmap_sem);
+ up_read(&svmm->notifier.mm->mmap_sem);
return -EBUSY;
}
ret = hmm_range_fault(range, 0);
if (ret <= 0) {
if (ret == 0)
ret = -EBUSY;
- up_read(&svmm->mm->mmap_sem);
+ up_read(&svmm->notifier.mm->mmap_sem);
hmm_range_unregister(range);
return ret;
}
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+ struct mm_struct *mm;
+
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
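+ /* With the cached svmm->mm gone, look the mm up once from the
+ * registered notifier; the fault path takes mmap_sem on it below.
+ */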
+ mm = svmm->notifier.mm;
/* We try and group handling of faults within a small
* window into a single update.
*/
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&svmm->mm->mmap_sem);
- vma = find_vma_intersection(svmm->mm, start, limit);
+ down_read(&mm->mmap_sem);
+ vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
if (buffer->fault[fi]->addr != start) {
SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
NULL);
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
}
/* Cancel any faults in the window whose pages didn't manage