/*
* struct hmm_range - track invalidation lock on virtual address range
*
+ * @notifier: an optional mmu_interval_notifier
+ * @notifier_seq: when notifier is used this is the result of
+ * mmu_interval_read_begin()
* @hmm: the core HMM structure this range is active against
* @vma: the vm area struct for the range
* @list: all range locks are on a list
* @valid: pfns array did not change since it was filled by an HMM function
*/
struct hmm_range {
+	struct mmu_interval_notifier *notifier;
+	unsigned long notifier_seq;
	struct hmm *hmm;
	struct list_head list;
	unsigned long start;
};
EXPORT_SYMBOL(hmm_range_unregister);
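A caller opting into the new interface fills @notifier and @notifier_seq before the fault call; the sequence count captured by mmu_interval_read_begin() is what the retry checks below compare against. A minimal setup sketch, assuming a driver-owned notifier named interval_sub and a faulting address addr (both names are illustrative, not part of this patch):

	struct hmm_range range = {
		.notifier = &interval_sub,	/* new field added above */
		.start = addr,
	};

	/* Capture the sequence count before walking the page tables. */
	range.notifier_seq = mmu_interval_read_begin(&interval_sub);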
+static bool needs_retry(struct hmm_range *range)
+{
+	if (range->notifier)
+		return mmu_interval_check_retry(range->notifier,
+						range->notifier_seq);
+	return !range->valid;
+}
+
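needs_retry() folds the two validity models into one predicate: a range attached to an mmu_interval_notifier asks whether the notifier's sequence count has moved, while a legacy hmm_mirror range keeps using @valid. For reference, mmu_interval_check_retry() is roughly a lockless sequence compare (a sketch of the upstream helper, not part of this diff):

	static inline bool
	mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
				 unsigned long seq)
	{
		/* Pairs with the WRITE_ONCE() in mmu_interval_set_seq() */
		return READ_ONCE(interval_sub->invalidate_seq) != seq;
	}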
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry = hmm_vma_walk_pud,
	.pmd_entry = hmm_vma_walk_pmd,
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
-	struct hmm *hmm = range->hmm;
+	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int ret;
-	lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
+	if (range->notifier)
+		mm = range->notifier->mm;
+	else
+		mm = range->hmm->mmu_notifier.mm;
+
+	lockdep_assert_held(&mm->mmap_sem);
	do {
		/* If range is no longer valid force retry. */
-		if (!range->valid)
+		if (needs_retry(range))
			return -EBUSY;
-		vma = find_vma(hmm->mmu_notifier.mm, start);
+		vma = find_vma(mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;
		start = hmm_vma_walk.last;
		/* Keep trying while the range is valid. */
-	} while (ret == -EBUSY && range->valid);
+	} while (ret == -EBUSY && !needs_retry(range));
	if (ret) {
		unsigned long i;
			continue;
		/* Check if range is being invalidated */
-		if (!range->valid) {
+		if (needs_retry(range)) {
			ret = -EBUSY;
			goto unmap;
		}
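From the caller's side, -EBUSY now means the interval was invalidated mid-walk and the whole sequence must be restarted from mmu_interval_read_begin(). A hedged sketch of the retry loop this change enables, following the usual HMM pattern; driver_lock, mm, and the second hmm_range_fault() argument are assumptions based on this series, not taken from the diff above:

again:
	range.notifier_seq = mmu_interval_read_begin(range.notifier);
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range, 0);
	up_read(&mm->mmap_sem);
	if (ret == -EBUSY)
		goto again;
	if (ret < 0)
		return ret;

	mutex_lock(&driver_lock);
	/* If an invalidation raced with the walk, start over. */
	if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
		mutex_unlock(&driver_lock);
		goto again;
	}
	/* The pfns are stable until driver_lock is dropped. */
	mutex_unlock(&driver_lock);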