	return ret;
}
-/* Should pair with userfaultfd_signal_pending() */
static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}
-/* Should pair with userfaultfd_get_blocking_state() */
-static inline bool userfaultfd_signal_pending(unsigned int flags)
-{
-	if (flags & FAULT_FLAG_INTERRUPTIBLE)
-		return signal_pending(current);
-
-	if (flags & FAULT_FLAG_KILLABLE)
-		return fatal_signal_pending(current);
-
-	return false;
-}
-
/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
...
					  vmf->flags, reason);
	mmap_read_unlock(mm);

-	if (likely(must_wait && !READ_ONCE(ctx->released) &&
-		   !userfaultfd_signal_pending(vmf->flags))) {
+	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
-		ret |= VM_FAULT_MAJOR;
-
-		/*
-		 * False wakeups can orginate even from rwsem before
-		 * up_read() however userfaults will wait either for a
-		 * targeted wakeup on the specific uwq waitqueue from
-		 * wake_userfault() or for signals or for uffd
-		 * release.
-		 */
-		while (!READ_ONCE(uwq.waken)) {
-			/*
-			 * This needs the full smp_store_mb()
-			 * guarantee as the state write must be
-			 * visible to other CPUs before reading
-			 * uwq.waken from other CPUs.
-			 */
-			set_current_state(blocking_state);
-			if (READ_ONCE(uwq.waken) ||
-			    READ_ONCE(ctx->released) ||
-			    userfaultfd_signal_pending(vmf->flags))
-				break;
-			schedule();
-		}
	}

	__set_current_state(TASK_RUNNING);
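
Why dropping the explicit signal checks is safe: by this point handle_userfault()
has already called set_current_state() with the state picked by
userfaultfd_get_blocking_state(), and set_current_state() implies a full memory
barrier. Any targeted wakeup, uffd release, or (for TASK_KILLABLE, fatal) signal
from then on marks the task runnable, so the bare schedule() either sleeps or
returns immediately, and a spurious wakeup simply falls through to the
VM_FAULT_RETRY path. Below is a minimal sketch of this classic prepare-to-wait
pattern, not the userfaultfd code itself; sleeper, waker, wq and condition are
placeholder names:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static bool condition;

static void sleeper(void)
{
	DEFINE_WAIT(wait);

	/*
	 * Queue ourselves and publish the task state; the
	 * set_current_state() inside prepare_to_wait() implies a full
	 * barrier, so the state write is visible to the waker before
	 * we re-check the condition.
	 */
	prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
	if (!READ_ONCE(condition))
		schedule();	/* returns at once if a wakeup or a
				 * signal already made us runnable */
	finish_wait(&wq, &wait);
}

static void waker(void)
{
	WRITE_ONCE(condition, true);	/* publish the condition first */
	wake_up(&wq);			/* then wake; pairs with the
					 * barrier on the sleeper side */
}

This barrier pairing is the same guarantee the removed smp_store_mb() comment
described: with the task state published before the condition re-check, no
wakeup can be lost, so no hand-rolled wait loop is needed.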