	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
+	atomic_set(&sbq->completion_cnt, 0);
+	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

-	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);
-		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
-	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
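
To see what the two new counters buy: completion_cnt counts freed bits, wakeup_cnt counts completions that have already been matched by wakeups, and a batch of waiters is due once their difference reaches wake_batch. Worked through with wake_batch = 4: three completions leave a difference of 3, so nobody is woken; two more raise it to 5, the completing thread advances wakeup_cnt to 4 and wakes four waiters, and the leftover completion carries over toward the next batch. Both counters only ever grow, which is why the per-waitqueue wait_cnt initialization above can go away entirely.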
-static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
-					unsigned int wake_batch)
-{
-	int i;
-
-	if (sbq->wake_batch != wake_batch) {
-		WRITE_ONCE(sbq->wake_batch, wake_batch);
-		/*
-		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
-		 * to ensure that the batch size is updated before the wait
-		 * counts.
-		 */
-		smp_mb();
-		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
-			atomic_set(&sbq->ws[i].wait_cnt, 1);
-	}
-}
-
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
-	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+	if (sbq->wake_batch != wake_batch)
+		WRITE_ONCE(sbq->wake_batch, wake_batch);
}
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users)
{
	unsigned int wake_batch;
	unsigned int min_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			min_batch, SBQ_WAKE_BATCH);
-	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+
+	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
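
A worked pass through the math, assuming the stock constants (SBQ_WAIT_QUEUES and SBQ_WAKE_BATCH are both 8 upstream): a map of depth 256 shared by 4 users gives depth = (256 + 3) / 4 = 64 and min_batch = 4 because 256 >= 4 * SBQ_WAIT_QUEUES, so wake_batch = clamp(64 / 8, 4, 8) = 8. A shallow map of depth 8 with the same 4 users gives (8 + 3) / 4 = 2 and 2 / 8 = 0, clamped up to min_batch = 1, so even a single completion can produce a wakeup.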
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

-		if (waitqueue_active(&ws->wait) && atomic_read(&ws->wait_cnt)) {
+		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
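
Dropping the wait_cnt test means any wait queue with sleepers is now a valid target; sbq_wake_ptr() scans purely round-robin from wake_index. For reference, the step helper it relies on amounts to a power-of-two wrap along these lines:

    #define SBQ_WAIT_QUEUES 8	/* power of two, as upstream */

    /* Round-robin step over the wait queues. */
    static inline int sbq_index_inc(int index)
    {
    	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
    }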
-static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
-	struct sbq_wait_state *ws;
-	unsigned int wake_batch;
-	int wait_cnt, cur, sub;
-	bool ret;
+	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
+	struct sbq_wait_state *ws = NULL;
+	unsigned int wakeups;

-	if (*nr <= 0)
-		return false;
+	if (!atomic_read(&sbq->ws_active))
+		return;

-	ws = sbq_wake_ptr(sbq);
-	if (!ws)
-		return false;
+	atomic_add(nr, &sbq->completion_cnt);
+	wakeups = atomic_read(&sbq->wakeup_cnt);

-	cur = atomic_read(&ws->wait_cnt);
	do {
-		/*
-		 * For concurrent callers of this, callers should call this
-		 * function again to wakeup a new batch on a different 'ws'.
-		 */
-		if (cur == 0)
-			return true;
-		sub = min(*nr, cur);
-		wait_cnt = cur - sub;
-	} while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
-
-	/*
-	 * If we decremented queue without waiters, retry to avoid lost
-	 * wakeups.
-	 */
-	if (wait_cnt > 0)
-		return !waitqueue_active(&ws->wait);
+		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
+			return;

-	*nr -= sub;
-
-	/*
-	 * When wait_cnt == 0, we have to be particularly careful as we are
-	 * responsible to reset wait_cnt regardless whether we've actually
-	 * woken up anybody. But in case we didn't wakeup anybody, we still
-	 * need to retry.
-	 */
-	ret = !waitqueue_active(&ws->wait);
-	wake_batch = READ_ONCE(sbq->wake_batch);
+		if (!ws) {
+			ws = sbq_wake_ptr(sbq);
+			if (!ws)
+				return;
+		}
+	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
+				&wakeups, wakeups + wake_batch));

-	/*
-	 * Wake up first in case that concurrent callers decrease wait_cnt
-	 * while waitqueue is empty.
-	 */
	wake_up_nr(&ws->wait, wake_batch);
-
-	/*
-	 * Pairs with the memory barrier in sbitmap_queue_resize() to
-	 * ensure that we see the batch size update before the wait
-	 * count is reset.
-	 *
-	 * Also pairs with the implicit barrier between decrementing wait_cnt
-	 * and checking for waitqueue_active() to make sure waitqueue_active()
-	 * sees result of the wakeup if atomic_dec_return() has seen the result
-	 * of atomic_set().
-	 */
-	smp_mb__before_atomic();
-
-	/*
-	 * Increase wake_index before updating wait_cnt, otherwise concurrent
-	 * callers can see valid wait_cnt in old waitqueue, which can cause
-	 * invalid wakeup on the old waitqueue.
-	 */
-	sbq_index_atomic_inc(&sbq->wake_index);
-	atomic_set(&ws->wait_cnt, wake_batch);
-
-	return ret || *nr;
-}
-
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
-{
-	while (__sbq_wake_up(sbq, &nr))
-		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
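
The cmpxchg loop is the whole concurrency story: every completer adds to completion_cnt, but only the caller whose atomic_try_cmpxchg() advances wakeup_cnt by a full batch gets to call wake_up_nr(). Losing racers re-read the counter (atomic_try_cmpxchg() updates its old-value argument on failure) and either see the window drop below wake_batch or claim the next batch. A compilable user-space model of the claim step, using C11 atomics in place of the kernel's atomic_t (an illustrative sketch, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_uint completion_cnt;	/* bits freed so far */
    static atomic_uint wakeup_cnt;	/* completions already paid for */

    /*
     * Report nr completions; return true if this caller won the right
     * (and duty) to wake one batch of waiters. At most one racing
     * thread wins per batch.
     */
    static bool claim_batch(unsigned int nr, unsigned int wake_batch)
    {
    	unsigned int wakeups;

    	atomic_fetch_add(&completion_cnt, nr);
    	wakeups = atomic_load(&wakeup_cnt);

    	do {
    		/* Unsigned subtraction stays correct across wraparound. */
    		if (atomic_load(&completion_cnt) - wakeups < wake_batch)
    			return false;	/* batch claimed by a racer */
    	} while (!atomic_compare_exchange_weak(&wakeup_cnt, &wakeups,
    					       wakeups + wake_batch));

    	return true;	/* caller must wake wake_batch waiters */
    }

Because each batch is claimed by exactly one caller and the counters are never reset, the rewrite can drop all of the removed barrier choreography that policed the wait_cnt resets.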
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[i];
-
- seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
- atomic_read(&ws->wait_cnt),
+ seq_printf(m, "\t{.wait=%s},\n",
waitqueue_active(&ws->wait) ? "active" : "inactive");
}
seq_puts(m, "}\n");