In a later patch, we'll want to be able to handle the sp_task_pending
flag without holding the sp_lock. Change this field to an unsigned long
sp_flags field, and declare a new SP_TASK_PENDING flag in it that can
be managed with atomic bitops.
Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
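
A quick aside on the pattern (an illustration only, not part of the
patch): this is the usual kernel conversion from a lock-protected int
flag to a bit in an unsigned long word that the atomic bitops from
<linux/bitops.h> operate on. A minimal sketch with made-up names:

	#include <linux/bitops.h>
	#include <linux/spinlock.h>

	/* before: a plain int, only safe to touch under the pool's lock */
	struct pool_before {
		spinlock_t	lock;
		int		task_pending;
	};

	/* after: one bit in an unsigned long, safe via atomic bitops */
	#define TASK_PENDING	(0)	/* a bit number, not a mask */

	struct pool_after {
		spinlock_t	lock;
		unsigned long	flags;
	};

	static void mark_pending(struct pool_after *p)
	{
		set_bit(TASK_PENDING, &p->flags);	/* atomic RMW */
	}

	static int take_pending(struct pool_after *p)
	{
		/* atomically test and clear; returns the old bit value */
		return test_and_clear_bit(TASK_PENDING, &p->flags);
	}

Once the flag lives in such a word, it can be set, cleared, and tested
from any context without taking the lock, which is what the later patch
depends on.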
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
 	unsigned int		sp_nrthreads;	/* # of threads in pool */
 	struct list_head	sp_all_threads;	/* all server threads */
 	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
-	int			sp_task_pending;/* has pending task */
+#define	SP_TASK_PENDING		(0)		/* still work to do even if no
+						 * xprt is queued. */
+	unsigned long		sp_flags;
 } ____cacheline_aligned_in_smp;
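
Note that SP_TASK_PENDING is defined as (0) because the bitops take a
bit index, not a mask. Roughly speaking (illustration only),

	set_bit(SP_TASK_PENDING, &pool->sp_flags);

is an atomic equivalent of

	pool->sp_flags |= 1UL << SP_TASK_PENDING;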
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
 			wake_up_process(rqstp->rq_task);
 		} else
-			pool->sp_task_pending = 1;
+			set_bit(SP_TASK_PENDING, &pool->sp_flags);
 		spin_unlock_bh(&pool->sp_lock);
 	}
 }
 		 * long for cache updates.
 		 */
 		rqstp->rq_chandle.thread_wait = 1*HZ;
-		pool->sp_task_pending = 0;
+		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
 	} else {
-		if (pool->sp_task_pending) {
-			pool->sp_task_pending = 0;
+		if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags)) {
 			xprt = ERR_PTR(-EAGAIN);
 			goto out;
 		}
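
The svc_get_next_xprt() side is where the combined primitive earns its
keep: once a later patch stops taking sp_lock around this code, the old
test-then-clear pair would race, since another CPU could change the
flag between the test and the store. test_and_clear_bit() folds both
steps into a single atomic read-modify-write that returns the old
value. A sketch of the difference (illustration only):

	/* racy without a lock: flag can change between test and store */
	if (pool->sp_task_pending) {
		pool->sp_task_pending = 0;
		/* handle the pending work */
	}

	/* one atomic op: the test and the clear cannot be interleaved */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags)) {
		/* handle the pending work */
	}

The value-returning bitops also imply full memory-barrier semantics,
unlike plain set_bit()/clear_bit(), which helps once the surrounding
locking is relaxed.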