#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
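
The blk_queue_enter() hunk further down consults the queue's runtime-PM state, hence the new <linux/blk-pm.h> include (which pulls in the RPM_* definitions). For context, the queue_rpm_status() accessor used there lives, if memory serves, in <linux/blkdev.h>; a from-memory sketch, shown only as an approximation and not part of this patch:

/* Sketch of the accessor used below; quoted from memory, treat the
 * exact shape as an approximation rather than the authoritative
 * definition in <linux/blkdev.h>. */
#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;	/* without CONFIG_PM the queue never suspends */
}
#endif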
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
-	refcount_set(&rq->ref, 1);
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);
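
Dropping refcount_set() from blk_rq_init() leaves reference-count initialization to the code that actually hands out requests. A rough, from-memory sketch of the blk-mq side (blk_mq_rq_ctx_init() in block/blk-mq.c, heavily abbreviated and not part of this hunk):

/* Abbreviated from-memory sketch; only the detail relevant here is
 * shown: the initial reference is taken at allocation time, so
 * blk_rq_init() no longer needs to set it. */
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
					  unsigned int tag, u64 alloc_time_ns)
{
	struct request *rq = blk_mq_tags_from_data(data)->static_rqs[tag];

	/* ... most field initialization elided ... */
	refcount_set(&rq->ref, 1);	/* initial reference owned by the caller */
	return rq;
}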
/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
- * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
-	const bool pm = flags & BLK_MQ_REQ_PREEMPT;
+	const bool pm = flags & BLK_MQ_REQ_PM;
	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
-			if (pm || !blk_queue_pm_only(q)) {
+			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
+			    !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
-			    (pm || (blk_pm_request_resume(q),
-				    !blk_queue_pm_only(q)))) ||
+			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
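
With this in place, a submitter that takes a queue reference by hand pairs blk_queue_enter() with blk_queue_exit(), passing BLK_MQ_REQ_PM only for work done on behalf of runtime PM. A minimal usage sketch; issue_example_request() is a hypothetical caller invented for illustration, not from this patch:

/* Take a queue reference, submit, drop the reference. BLK_MQ_REQ_PM
 * marks the request as power-management work, so it is let through a
 * pm-only queue unless the device is fully suspended (RPM_SUSPENDED). */
static int issue_example_request(struct request_queue *q, bool for_pm)
{
	blk_mq_req_flags_t flags = for_pm ? BLK_MQ_REQ_PM : 0;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ret;	/* -EBUSY with BLK_MQ_REQ_NOWAIT, -ENODEV if dying */

	/* ... build and submit the request here ... */

	blk_queue_exit(q);
	return 0;
}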
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
-	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
+	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
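
The rename in this blk_get_request() hunk is visible to callers too: anything allocating passthrough requests for power management (the SCSI resume path, for instance) now passes BLK_MQ_REQ_PM. A hypothetical caller-side sketch; the function name and command setup are invented, and blk_execute_rq() is shown with its signature from this era:

/* Hypothetical illustration: a passthrough request tagged with
 * BLK_MQ_REQ_PM is admitted even while the queue is pm-only, as long
 * as the device is not fully suspended. */
static int send_pm_command(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_PM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the driver-specific command payload here ... */

	blk_execute_rq(q, NULL, rq, /* at_head */ 1);
	blk_put_request(rq);
	return 0;
}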
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

-	if (blk_throtl_bio(bio)) {
-		blkcg_bio_issue_init(bio);
+	if (blk_throtl_bio(bio))
		return false;
-	}
	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);
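
After this hunk (apparently from the bio submission checks in blk-core.c), a throttled bio bails out before any cgroup issue-time stamping; since throttled bios are resubmitted later with BIO_THROTTLED set and pass through here again, they presumably pick up their timestamp at dispatch rather than at first submission. For context, the helper is a thin timestamp wrapper; from memory, roughly:

/* From-memory sketch of the helper in <linux/blk-cgroup.h>; an
 * approximation for context, not the authoritative definition. */
static inline void blkcg_bio_issue_init(struct bio *bio)
{
	/* record issue time and size for io.latency/iocost-style accounting */
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}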