[platform/kernel/linux-rpi.git]
diff --git a/block/blk-core.c b/block/blk-core.c
index ac00d2f..26664f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -120,7 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->internal_tag = BLK_MQ_NO_TAG;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
-       refcount_set(&rq->ref, 1);
        blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
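The reference-count initialization drops out of this generic helper. Its main in-tree user, the flush machinery, appears to take over that job at the call site; roughly (paraphrasing the matching blk-flush.c change, which is not part of this hunk):

    /* in blk_kick_flush(), right after blk_rq_init(q, flush_rq): */
    refcount_set(&flush_rq->ref, 1);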
@@ -186,6 +186,10 @@ static const struct {
        /* device mapper special case, should not leak out: */
        [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
 
+       /* zone device specific errors */
+       [BLK_STS_ZONE_OPEN_RESOURCE]    = { -ETOOMANYREFS, "open zones exceeded" },
+       [BLK_STS_ZONE_ACTIVE_RESOURCE]  = { -EOVERFLOW, "active zones exceeded" },
+
        /* everything else not covered above: */
        [BLK_STS_IOERR]         = { -EIO,       "I/O" },
 };
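Both new statuses are translated through blk_status_to_errno(), the consumer of this table elsewhere in blk-core.c. A minimal, illustrative use (the variable is ours, not from the patch):

    int err = blk_status_to_errno(BLK_STS_ZONE_OPEN_RESOURCE);
    /* err == -ETOOMANYREFS; request errors are logged as "open zones exceeded" */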
@@ -420,11 +424,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
- * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
  */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-       const bool pm = flags & BLK_MQ_REQ_PREEMPT;
+       const bool pm = flags & BLK_MQ_REQ_PM;
 
        while (true) {
                bool success = false;
@@ -436,7 +440,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                         * responsible for ensuring that that counter is
                         * globally visible before the queue is unfrozen.
                         */
-                       if (pm || !blk_queue_pm_only(q)) {
+                       if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
+                           !blk_queue_pm_only(q)) {
                                success = true;
                        } else {
                                percpu_ref_put(&q->q_usage_counter);
@@ -461,8 +466,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
-                           (pm || (blk_pm_request_resume(q),
-                                   !blk_queue_pm_only(q)))) ||
+                           blk_pm_resume_queue(pm, q)) ||
                           blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
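The open-coded resume dance in the wait condition is folded into blk_pm_resume_queue(). For reference, a sketch of that helper as this hunk assumes it to behave; the real definition lives in block/blk-pm.h (CONFIG_PM build), so treat this as an approximation rather than a quote:

    static inline bool blk_pm_resume_queue(const bool pm, struct request_queue *q)
    {
            if (!q->dev || !blk_queue_pm_only(q))
                    return true;    /* no runtime PM in play, nothing to wait for */
            if (pm && q->rpm_status != RPM_SUSPENDED)
                    return true;    /* BLK_MQ_REQ_PM may pass unless fully suspended */
            pm_request_resume(q->dev);      /* kick an async resume, keep waiting */
            return false;
    }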
@@ -626,7 +630,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
        struct request *req;
 
        WARN_ON_ONCE(op & REQ_NOWAIT);
-       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
+       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
 
        req = blk_mq_alloc_request(q, op, flags);
        if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
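Callers that used BLK_MQ_REQ_PREEMPT for suspend/resume I/O now pass BLK_MQ_REQ_PM instead. A hypothetical allocation site (error handling trimmed):

    struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);

    if (IS_ERR(rq))
            return PTR_ERR(rq);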
@@ -893,10 +897,8 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
        if (unlikely(!current->io_context))
                create_task_io_context(current, GFP_ATOMIC, q->node);
 
-       if (blk_throtl_bio(bio)) {
-               blkcg_bio_issue_init(bio);
+       if (blk_throtl_bio(bio))
                return false;
-       }
 
        blk_cgroup_bio_start(bio);
        blkcg_bio_issue_init(bio);