block: don't allow enabling a cache on devices that don't support it
[platform/kernel/linux-starfive.git]
diff --git a/block/blk-core.c b/block/blk-core.c
index 5487912..ebb7a16 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -672,6 +672,18 @@ static void __submit_bio_noacct_mq(struct bio *bio)
 
 void submit_bio_noacct_nocheck(struct bio *bio)
 {
+       blk_cgroup_bio_start(bio);
+       blkcg_bio_issue_init(bio);
+
+       if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+               trace_block_bio_queue(bio);
+               /*
+                * Now that enqueuing has been traced, we need to trace
+                * completion as well.
+                */
+               bio_set_flag(bio, BIO_TRACE_COMPLETION);
+       }
+
        /*
         * We only want one ->submit_bio to be active at a time, else stack
         * usage with stacked devices could be a problem.  Use current->bio_list
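
A rough sketch of the resulting submit path, assuming the usual blk-mq entry points (illustrative only; submit_bio_noacct_nocheck() and __submit_bio_noacct_mq() appear in this diff, the rest is recalled from the surrounding code and may differ by kernel version):

	/*
	 * submit_bio()
	 *   submit_bio_noacct()              sanity checks + blk_throtl_bio()
	 *     submit_bio_noacct_nocheck()    now: cgroup stats + block_bio_queue trace
	 *       __submit_bio_noacct{,_mq}()  walks current->bio_list
	 *
	 * Callers that re-enter the submit path via submit_bio_noacct_nocheck()
	 * directly (blk-throttle's dispatch work appears to be one) presumably
	 * pick up the accounting and the tracepoint as well after this change.
	 */
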
@@ -776,17 +788,6 @@ void submit_bio_noacct(struct bio *bio)
 
        if (blk_throtl_bio(bio))
                return;
-
-       blk_cgroup_bio_start(bio);
-       blkcg_bio_issue_init(bio);
-
-       if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-               trace_block_bio_queue(bio);
-               /* Now that enqueuing has been traced, we need to trace
-                * completion as well.
-                */
-               bio_set_flag(bio, BIO_TRACE_COMPLETION);
-       }
        submit_bio_noacct_nocheck(bio);
        return;
 
@@ -841,10 +842,16 @@ EXPORT_SYMBOL(submit_bio);
  */
 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 {
-       struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
+       struct block_device *bdev;
+       struct request_queue *q;
        int ret = 0;
 
+       bdev = READ_ONCE(bio->bi_bdev);
+       if (!bdev)
+               return 0;
+
+       q = bdev_get_queue(bdev);
        if (cookie == BLK_QC_T_NONE ||
            !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return 0;
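
For context, callers spin on bio_poll() until the bio completes. A minimal sketch of a synchronous polled read, modeled loosely on __blkdev_direct_IO_simple() in block/fops.c (details vary across kernel versions):

	submit_bio(&bio);	/* on-stack bio, bi_private points at current */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* the completion handler clears ->bi_private and wakes us */
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !bio_poll(&bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

The READ_ONCE()/NULL check on ->bi_bdev added above presumably matters for the asynchronous iocb_bio_iopoll() path in the next hunk, where the bio referenced by kiocb->private can complete and be recycled while it is being polled.
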
@@ -904,7 +911,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
         */
        rcu_read_lock();
        bio = READ_ONCE(kiocb->private);
-       if (bio && bio->bi_bdev)
+       if (bio)
                ret = bio_poll(bio, iob, flags);
        rcu_read_unlock();
 
@@ -1133,8 +1140,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
        if (!list_empty(&plug->cb_list))
                flush_plug_callbacks(plug, from_schedule);
-       if (!rq_list_empty(plug->mq_list))
-               blk_mq_flush_plug_list(plug, from_schedule);
+       blk_mq_flush_plug_list(plug, from_schedule);
        /*
         * Unconditionally flush out cached requests, even if the unplug
         * event came from schedule. Since we now hold references to the