diff --git a/block/blk-merge.c b/block/blk-merge.c
index b7c193d..6460abd 100644
@@ -276,7 +276,7 @@ static bool bvec_split_segs(const struct queue_limits *lim,
  * responsible for ensuring that @bs is only destroyed after processing of the
  * split bio has finished.
  */
-static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
                unsigned *segs, struct bio_set *bs, unsigned max_bytes)
 {
        struct bio_vec bv, bvprv, *bvprvp = NULL;
@@ -336,6 +336,7 @@ split:
        bio_clear_polled(bio);
        return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
 }
+EXPORT_SYMBOL_GPL(bio_split_rw);
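
With the symbol exported, GPL modules outside the core block layer can split an
oversized bio themselves. Below is a minimal sketch of such a caller, loosely
mirroring the logic of __bio_split_to_limits(); the helper name my_split_submit
and the caller-owned my_bio_set are hypothetical, not part of this patch:

    /*
     * Illustrative sketch only: split @bio against the queue limits and
     * resubmit the remainder.  "my_bio_set" is a hypothetical bio_set
     * owned by the caller.
     */
    static struct bio *my_split_submit(struct bio *bio, struct request_queue *q,
                                       struct bio_set *my_bio_set)
    {
            unsigned int nsegs;
            struct bio *split;

            split = bio_split_rw(bio, &q->limits, &nsegs, my_bio_set,
                                 queue_max_sectors(q) << SECTOR_SHIFT);
            if (IS_ERR(split))
                    return split;   /* e.g. REQ_NOWAIT bio, already failed */
            if (!split)
                    return bio;     /* fits the limits, no split needed */

            /* @split is the front fragment; chain it, resubmit the rest */
            bio_chain(split, bio);
            submit_bio_noacct(bio);
            return split;
    }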
 
 /**
  * __bio_split_to_limits - split a bio to fit the queue limits
@@ -586,13 +587,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(__blk_rq_map_sg);
 
-static inline unsigned int blk_rq_get_max_segments(struct request *rq)
-{
-       if (req_op(rq) == REQ_OP_DISCARD)
-               return queue_max_discard_segments(rq->q);
-       return queue_max_segments(rq->q);
-}
-
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
                                                  sector_t offset)
 {
@@ -757,6 +751,33 @@ void blk_rq_set_mixed_merge(struct request *rq)
        rq->rq_flags |= RQF_MIXED_MERGE;
 }
 
+static inline blk_opf_t bio_failfast(const struct bio *bio)
+{
+       if (bio->bi_opf & REQ_RAHEAD)
+               return REQ_FAILFAST_MASK;
+
+       return bio->bi_opf & REQ_FAILFAST_MASK;
+}
+
+/*
+ * Once the request is marked MIXED_MERGE, any new readahead bio merged
+ * into it has to be marked failfast as well, and on a front merge the
+ * request's failfast flags have to be inherited from the new first bio.
+ */
+static inline void blk_update_mixed_merge(struct request *req,
+               struct bio *bio, bool front_merge)
+{
+       if (req->rq_flags & RQF_MIXED_MERGE) {
+               if (bio->bi_opf & REQ_RAHEAD)
+                       bio->bi_opf |= REQ_FAILFAST_MASK;
+
+               if (front_merge) {
+                       req->cmd_flags &= ~REQ_FAILFAST_MASK;
+                       req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
+               }
+       }
+}
+
 static void blk_account_io_merge_request(struct request *req)
 {
        if (blk_do_io_stat(req)) {
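
The two helpers above work in tandem with blk_rq_set_mixed_merge(), whose tail
is visible at the top of this hunk: bio_failfast() reports a readahead bio as
fully failfast so the mixed-merge check in the merge paths below compares like
with like, and blk_update_mixed_merge() keeps an already-mixed request
consistent. A worked example with hypothetical flag values:

    /*
     * Example (hypothetical state): req->cmd_flags has no failfast bits
     * set and a REQ_RAHEAD bio arrives for a back merge.
     *
     *   bio_failfast(bio)        -> REQ_FAILFAST_MASK, because readahead
     *                               is implicitly failfast
     *   ff != req->cmd_flags     -> blk_rq_set_mixed_merge(req) stamps the
     *                               request's failfast bits on every bio
     *   blk_update_mixed_merge() -> sets REQ_FAILFAST_MASK on the new bio,
     *                               so each bio carries the flags it will
     *                               eventually be completed with
     */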
@@ -954,7 +975,7 @@ enum bio_merge_status {
 static enum bio_merge_status bio_attempt_back_merge(struct request *req,
                struct bio *bio, unsigned int nr_segs)
 {
-       const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
+       const blk_opf_t ff = bio_failfast(bio);
 
        if (!ll_back_merge_fn(req, bio, nr_segs))
                return BIO_MERGE_FAILED;
@@ -965,6 +986,8 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
 
+       blk_update_mixed_merge(req, bio, false);
+
        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_iter.bi_size;
@@ -978,7 +1001,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
                struct bio *bio, unsigned int nr_segs)
 {
-       const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
+       const blk_opf_t ff = bio_failfast(bio);
 
        if (!ll_front_merge_fn(req, bio, nr_segs))
                return BIO_MERGE_FAILED;
@@ -989,6 +1012,8 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
 
+       blk_update_mixed_merge(req, bio, true);
+
        bio->bi_next = req->bio;
        req->bio = bio;
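
The truncated hunk above completes the picture: the front-merge path differs
from the back-merge one only in that the incoming bio becomes the request's
new head. A condensed view of both outcomes (flag values illustrative):

    /*
     * Back merge:  req [bio0 .. bioN] + bio  -> req->cmd_flags keep
     *              describing bio0; the new bio is only marked failfast
     *              if it is readahead.
     * Front merge: bio + req [bio0 .. bioN]  -> the new bio becomes the
     *              first bio, so req's failfast bits are rebuilt from it
     *              (the front_merge branch of blk_update_mixed_merge()).
     */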