// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

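/*
 * Split a discard bio so that it honours the queue's discard granularity,
 * alignment and max_discard_sectors limits.  Returns the split-off front
 * part, or NULL if no split is needed (or possible).
 */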
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

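/*
 * Split a WRITE_ZEROES bio that exceeds the queue's max_write_zeroes_sectors
 * limit.  Returns NULL when no split is required.
 */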
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

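/*
 * Split a WRITE_SAME bio that exceeds the queue's max_write_same_sectors
 * limit.  Returns NULL when no split is required.
 */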
static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

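/*
 * Return the maximum number of sectors that can be issued from this bio's
 * starting sector, rounded down to the queue's logical block size.
 */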
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
        unsigned mask = queue_logical_block_size(q) - 1;

        /* aligned to logical block size */
        sectors &= ~(mask >> 9);

        return sectors;
}

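/*
 * Walk the bio's segments and split it at the first point where a queue
 * limit (max sectors, max segments, segment size or an SG gap) would be
 * exceeded.  Returns the split-off front part, or NULL if the bio fits.
 */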
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);

        bio_for_each_segment(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
                            sectors < max_sectors) {
                                nsegs++;
                                sectors = max_sectors;
                        }
                        goto split;
                }

                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;

                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                if (nsegs == 1 && seg_size > front_seg_size)
                        front_seg_size = seg_size;

                nsegs++;
                bvprv = bv;
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;
        }

        do_split = false;
split:
        *segs = nsegs;

        if (do_split) {
                new = bio_split(bio, sectors, GFP_NOIO, bs);
                if (new)
                        bio = new;
        }

        if (nsegs == 1 && seg_size > front_seg_size)
                front_seg_size = seg_size;
        bio->bi_seg_front_size = front_seg_size;
        if (seg_size > bio->bi_seg_back_size)
                bio->bi_seg_back_size = seg_size;

        return do_split ? new : NULL;
}

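/*
 * blk_queue_split - split a bio that exceeds the queue limits
 * @q:   the request queue the bio is being submitted to
 * @bio: bio to check/split
 *
 * If the bio is larger than the queue allows, split it, re-submit the
 * remainder via generic_make_request() and update *@bio to point at the
 * front part that the caller should continue with.
 */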
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
        struct bio *split, *res;
        unsigned nsegs;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
                break;
        default:
                split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
                break;
        }

        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
        res->bi_phys_segments = nsegs;
        bio_set_flag(res, BIO_SEG_VALID);

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                /*
                 * Since we're recursing into make_request here, ensure
                 * that we mark this bio as already having entered the queue.
                 * If not, and the queue is going away, we can get stuck
                 * forever waiting for the queue reference to drop. But
                 * that will never happen, as we're already holding a
                 * reference to it.
                 */
                bio_set_flag(*bio, BIO_QUEUE_ENTERED);

                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);

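/*
 * Count the physical segments in a bio chain, honouring the queue's
 * clustering and segment size limits, and update the front/back segment
 * sizes used later for merge decisions.
 */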
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
                        (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

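/*
 * Check whether the last segment of @bio and the first segment of @nxt can
 * be merged into a single physical segment without violating the queue's
 * segment size and boundary limits.
 */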
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_get_last_bvec(bio, &end_bv);
        bio_get_first_bvec(nxt, &nxt_bv);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

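/*
 * Map a single bio_vec into the scatterlist, merging it into the previous
 * scatterlist entry when the queue's clustering and segment limits allow.
 */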
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = sglist;
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int cluster = blk_queue_cluster(q), nsegs = 0;

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set up.
 * The caller must make sure that sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (op_is_write(req_op(rq)))
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have gone wrong if the calculated segment count is
         * bigger than the number of the request's physical segments.
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

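/*
 * Check whether @bio's physical segments can be added to @req without
 * exceeding the queue's segment limit; update the request's segment count
 * on success.
 */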
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump the
         * segment counter.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(q, req);
        return 0;
}

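/*
 * Check whether @bio can be appended to the tail of @req without violating
 * queue limits (gaps, integrity, max sectors, max segments).
 */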
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, so it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

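/*
 * Try to merge two discard requests: check the discard segment limit and
 * the resulting size, then account the combined number of discard ranges.
 */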
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}

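/*
 * Check whether @req and @next can be merged back to back: neither may be a
 * re-queued request, and the combined size and segment count must stay
 * within the queue limits, coalescing adjacent segments where possible.
 */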
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if either of the requests is a re-queued request.
         * We can't merge them if so.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

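/*
 * Account a merged request: drop it from the in-flight counters of the
 * partition it was issued to.
 */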
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(req->q, cpu, part);
                part_dec_in_flight(req->q, part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!q->mq_ops)
                lockdep_assert_held(q->queue_lock);

        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        /*
         * If we are allowed to merge, then append the bio list from next to
         * rq and release next.  merge_requests_fn will have updated the
         * segment counts; update the sector count here.  Handle DISCARDs
         * separately, as they have separate settings.
         */
        if (req_op(req) == REQ_OP_DISCARD) {
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
        } else if (!ll_merge_requests_fn(q, req, next))
                return NULL;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or front merge. We
         * need the smaller start_time_ns of the merged requests to be the
         * current request for accounting purposes.
         */
        if (next->start_time_ns < req->start_time_ns)
                req->start_time_ns = next->start_time_ns;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (req_op(req) != REQ_OP_DISCARD)
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /*
         * ownership of bio passed from next to req, return 'next' for
         * the caller to free
         */
        next->bio = NULL;
        return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return NULL;
}

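/*
 * Ask the elevator whether @rq and @next may be merged, attempt the merge
 * and, if it succeeds, drop the now-empty @next request.
 */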
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        struct elevator_queue *e = q->elevator;
        struct request *free;

        if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
                if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
                        return 0;

        free = attempt_merge(q, rq, next);
        if (free) {
                __blk_put_request(q, free);
                return 1;
        }

        return 0;
}

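/*
 * Check whether @bio is a candidate for merging into @rq at all: same
 * operation, same data direction, same device and compatible attributes.
 */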
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (req_op(rq) != bio_op(bio))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
                return false;

        /* only merge an integrity protected bio into a likewise protected rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (rq->write_hint != bio->bi_write_hint)
                return false;

        return true;
}

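/*
 * Decide how @bio relates to @rq: as a discard merge, a back merge, a front
 * merge, or no merge at all.
 */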
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
        if (req_op(rq) == REQ_OP_DISCARD &&
            queue_max_discard_segments(rq->q) > 1)
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}