block/blk-merge.c
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

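/*
 * Walk the bio list and count the number of physical segments, honouring
 * the queue's clustering, segment size and segment boundary limits.  The
 * size of the first and last contiguous runs is recorded in
 * bi_seg_front_size/bi_seg_back_size so that adjacent bios and requests
 * can later be checked for mergeability.
 */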
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
{
        unsigned int phys_size;
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;

        if (!bio)
                return 0;

        fbio = bio;
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        seg_size = 0;
        phys_size = nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
                        high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
                                if (seg_size + bv->bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                        goto new_segment;

                                seg_size += bv->bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv->bv_len;
                        highprv = high;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

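/*
 * Recompute the cached physical segment count for an entire request.
 */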
void blk_recalc_rq_segments(struct request *rq)
{
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

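/*
 * Recount the physical segments of a single bio.  The bio is temporarily
 * unlinked from its chain so that only this bio is counted, and
 * BIO_SEG_VALID is set to mark the cached count as up to date.
 */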
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

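/*
 * Check whether the last segment of bio and the first segment of nxt can
 * be folded into a single physical segment without exceeding the queue's
 * segment size and segment boundary limits.
 */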
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
                return 1;

        return 0;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set
 * up.  The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;

        nsegs = 0;
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

        /*
         * for each bio in rq
         */
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
                int nbytes = bvec->bv_len;

                if (bvprv && cluster) {
                        if (sg->length + nbytes > queue_max_segment_size(q))
                                goto new_segment;

                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                                goto new_segment;

                        sg->length += nbytes;
                } else {
new_segment:
                        if (!sg)
                                sg = sglist;
                        else {
                                /*
                                 * If the driver previously mapped a shorter
                                 * list, we could see a termination bit
                                 * prematurely unless it fully inits the sg
                                 * table on each mapping. We KNOW that there
                                 * must be more entries here or the driver
                                 * would be buggy, so force clear the
                                 * termination bit to avoid doing a full
                                 * sg_init_table() in drivers for each command.
                                 */
                                sg->page_link &= ~0x02;
                                sg = sg_next(sg);
                        }

                        sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
                        nsegs++;
                }
                bvprv = bvec;
        } /* segments in rq */

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_RW)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

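/*
 * Check whether adding bio's segments to req would exceed the queue's
 * segment limits.  If it would, mark the request as not mergeable and
 * invalidate the merge hint; otherwise account for the new segments.
 */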
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
            req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }

        /*
         * This will form the start of a new hw segment.  Bump the
         * physical segment counter.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;
}

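/*
 * Decide whether bio may be appended to the back of req (back merge),
 * checking the resulting request size and the segment limits.
 */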
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        unsigned short max_sectors;

        if (unlikely(blk_pc_request(req)))
                max_sectors = queue_max_hw_sectors(q);
        else
                max_sectors = queue_max_sectors(q);

        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

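/*
 * Decide whether bio may be prepended to the front of req (front merge),
 * checking the resulting request size and the segment limits.
 */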
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        unsigned short max_sectors;

        if (unlikely(blk_pc_request(req)))
                max_sectors = queue_max_hw_sectors(q);
        else
                max_sectors = queue_max_sectors(q);

        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

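/*
 * Check whether req and next can be combined into a single request without
 * violating the queue's size and segment limits, and update the segment
 * accounting if they can.
 */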
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check whether either of the requests is a re-queued
         * request.  We can't merge them if so.
         */
        if (req->special || next->special)
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_phys_segments(q))
                return 0;

        if (total_phys_segments > queue_max_hw_segments(q))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

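/*
 * Account for a request that goes away because it was merged into another
 * one: update the partition's round statistics and decrement its in-flight
 * counter.
 */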
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                         struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || next->special)
                return 0;

        if (blk_integrity_rq(req) != blk_integrity_rq(next))
                return 0;

        /*
         * If we are allowed to merge, then append the bio list from next
         * to req and release next.  ll_merge_requests_fn will have updated
         * the segment counts; update the sector counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If the failfast settings disagree, or either request is already
         * a mixed merge, mark both as mixed before proceeding.  This makes
         * sure that all involved bios have mixable attributes set
         * properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or a front merge.
         * For accounting purposes the surviving request should carry the
         * smaller start_time of the two merged requests.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of the bios has passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

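/*
 * Try to merge rq with the request that follows it in the elevator's sort
 * order.
 */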
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

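/*
 * Try to merge rq with the request that precedes it in the elevator's sort
 * order.
 */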
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}