block: permit PREFLUSH and POSTFLUSH without prepare_flush_fn
[platform/adaptation/renesas_rcar/renesas_kernel.git] / block/blk-barrier.c
/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
                      prepare_flush_fn *prepare_flush_fn)
{
        if (ordered != QUEUE_ORDERED_NONE &&
            ordered != QUEUE_ORDERED_DRAIN &&
            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
            ordered != QUEUE_ORDERED_DRAIN_FUA &&
            ordered != QUEUE_ORDERED_TAG &&
            ordered != QUEUE_ORDERED_TAG_FLUSH &&
            ordered != QUEUE_ORDERED_TAG_FUA) {
                printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
                return -EINVAL;
        }

        q->ordered = ordered;
        q->next_ordered = ordered;
        q->prepare_flush_fn = prepare_flush_fn;

        return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
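
/*
 * Illustrative usage (not part of this file; variable names are
 * hypothetical): with this change, a driver whose cache flush needs no
 * driver-specific request setup can register a flush-including ordered
 * mode with a NULL prepare_flush_fn:
 *
 *        ret = blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, NULL);
 *        if (ret)
 *                return ret;
 *
 * Drivers that do need to touch the flush request (command bytes,
 * timeouts, and so on) still pass their own prepare_flush_fn as before.
 */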

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        return 1 << ffz(q->ordseq);
}
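
/*
 * Worked example (assuming the usual QUEUE_ORDSEQ_* values: STARTED 0x01,
 * DRAIN 0x02, PREFLUSH 0x04, BAR 0x08, POSTFLUSH 0x10, DONE 0x20): once
 * STARTED and DRAIN have completed, ordseq == 0x03, ffz() returns 2 and
 * 1 << 2 == QUEUE_ORDSEQ_PREFLUSH, i.e. the lowest step still outstanding.
 */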

unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (rq->cmd_type != REQ_TYPE_FS)
                return QUEUE_ORDSEQ_DRAIN;

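        /*
         * The ordered-color bit is assumed to be toggled by the elevator
         * each time a barrier is queued: a request with the same color as
         * the original barrier was issued before it and belongs to the
         * drain stage; a different color means it was issued after the
         * barrier and must wait until the whole sequence is done.
         */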
        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return false;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;
        __blk_end_request_all(rq, q->orderr);
        return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_DO_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
        rq->rq_disk = q->bar_rq.rq_disk;
        rq->end_io = end_io;
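        /*
         * prepare_flush_fn is optional: a driver whose flush request needs
         * no driver-specific setup may leave it NULL even for ordered modes
         * that include PREFLUSH/POSTFLUSH.
         */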
        if (q->prepare_flush_fn)
                q->prepare_flush_fn(q, rq);

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        unsigned skip = 0;

        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /*
         * For an empty barrier, there's no actual BAR request, which
         * in turn makes POSTFLUSH unnecessary.  Mask them off.
         */
        if (!blk_rq_sectors(rq)) {
                q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
                                QUEUE_ORDERED_DO_POSTFLUSH);
                /*
                 * Empty barrier on a write-through device w/ ordered
                 * tag has no command to issue and without any command
                 * to issue, ordering by tag can't be used.  Drain
                 * instead.
                 */
                if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
                    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
                        q->ordered &= ~QUEUE_ORDERED_BY_TAG;
                        q->ordered |= QUEUE_ORDERED_BY_DRAIN;
                }
        }

        /* stash away the original request */
        blk_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = NULL;

        /*
         * Queue the ordered sequence.  As we stack the requests at the
         * head, we need to queue them in reverse order.  Note that we rely
         * on the fact that no fs request uses ELEVATOR_INSERT_FRONT, and
         * thus no fs request gets in between the ordered sequence.
         */
        if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
                rq = &q->post_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_POSTFLUSH;

        if (q->ordered & QUEUE_ORDERED_DO_BAR) {
                rq = &q->bar_rq;

                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                        rq->cmd_flags |= REQ_WRITE;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
                rq->end_io = bar_end_io;

                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
        } else
                skip |= QUEUE_ORDSEQ_BAR;

        if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;

        if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;

        *rqp = rq;

        /*
         * Complete skipped sequences.  If whole sequence is complete,
         * return false to tell elevator that this request is gone.
         */
        return !blk_ordered_complete_seq(q, skip, 0);
}
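
/*
 * Example of the resulting dispatch order: for QUEUE_ORDERED_DRAIN_FLUSH
 * with a non-empty barrier, the three ELEVATOR_INSERT_FRONT insertions above
 * leave the queue head as pre_flush_rq, bar_rq, post_flush_rq, which is the
 * order in which they execute.  *rqp then points at pre_flush_rq, or is NULL
 * while previously issued requests are still in flight and the drain step
 * has to finish first.
 */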

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
                                (rq->cmd_flags & REQ_HARDBARRIER);

        if (!q->ordseq) {
                if (!is_barrier)
                        return true;

                if (q->next_ordered != QUEUE_ORDERED_NONE)
                        return start_ordered(q, rqp);
                else {
                        /*
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
                        blk_dequeue_request(rq);
                        __blk_end_request_all(rq, -EOPNOTSUPP);
                        *rqp = NULL;
                        return false;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (rq->cmd_type != REQ_TYPE_FS &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return true;

        if (q->ordered & QUEUE_ORDERED_BY_TAG) {
                /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
        } else {
                /* Ordered by draining.  Wait for turn. */
                WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
                if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                        *rqp = NULL;
        }

        return true;
}
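
/*
 * Note on the return convention (an assumption about the dispatch-side
 * caller, e.g. __elv_next_request): returning true with *rqp intact lets
 * the request be dispatched now, true with *rqp set to NULL asks the caller
 * to stop dispatching until the sequence advances, and false means the
 * request has already been completed (ended with -EOPNOTSUPP, or the whole
 * sequence finished immediately), so the caller should look at the queue
 * again.
 */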

static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @error_sector:       error sector
 * @flags:      BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question. Callers can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If the WAIT flag is not passed, the flush may only have been
 *    queued internally for later handling by the time this returns.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_bdev = bdev;
        if (test_bit(BLKDEV_WAIT, &flags))
                bio->bi_private = &wait;

        bio_get(bio);
        submit_bio(WRITE_BARRIER, bio);
        if (test_bit(BLKDEV_WAIT, &flags)) {
                wait_for_completion(&wait);
                /*
                 * The driver must store the error location in ->bi_sector, if
                 * it supports it. For non-stacked drivers, this should be
                 * copied from blk_rq_pos(rq).
                 */
                if (error_sector)
                        *error_sector = bio->bi_sector;
        }

        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
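
/*
 * Illustrative usage (not part of this file; names are hypothetical): a
 * synchronous caller such as a filesystem's fsync path would typically wait
 * for completion and not ask for the error sector:
 *
 *        err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL,
 *                                 BLKDEV_IFL_WAIT);
 *        if (err == -EOPNOTSUPP)
 *                err = 0;
 *
 * -EOPNOTSUPP here means the device did not accept the flush (for instance
 * it has no volatile write cache), which callers commonly treat as success.
 */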