/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>
static void bch_bi_idx_hack_endio(struct bio *bio, int error)
{
        struct bio *p = bio->bi_private;

        /* Complete the original bio, then free the temporary clone. */
        bio_endio(p, error);
        bio_put(bio);
}
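/*
 * Submit @bio via generic_make_request(), coping with bios whose iterator has
 * already been advanced (bi_iter.bi_idx != 0): the remaining segments are
 * copied into a freshly allocated bio, which completes the original via
 * bch_bi_idx_hack_endio().
 */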
static void bch_generic_make_request_hack(struct bio *bio)
{
        if (bio->bi_iter.bi_idx) {
                struct bio_vec bv;
                struct bvec_iter iter;
                unsigned segs = bio_segments(bio);
                struct bio *clone = bio_alloc(GFP_NOIO, segs);

                bio_for_each_segment(bv, bio, iter)
                        clone->bi_io_vec[clone->bi_vcnt++] = bv;

                clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
                clone->bi_bdev           = bio->bi_bdev;
                clone->bi_rw             = bio->bi_rw;
                clone->bi_vcnt           = segs;
                clone->bi_iter.bi_size   = bio->bi_iter.bi_size;

                clone->bi_private        = bio;
                clone->bi_end_io         = bch_bi_idx_hack_endio;

                bio = clone;
        }

        /*
         * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
         * bios might have had more than that (before we split them per device
         * limitations).
         *
         * To be taken out once immutable bvec stuff is in.
         */
        bio->bi_max_vecs = bio->bi_vcnt;

        generic_make_request(bio);
}
/**
 * bch_bio_split - split a bio
 * @bio:        bio to split
 * @sectors:    number of sectors to split from the front of @bio
 * @gfp:        gfp mask
 * @bs:         bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
 * unchanged.
 *
 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on
 * a bvec boundary; it is the caller's responsibility to ensure that @bio is
 * not freed before the split.
 */
struct bio *bch_bio_split(struct bio *bio, int sectors,
                          gfp_t gfp, struct bio_set *bs)
{
        unsigned vcnt = 0, nbytes = sectors << 9;
        struct bio_vec bv;
        struct bvec_iter iter;
        struct bio *ret = NULL;

        if (sectors >= bio_sectors(bio))
                return bio;

        if (bio->bi_rw & REQ_DISCARD) {
                ret = bio_alloc_bioset(gfp, 1, bs);
                if (!ret)
                        return NULL;
                goto out;
        }

        /* Count how many bvecs hold the first @sectors' worth of data. */
        bio_for_each_segment(bv, bio, iter) {
                vcnt++;
                if (nbytes <= bv.bv_len)
                        break;
                nbytes -= bv.bv_len;
        }

        ret = bio_alloc_bioset(gfp, vcnt, bs);
        if (!ret)
                return NULL;

        bio_for_each_segment(bv, bio, iter) {
                ret->bi_io_vec[ret->bi_vcnt++] = bv;
                if (ret->bi_vcnt == vcnt)
                        break;
        }

        /* The split may use only part of the last bvec. */
        ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
out:
        ret->bi_bdev            = bio->bi_bdev;
        ret->bi_iter.bi_sector  = bio->bi_iter.bi_sector;
        ret->bi_iter.bi_size    = sectors << 9;
        ret->bi_rw              = bio->bi_rw;

        if (bio_integrity(bio)) {
                if (bio_integrity_clone(ret, bio, gfp)) {
                        bio_put(ret);
                        return NULL;
                }
                bio_integrity_trim(ret, 0, bio_sectors(ret));
        }

        bio_advance(bio, ret->bi_iter.bi_size);
        return ret;
}
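/*
 * Illustrative sketch (not part of the original code): a caller that must
 * honour a per-device size limit can peel pieces off the front of a large bio
 * and submit each one, roughly:
 *
 *      while (bio_sectors(bio) > max_sectors)
 *              generic_make_request(bch_bio_split(bio, max_sectors,
 *                                                 GFP_NOIO, bs));
 *      generic_make_request(bio);
 *
 * where "max_sectors" and "bs" stand in for the caller's limit and bio_set.
 * bch_generic_make_request() below does essentially this, but wraps the
 * fragments in a closure so the original bi_end_io runs only after every
 * fragment has completed.
 */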
/*
 * How many sectors, from the front of @bio, the queue will take in a single
 * request; never less than the first bvec, so we can always make progress.
 */
static unsigned bch_bio_max_sectors(struct bio *bio)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned ret = 0, seg = 0;

        if (bio->bi_rw & REQ_DISCARD)
                return min(bio_sectors(bio), q->limits.max_discard_sectors);

        bio_for_each_segment(bv, bio, iter) {
                struct bvec_merge_data bvm = {
                        .bi_bdev        = bio->bi_bdev,
                        .bi_sector      = bio->bi_iter.bi_sector,
                        .bi_size        = ret << 9,
                        .bi_rw          = bio->bi_rw,
                };

                if (seg == min_t(unsigned, BIO_MAX_PAGES,
                                 queue_max_segments(q)))
                        break;

                if (q->merge_bvec_fn &&
                    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
                        break;

                seg++;
                ret += bv.bv_len >> 9;
        }

        ret = min(ret, queue_max_sectors(q));
        ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

        return ret;
}
static void bch_bio_submit_split_done(struct closure *cl)
{
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        s->bio->bi_end_io = s->bi_end_io;
        s->bio->bi_private = s->bi_private;
        bio_endio(s->bio, 0);

        closure_debug_destroy(&s->cl);
        mempool_free(s, s->p->bio_split_hook);
}
static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        if (error)
                clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

        bio_put(bio);
        closure_put(cl);
}
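/*
 * Submit @bio, splitting it with bch_bio_split() whenever it exceeds what
 * bch_bio_max_sectors() says the underlying queue will accept.  The original
 * bi_end_io/bi_private are stashed in a bio_split_hook and are restored and
 * called only once the closure has seen every fragment complete.
 */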
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
        struct bio_split_hook *s;
        struct bio *n;

        if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
                goto submit;
        if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
                goto submit;

        s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
        closure_init(&s->cl, NULL);

        s->bio          = bio;
        s->p            = p;
        s->bi_end_io    = bio->bi_end_io;
        s->bi_private   = bio->bi_private;
        bio_get(bio);

        do {
                n = bch_bio_split(bio, bch_bio_max_sectors(bio),
                                  GFP_NOIO, s->p->bio_split);

                n->bi_end_io    = bch_bio_submit_split_endio;
                n->bi_private   = &s->cl;

                closure_get(&s->cl);
                bch_generic_make_request_hack(n);
        } while (n != bio);

        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
        return;
submit:
        bch_generic_make_request_hack(bio);
}
/* Bios with headers */
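/*
 * A bbio is a bio embedded in a structure that also carries a bkey: the key's
 * first pointer tells the submit path which cache device and sector offset
 * the bio is destined for (see __bch_submit_bbio() below).
 */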
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
        struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
        struct bio *bio = &b->bio;

        bio_init(bio);
        bio->bi_flags          |= BIO_POOL_NONE << BIO_POOL_OFFSET;
        bio->bi_max_vecs        = bucket_pages(c);
        bio->bi_io_vec          = bio->bi_inline_vecs;

        return bio;
}

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
        bio->bi_bdev            = PTR_CACHE(c, &b->key, 0)->bdev;

        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
                     struct bkey *k, unsigned ptr)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
}
void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
        /*
         * The halflife of an error is:
         * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
         */
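        /*
         * (Added worked example: each successful decay below scales the error
         * count by 127/128, so after n decays roughly (127/128)^n of it
         * remains; (127/128)^88 ~= 0.5, which is where the ~88 * refresh
         * halflife quoted above comes from.)
         */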
        if (ca->set->error_decay) {
                unsigned count = atomic_inc_return(&ca->io_count);

                while (count > ca->set->error_decay) {
                        unsigned errors;
                        unsigned old = count;
                        unsigned new = count - ca->set->error_decay;

                        /*
                         * First we subtract refresh from count; each time we
                         * successfully do so, we rescale the errors once:
                         */
                        count = atomic_cmpxchg(&ca->io_count, old, new);

                        if (count == old) {
                                count = new;

                                errors = atomic_read(&ca->io_errors);
                                do {
                                        old = errors;
                                        new = ((uint64_t) errors * 127) / 128;
                                        errors = atomic_cmpxchg(&ca->io_errors,
                                                                old, new);
                                } while (old != errors);
                        }
                }
        }

        if (error) {
                char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;

                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s, recovering",
                               bdevname(ca->bdev, buf), m);
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
                                            bdevname(ca->bdev, buf), m);
        }
}
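/*
 * Added note: completion latency of cache device bios is compared against the
 * configured read/write congestion thresholds; completions slower than the
 * threshold push c->congested further negative, and it is allowed to drift
 * back toward zero otherwise.  Other code (e.g. the request path's
 * cache-bypass heuristics) can then treat a strongly negative value as "this
 * device is congested".
 */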
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                              int error, const char *m)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);

        unsigned threshold = bio->bi_rw & REQ_WRITE
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;

        if (threshold) {
                unsigned t = local_clock_us();

                int us = t - b->submit_time_us;
                int congested = atomic_read(&c->congested);

                if (us > (int) threshold) {
                        int ms = us / 1024;
                        c->congested_last_us = t;

                        ms = min(ms, CONGESTED_MAX + congested);
                        atomic_sub(ms, &c->congested);
                } else if (congested < 0)
                        atomic_inc(&c->congested);
        }

        bch_count_io_errors(ca, error, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
                    int error, const char *m)
{
        struct closure *cl = bio->bi_private;

        bch_bbio_count_io_errors(c, bio, error, m);
        bio_put(bio);
        closure_put(cl);
}