/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We do guarantee that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter);
}
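
/*
 * Build a multi-sector read request. Sectors already served from the write
 * buffer (or mapped to an empty ppa) get their bit set in read_bitmap; only
 * the remaining sectors are queued on rqd->ppa_list for a device read.
 */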
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	sector_t blba = pblk_get_lba(bio);
	int nr_secs = rqd->nr_ppas;
	int advanced_bio = 0;
	int i, j = 0;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba + nr_secs >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lbas out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			continue;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			advanced_bio = 1;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
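
/*
 * Thin wrapper around pblk_submit_io() that folds any submission error into
 * the NVM_IO_ERR/NVM_IO_OK convention used throughout the read path.
 */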
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}
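
/*
 * Completion path for device reads: log any media error, release the
 * metadata DMA buffer, complete the original bio when this request carries
 * a clone, and drop the in-flight accounting taken at submission time.
 */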
static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(bio);
	if (r_ctx->private) {
		struct bio *orig_bio = r_ctx->private;

#ifdef CONFIG_NVM_DEBUG
		WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
		bio_endio(orig_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, READ);
	atomic_dec(&pblk->inflight_io);
}
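
/*
 * A partial read is one where part of the request was served from the write
 * buffer and the rest must come from the device. The holes are read
 * synchronously into a temporary bio and then copied back, page by page,
 * into the original bio.
 */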
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				      unsigned int bio_init_idx,
				      unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;
	DECLARE_COMPLETION_ONSTACK(wait);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
	if (!new_bio) {
		pr_err("pblk: could not alloc read bio\n");
		return NVM_IO_ERR;
	}

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
	new_bio->bi_private = &wait;
	new_bio->bi_end_io = pblk_end_bio_sync;

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd->end_io = NULL;
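
	/* When a single hole remains, the device expects a single ppa_addr
	 * rather than a ppa list; stash the list so it can be restored once
	 * the I/O completes.
	 */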
	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_read_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: read IO submission failed\n");
		goto err;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: partial read I/O timed out\n");
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
	}

	/* Fill the holes in the original bio */
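	/* Example: in an 8-sector request where sectors 2 and 5 came from
	 * the write buffer, read_bitmap is 00100100b, so the loop below
	 * copies the six device-read pages into holes 0, 1, 3, 4, 6 and 7
	 * of the original bio.
	 */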
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;

	bio_endio(bio);
	pblk_end_io_read(rqd);
	return NVM_IO_OK;

err:
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);

	rqd->private = pblk;
	pblk_end_io_read(rqd);
	return NVM_IO_ERR;
}
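
/*
 * Single-sector counterpart of pblk_read_ppalist_rq(): resolve one lba and
 * either serve it from the write buffer or set up a single-ppa device read.
 */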
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;
	sector_t lba = pblk_get_lba(bio);

	/* logic error: lba out-of-bounds. Ignore read request */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}
		WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
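
/*
 * Entry point for the read path. After the L2P lookup there are three
 * cases: every sector was served from the write buffer (complete the bio
 * immediately), no sector was (submit the request to the device as-is), or
 * the request is partially cached and the holes must be filled from the
 * device.
 */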
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	unsigned int nr_secs = pblk_get_secs(bio);
	struct nvm_rq *rqd;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	unsigned int bio_init_idx;
	int ret = NVM_IO_ERR;

	if (nr_secs > PBLK_MAX_REQ_ADDRS)
		return NVM_IO_ERR;

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err_ratelimited("pblk: not able to alloc rqd");
		return NVM_IO_ERR;
	}

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);
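
	/* A single DMA allocation backs both the per-sector metadata and,
	 * offset by pblk_dma_meta_size, the ppa list used for multi-sector
	 * requests.
	 */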
	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, &read_bitmap);
	}
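
	/* If every sector was found in the write buffer, the original bio
	 * can be completed right away without touching the device.
	 */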
	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		pblk_end_io_read(rqd);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;
		struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			return NVM_IO_ERR;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			return ret;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
	if (ret) {
		pr_err("pblk: failed to perform partial read\n");
		return ret;
	}

	return NVM_IO_OK;

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}
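
/*
 * GC reads fetch only sectors that still live on the line being collected:
 * anything cached, mapped to a different line, or unmapped is skipped and
 * its lba marked ADDR_EMPTY so the collector does not rewrite stale data.
 */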
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      unsigned int nr_secs)
{
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
						pblk_ppa_empty(ppas[i])) {
			lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppas[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba)
{
	struct ppa_addr ppa;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Ignore the sector if its mapping has been updated since GC
	 * selected it.
	 */
	if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
							pblk_ppa_empty(ppa))
		goto out;

	rqd->ppa_addr = ppa;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}
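
/*
 * Synchronous read used by the garbage collector: the request is built on
 * the stack, submitted, and waited on in place before the valid data is
 * handed back for rewriting.
 */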
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int ret, data_len;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return NVM_IO_ERR;

	if (nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		*secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
								nr_secs);
		if (*secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		*secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
	}

	if (!(*secs_to_gc))
		goto out;

	data_len = (*secs_to_gc) * geo->sec_size;
	bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
						PBLK_KMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;
	rqd.nr_ppas = *secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	ret = pblk_submit_read_io(pblk, &rqd);
	if (ret) {
		bio_endio(bio);
		pr_err("pblk: GC read request failed\n");
		goto err_free_bio;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: GC read I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(*secs_to_gc, &pblk->sync_reads);
	atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_OK;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_ERR;
}