/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We do guarantee, though, that if
 * the value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee ordering between writes and reads, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}

static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	sector_t blba = pblk_get_lba(bio);
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba + nr_secs >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lbas out of bounds\n");
		return;
	}

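	/* Resolve the whole lba range through the L2P table in one lookup */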
	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

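		/* A cache read can fail if the entry is overwritten after the
		 * lookup; re-resolve the lba and retry in that case.
		 */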
retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * against the write buffer to prevent retrieving overwritten
		 * data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media (non-cached sectors) */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

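/* Submit the request and translate errors into pblk's NVM_IO_* codes */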
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(bio);
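	/* If the request was submitted with a cloned bio, the original bio is
	 * stashed in the per-request context and is completed here.
	 */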
	if (r_ctx->private) {
		struct bio *orig_bio = r_ctx->private;

#ifdef CONFIG_NVM_DEBUG
		WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
#endif
		bio_endio(orig_bio);
		bio_put(orig_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, READ);
	atomic_dec(&pblk->inflight_io);
}

static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				      unsigned int bio_init_idx,
				      unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;
	DECLARE_COMPLETION_ONSTACK(wait);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
	if (!new_bio) {
		pr_err("pblk: could not alloc read bio\n");
		return NVM_IO_ERR;
	}

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
	new_bio->bi_private = &wait;
	new_bio->bi_end_io = pblk_end_bio_sync;

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd->end_io = NULL;

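	/* With a single hole the device expects a single ppa_addr instead of
	 * a ppa list; stash the list pointers so they can be restored before
	 * the DMA buffer is freed.
	 */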
	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_read_io(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: read IO submission failed\n");
		goto err;
	}

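	/* Partial reads complete synchronously: wait for the hole reads to
	 * finish before stitching the data back into the original bio.
	 */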
	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: partial read I/O timed out\n");
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
	}

	/* Fill the holes in the original bio */
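	/* Zero bits in read_bitmap mark the sectors that were read from media
	 * into new_bio; copy each one back into the caller's pages.
	 */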
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;

	bio_endio(bio);
	pblk_end_io_read(rqd);
	return NVM_IO_OK;

err:
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
	rqd->private = pblk;
	pblk_end_io_read(rqd);
	return NVM_IO_ERR;
}

static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 unsigned long *read_bitmap)
{
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;
	sector_t lba = pblk_get_lba(bio);

	/* logic error: lba out-of-bounds. Ignore read request */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		return;
	}

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

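	/* As in the ppa-list path, a cache read can fail if the entry is
	 * overwritten after the L2P lookup; re-resolve the lba and retry.
	 */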
retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		return;
	}

	/* Try to read from write buffer. The address is later checked against
	 * the write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}
		WARN_ON(test_and_set_bit(0, read_bitmap));
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	unsigned int nr_secs = pblk_get_secs(bio);
	struct nvm_rq *rqd;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	unsigned int bio_init_idx;
	int ret = NVM_IO_ERR;

	if (nr_secs > PBLK_MAX_REQ_ADDRS)
		return NVM_IO_ERR;

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err_ratelimited("pblk: not able to alloc rqd\n");
		return NVM_IO_ERR;
	}

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

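	/* The sector metadata and the ppa list share a single DMA buffer */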
	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate metadata list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, &read_bitmap);
	}

	bio_get(bio);
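	/* All sectors were either unmapped or served from the write cache;
	 * complete the bio without touching the device.
	 */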
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		pblk_end_io_read(rqd);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;
		struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			return NVM_IO_ERR;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			bio_put(int_bio);
			return ret;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
	if (ret) {
		pr_err("pblk: failed to perform partial read\n");
		return ret;
	}

	return NVM_IO_OK;

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      unsigned int nr_secs)
{
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);

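	/* Skip sectors that have moved to the write cache or to another line,
	 * or that have been invalidated since GC picked them up.
	 */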
	for (i = 0; i < nr_secs; i++) {
		if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
						pblk_ppa_empty(ppas[i])) {
			lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppas[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
	return valid_secs;
}

static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba)
{
	struct ppa_addr ppa;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Ignore the sector if its mapping has been updated in the meantime */
	if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
							pblk_ppa_empty(ppa))
		goto out;

	rqd->ppa_addr = ppa;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}

int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int ret, data_len;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return NVM_IO_ERR;

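	/* As in the normal read path, the ppa list is carved out of the same
	 * DMA buffer as the sector metadata.
	 */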
	if (nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		*secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
								nr_secs);
		if (*secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		*secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
	}

	if (!(*secs_to_gc))
		goto out;

	data_len = (*secs_to_gc) * geo->sec_size;
	bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
						PBLK_KMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%ld)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;
	rqd.nr_ppas = *secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	ret = pblk_submit_read_io(pblk, &rqd);
	if (ret) {
		bio_endio(bio);
		pr_err("pblk: GC read request failed\n");
		goto err_free_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: GC read I/O timed out\n");
	}
	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(*secs_to_gc, &pblk->sync_reads);
	atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_OK;

err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return NVM_IO_ERR;
}