lightnvm: pblk: control I/O flow also on tear down
authorJavier González <jg@lightnvm.io>
Fri, 7 Jul 2017 19:08:52 +0000 (21:08 +0200)
committerJens Axboe <axboe@kernel.dk>
Fri, 7 Jul 2017 19:17:34 +0000 (13:17 -0600)
When removing a pblk instance, control the write I/O flow to the
controller as we do in the fast path.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/lightnvm/pblk-core.c
drivers/lightnvm/pblk-recovery.c
drivers/lightnvm/pblk-write.c
drivers/lightnvm/pblk.h

index 11fe0c5..8150164 100644 (file)
@@ -1670,13 +1670,10 @@ void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
        queue_work(wq, &line_ws->ws);
 }
 
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
-                 unsigned long *lun_bitmap)
+static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
+                            int nr_ppas, int pos)
 {
-       struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
-       struct pblk_lun *rlun;
-       int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+       struct pblk_lun *rlun = &pblk->luns[pos];
        int ret;
 
        /*
@@ -1690,14 +1687,8 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
                                ppa_list[0].g.ch != ppa_list[i].g.ch);
 #endif
-       /* If the LUN has been locked for this same request, do no attempt to
-        * lock it again
-        */
-       if (test_and_set_bit(pos, lun_bitmap))
-               return;
 
-       rlun = &pblk->luns[pos];
-       ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
+       ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
        if (ret) {
                switch (ret) {
                case -ETIME:
@@ -1710,6 +1701,50 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
        }
 }
 
+void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+{
+       struct nvm_tgt_dev *dev = pblk->dev;
+       struct nvm_geo *geo = &dev->geo;
+       int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+
+       __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+}
+
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+                 unsigned long *lun_bitmap)
+{
+       struct nvm_tgt_dev *dev = pblk->dev;
+       struct nvm_geo *geo = &dev->geo;
+       int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+
+       /* If the LUN has been locked for this same request, do not attempt to
+        * lock it again
+        */
+       if (test_and_set_bit(pos, lun_bitmap))
+               return;
+
+       __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+}
+
+void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+{
+       struct nvm_tgt_dev *dev = pblk->dev;
+       struct nvm_geo *geo = &dev->geo;
+       struct pblk_lun *rlun;
+       int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+
+#ifdef CONFIG_NVM_DEBUG
+       int i;
+
+       for (i = 1; i < nr_ppas; i++)
+               WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
+                               ppa_list[0].g.ch != ppa_list[i].g.ch);
+#endif
+
+       rlun = &pblk->luns[pos];
+       up(&rlun->wr_sem);
+}
+
 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap)
 {
index 0e48d3e..cb556e0 100644 (file)
@@ -340,9 +340,14 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
        struct pblk *pblk = pad_rq->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
 
-       kref_put(&pad_rq->ref, pblk_recov_complete);
+       pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+
+       bio_put(rqd->bio);
        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
        pblk_free_rqd(pblk, rqd, WRITE);
+
+       atomic_dec(&pblk->inflight_io);
+       kref_put(&pad_rq->ref, pblk_recov_complete);
 }
 
 static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
@@ -385,7 +390,7 @@ next_pad_rq:
        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
        if (rq_ppas < pblk->min_write_pgs) {
                pr_err("pblk: corrupted pad line %d\n", line->id);
-               goto free_rq;
+               goto fail_free_pad;
        }
 
        rq_len = rq_ppas * geo->sec_size;
@@ -393,7 +398,7 @@ next_pad_rq:
        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
        if (!meta_list) {
                ret = -ENOMEM;
-               goto free_data;
+               goto fail_free_pad;
        }
 
        ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
@@ -404,9 +409,9 @@ next_pad_rq:
                ret = PTR_ERR(rqd);
                goto fail_free_meta;
        }
-       memset(rqd, 0, pblk_w_rq_size);
 
-       bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
+       bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
+                                               PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
@@ -453,15 +458,15 @@ next_pad_rq:
        }
 
        kref_get(&pad_rq->ref);
+       pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
 
        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: I/O submission failed: %d\n", ret);
-               goto free_data;
+               pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+               goto fail_free_bio;
        }
 
-       atomic_dec(&pblk->inflight_io);
-
        left_line_ppas -= rq_ppas;
        left_ppas -= rq_ppas;
        if (left_ppas && left_line_ppas)
@@ -475,17 +480,23 @@ next_pad_rq:
                ret = -ETIME;
        }
 
+       if (!pblk_line_is_full(line))
+               pr_err("pblk: corrupted padded line: %d\n", line->id);
+
+       vfree(data);
 free_rq:
        kfree(pad_rq);
-free_data:
-       vfree(data);
        return ret;
 
+fail_free_bio:
+       bio_put(bio);
 fail_free_rqd:
        pblk_free_rqd(pblk, rqd, WRITE);
 fail_free_meta:
        nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
+fail_free_pad:
        kfree(pad_rq);
+       vfree(data);
        return ret;
 }
 
index d62a8f4..cc2b941 100644 (file)
@@ -178,15 +178,12 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
 {
        struct pblk *pblk = rqd->private;
        struct nvm_tgt_dev *dev = pblk->dev;
-       struct nvm_geo *geo = &dev->geo;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
-       int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
-       struct pblk_lun *rlun = &pblk->luns[pos];
        int sync;
 
-       up(&rlun->wr_sem);
+       pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
 
        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
@@ -203,6 +200,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
                                                                pblk->close_wq);
 
        bio_put(rqd->bio);
+       nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
        pblk_free_rqd(pblk, rqd, READ);
 
        atomic_dec(&pblk->inflight_io);
@@ -367,7 +365,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct pblk_g_ctx *m_ctx;
-       struct pblk_lun *rlun;
        struct bio *bio;
        struct nvm_rq *rqd;
        void *data;
@@ -411,13 +408,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
                        rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }
 
-       rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
-       ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
-       if (ret) {
-               pr_err("pblk: lun semaphore timed out (%d)\n", ret);
-               goto fail_free_bio;
-       }
-
        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0]) {
                spin_lock(&l_mg->close_lock);
@@ -427,6 +417,8 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
                spin_unlock(&l_mg->close_lock);
        }
 
+       pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+
        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
@@ -436,10 +428,13 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
        return NVM_IO_OK;
 
 fail_rollback:
+       pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        list_add(&meta_line->list, &meta_line->list);
        spin_unlock(&l_mg->close_lock);
+
+       nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 fail_free_bio:
        if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
                bio_put(bio);
index 1593138..0c5692c 100644 (file)
@@ -739,8 +739,10 @@ u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush);
+void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap);
+void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap);
 void pblk_end_bio_sync(struct bio *bio);