lightnvm: add a bitmap of luns
author	Wenwei Tao <ww.tao0320@gmail.com>
Thu, 3 Mar 2016 14:06:38 +0000 (15:06 +0100)
committer	Jens Axboe <axboe@fb.com>
Sat, 19 Mar 2016 01:10:38 +0000 (18:10 -0700)
Add a bitmap of luns to indicate the status of each lun: in use or
available. When creating targets, do the necessary checks to avoid
allocating luns that are already in use.
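
For reference, here is a minimal userspace sketch of the lun_map
semantics: reserving a lun is a test-and-set on the bitmap, and
releasing it is a clear. This is an illustration only; the in-kernel
code uses the atomic test_and_set_bit()/test_and_clear_bit() helpers
and BITS_TO_LONGS(), whereas this plain-C version is not atomic, and
the helper names below (lun_reserve, lun_release) are hypothetical,
not kernel API.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_ULONG    (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_ULONGS(n) (((n) + BITS_PER_ULONG - 1) / BITS_PER_ULONG)

    /* Mark a lun as in use; return nonzero if it was already taken. */
    static int lun_reserve(unsigned long *map, int lunid)
    {
            unsigned long *word = &map[lunid / BITS_PER_ULONG];
            unsigned long mask = 1UL << (lunid % BITS_PER_ULONG);
            int was_set = !!(*word & mask);

            *word |= mask;
            return was_set;
    }

    /* Return a lun to the available pool. */
    static void lun_release(unsigned long *map, int lunid)
    {
            map[lunid / BITS_PER_ULONG] &= ~(1UL << (lunid % BITS_PER_ULONG));
    }

    int main(void)
    {
            int nr_luns = 128; /* hypothetical device geometry */
            unsigned long *lun_map = calloc(BITS_TO_ULONGS(nr_luns),
                                            sizeof(unsigned long));

            if (!lun_map)
                    return 1;

            printf("%d\n", lun_reserve(lun_map, 5)); /* 0: was available */
            printf("%d\n", lun_reserve(lun_map, 5)); /* 1: already in use */
            lun_release(lun_map, 5);
            printf("%d\n", lun_reserve(lun_map, 5)); /* 0: available again */

            free(lun_map);
            return 0;
    }

A target that fails to reserve a lun bails out of its init path and
releases any luns it already holds, which is what the rrpc changes
below do via the new reserve_lun()/release_lun() callbacks.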

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
Freed dev->lun_map if nvm_core_init later fails in the init process.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/lightnvm/core.c
drivers/lightnvm/gennvm.c
drivers/lightnvm/rrpc.c
include/linux/lightnvm.h

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 2925fd0..0dc9a80 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -464,6 +464,10 @@ static int nvm_core_init(struct nvm_dev *dev)
        dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
 
        dev->total_secs = dev->nr_luns * dev->sec_per_lun;
+       dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+                                       sizeof(unsigned long), GFP_KERNEL);
+       if (!dev->lun_map)
+               return -ENOMEM;
        INIT_LIST_HEAD(&dev->online_targets);
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);
@@ -586,6 +590,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
 
        return 0;
 err_init:
+       kfree(dev->lun_map);
        kfree(dev);
        return ret;
 }
@@ -608,6 +613,7 @@ void nvm_unregister(char *disk_name)
        up_write(&nvm_lock);
 
        nvm_exit(dev);
+       kfree(dev->lun_map);
        kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index d460b37..b97801c 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -192,6 +192,9 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
                lun_id = div_u64(pba, dev->sec_per_lun);
                lun = &gn->luns[lun_id];
 
+               if (!test_bit(lun_id, dev->lun_map))
+                       __set_bit(lun_id, dev->lun_map);
+
                /* Calculate block offset into lun */
                pba = pba - (dev->sec_per_lun * lun_id);
                blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
@@ -482,10 +485,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
        return nvm_erase_ppa(dev, &addr, 1);
 }
 
+static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+{
+       return test_and_set_bit(lunid, dev->lun_map);
+}
+
+static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+{
+       WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+}
+
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
 {
        struct gen_nvm *gn = dev->mp;
 
+       if (unlikely(lunid >= dev->nr_luns))
+               return NULL;
+
        return &gn->luns[lunid].vlun;
 }
 
@@ -527,6 +543,8 @@ static struct nvmm_type gennvm = {
        .erase_blk              = gennvm_erase_blk,
 
        .get_lun                = gennvm_get_lun,
+       .reserve_lun            = gennvm_reserve_lun,
+       .release_lun            = gennvm_release_lun,
        .lun_info_print         = gennvm_lun_info_print,
 
        .get_area               = gennvm_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index c1e3c83..3ab6495 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -965,25 +965,11 @@ static void rrpc_requeue(struct work_struct *work)
 
 static void rrpc_gc_free(struct rrpc *rrpc)
 {
-       struct rrpc_lun *rlun;
-       int i;
-
        if (rrpc->krqd_wq)
                destroy_workqueue(rrpc->krqd_wq);
 
        if (rrpc->kgc_wq)
                destroy_workqueue(rrpc->kgc_wq);
-
-       if (!rrpc->luns)
-               return;
-
-       for (i = 0; i < rrpc->nr_luns; i++) {
-               rlun = &rrpc->luns[i];
-
-               if (!rlun->blocks)
-                       break;
-               vfree(rlun->blocks);
-       }
 }
 
 static int rrpc_gc_init(struct rrpc *rrpc)
@@ -1143,6 +1129,23 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
+       struct nvm_dev *dev = rrpc->dev;
+       struct nvm_lun *lun;
+       struct rrpc_lun *rlun;
+       int i;
+
+       if (!rrpc->luns)
+               return;
+
+       for (i = 0; i < rrpc->nr_luns; i++) {
+               rlun = &rrpc->luns[i];
+               lun = rlun->parent;
+               if (!lun)
+                       break;
+               dev->mt->release_lun(dev, lun->id);
+               vfree(rlun->blocks);
+       }
+
        kfree(rrpc->luns);
 }
 
@@ -1150,7 +1153,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 {
        struct nvm_dev *dev = rrpc->dev;
        struct rrpc_lun *rlun;
-       int i, j;
+       int i, j, ret = -EINVAL;
 
        if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
                pr_err("rrpc: number of pages per block too high.");
@@ -1166,25 +1169,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
        /* 1:1 mapping */
        for (i = 0; i < rrpc->nr_luns; i++) {
-               struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
-
-               rlun = &rrpc->luns[i];
-               rlun->rrpc = rrpc;
-               rlun->parent = lun;
-               INIT_LIST_HEAD(&rlun->prio_list);
-               INIT_LIST_HEAD(&rlun->open_list);
-               INIT_LIST_HEAD(&rlun->closed_list);
+               int lunid = lun_begin + i;
+               struct nvm_lun *lun;
 
-               INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
-               spin_lock_init(&rlun->lock);
+               if (dev->mt->reserve_lun(dev, lunid)) {
+                       pr_err("rrpc: lun %u is already allocated\n", lunid);
+                       goto err;
+               }
 
-               rrpc->total_blocks += dev->blks_per_lun;
-               rrpc->nr_sects += dev->sec_per_lun;
+               lun = dev->mt->get_lun(dev, lunid);
+               if (!lun)
+                       goto err;
 
+               rlun = &rrpc->luns[i];
+               rlun->parent = lun;
                rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
                                                rrpc->dev->blks_per_lun);
-               if (!rlun->blocks)
+               if (!rlun->blocks) {
+                       ret = -ENOMEM;
                        goto err;
+               }
 
                for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
                        struct rrpc_block *rblk = &rlun->blocks[j];
@@ -1195,11 +1199,23 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
                        INIT_LIST_HEAD(&rblk->prio);
                        spin_lock_init(&rblk->lock);
                }
+
+               rlun->rrpc = rrpc;
+               INIT_LIST_HEAD(&rlun->prio_list);
+               INIT_LIST_HEAD(&rlun->open_list);
+               INIT_LIST_HEAD(&rlun->closed_list);
+
+               INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+               spin_lock_init(&rlun->lock);
+
+               rrpc->total_blocks += dev->blks_per_lun;
+               rrpc->nr_sects += dev->sec_per_lun;
+
        }
 
        return 0;
 err:
-       return -ENOMEM;
+       return ret;
 }
 
 /* returns 0 on success and stores the beginning address in *begin */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index b466bd9..0ee2c2c 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -346,6 +346,7 @@ struct nvm_dev {
        int nr_luns;
        unsigned max_pages_per_blk;
 
+       unsigned long *lun_map;
        void *ppalist_pool;
 
        struct nvm_id identity;
@@ -466,6 +467,8 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
                                                                unsigned long);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
+typedef void (nvmm_release_lun)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
 typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@@ -492,6 +495,8 @@ struct nvmm_type {
 
        /* Configuration management */
        nvmm_get_lun_fn *get_lun;
+       nvmm_reserve_lun *reserve_lun;
+       nvmm_release_lun *release_lun;
 
        /* Statistics */
        nvmm_lun_info_print_fn *lun_info_print;