[ Upstream commit 50246693f81fe887f4db78bf7089051d7f1894cc ]
Split the successful and error return paths, and use one goto label for each
resource to unwind. This also fixes some small errors, like leaking the
module reference count in the reboot case (which seems entirely harmless)
or printing the wrong warning messages for early failures.
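
To illustrate the shape of the rework, here is a minimal, self-contained sketch
of the goto-per-resource unwinding pattern (not bcache code; struct foo,
struct bar and do_register() are made-up names used only for illustration):

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct foo { int x; };
	struct bar { int y; };

	/* stand-in for the real registration step */
	static int do_register(struct foo *foo, struct bar *bar)
	{
		return 0;
	}

	static int example_register(void)
	{
		struct foo *foo;
		struct bar *bar;
		int ret;

		ret = -ENOMEM;
		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
		if (!foo)
			goto out;

		bar = kzalloc(sizeof(*bar), GFP_KERNEL);
		if (!bar)
			goto out_free_foo;

		ret = do_register(foo, bar);
		if (ret)
			goto out_free_bar;

		return 0;		/* success path returns directly */

	out_free_bar:			/* each label releases exactly one resource */
		kfree(bar);
	out_free_foo:
		kfree(foo);
	out:
		return ret;
	}

On success the function never reaches the labels; each failure jumps to the
label that undoes everything acquired before it. This is the structure
register_bcache() is switched to below.
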
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
-	ssize_t ret = -EINVAL;
-	const char *err = "cannot allocate memory";
-	char *path = NULL;
-	struct cache_sb *sb = NULL;
+	const char *err;
+	char *path;
+	struct cache_sb *sb;
	struct block_device *bdev = NULL;
-	struct page *sb_page = NULL;
+	struct page *sb_page;
+	ssize_t ret;
+	ret = -EBUSY;
+	err = "failed to reference bcache module";
	if (!try_module_get(THIS_MODULE))
-		return -EBUSY;
+		goto out;

	/* For latest state of bcache_is_reboot */
	smp_mb();
+	err = "bcache is in reboot";
	if (bcache_is_reboot)
-		return -EBUSY;
+		goto out_module_put;
+	ret = -ENOMEM;
+	err = "cannot allocate memory";
	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
-		goto err;
+		goto out_module_put;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
-		goto err;
+		goto out_free_path;
err = "failed to open device";
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
err = "failed to open device";
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
	err = read_super(sb, bdev, &sb_page);
	if (err)
-		goto err_close;
+		goto out_blkdev_put;
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
		mutex_lock(&bch_register_lock);
		ret = register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
		/* blkdev_put() will be called in cached_dev_free() */
-		if (ret < 0)
-			goto err;
+		if (ret < 0) {
+			bdev = NULL;
+			goto out_put_sb_page;
+		}
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
-			goto err_close;
+			goto out_put_sb_page;

		/* blkdev_put() will be called in bch_cache_release() */
-		if (register_cache(sb, sb_page, bdev, ca) != 0)
-			goto err;
+		if (register_cache(sb, sb_page, bdev, ca) != 0) {
+			bdev = NULL;
+			goto out_put_sb_page;
+		}
	}
-quiet_out:
-	ret = size;
-out:
-	if (sb_page)
-		put_page(sb_page);
+
+	put_page(sb_page);
+done:
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
-	return ret;
-
-err_close:
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
+	return size;
+
+out_put_sb_page:
+	put_page(sb_page);
+out_blkdev_put:
+	if (bdev)
+		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+out_free_sb:
+	kfree(sb);
+out_free_path:
+	kfree(path);
+out_module_put:
+	module_put(THIS_MODULE);
+out:
	pr_info("error %s: %s", path, err);
-	goto out;
+	return ret;
}