Diffstat (limited to 'drivers/md/bcache/super.c')
-rw-r--r--   drivers/md/bcache/super.c   136
1 file changed, 68 insertions(+), 68 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1e677af385..1402096b80 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
         submit_bio(bio);
 }
 
-static void bch_write_bdev_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
 {
-        struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+        closure_type(dc, struct cached_dev, sb_write);
 
         up(&dc->sb_write_mutex);
 }
@@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio)
         closure_put(&ca->set->sb_write);
 }
 
-static void bcache_write_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bcache_write_super_unlock)
 {
-        struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+        closure_type(c, struct cache_set, sb_write);
 
         up(&c->sb_write_mutex);
 }
@@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio)
         closure_put(cl);
 }
 
-static void uuid_io_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(uuid_io_unlock)
 {
-        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+        closure_type(c, struct cache_set, uuid_write);
 
         up(&c->uuid_write_mutex);
 }
@@ -1344,9 +1344,9 @@ void bch_cached_dev_release(struct kobject *kobj)
         module_put(THIS_MODULE);
 }
 
-static void cached_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_free)
 {
-        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+        closure_type(dc, struct cached_dev, disk.cl);
 
         if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
                 cancel_writeback_rate_update_dwork(dc);
@@ -1370,17 +1370,17 @@ static void cached_dev_free(struct closure *cl)
         if (dc->sb_disk)
                 put_page(virt_to_page(dc->sb_disk));
 
-        if (!IS_ERR_OR_NULL(dc->bdev))
-                blkdev_put(dc->bdev, dc);
+        if (dc->bdev_handle)
+                bdev_release(dc->bdev_handle);
 
         wake_up(&unregister_wait);
 
         kobject_put(&dc->disk.kobj);
 }
 
-static void cached_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_flush)
 {
-        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+        closure_type(dc, struct cached_dev, disk.cl);
         struct bcache_device *d = &dc->disk;
 
         mutex_lock(&bch_register_lock);
@@ -1446,7 +1446,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
 /* Cached device - bcache superblock */
 
 static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
-                                 struct block_device *bdev,
+                                 struct bdev_handle *bdev_handle,
                                  struct cached_dev *dc)
 {
         const char *err = "cannot allocate memory";
@@ -1454,14 +1454,15 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
         int ret = -ENOMEM;
 
         memcpy(&dc->sb, sb, sizeof(struct cache_sb));
-        dc->bdev = bdev;
+        dc->bdev_handle = bdev_handle;
+        dc->bdev = bdev_handle->bdev;
         dc->sb_disk = sb_disk;
 
         if (cached_dev_init(dc, sb->block_size << 9))
                 goto err;
 
         err = "error creating kobject";
-        if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
+        if (kobject_add(&dc->disk.kobj, bdev_kobj(dc->bdev), "bcache"))
                 goto err;
         if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                 goto err;
@@ -1498,9 +1499,9 @@ void bch_flash_dev_release(struct kobject *kobj)
         kfree(d);
 }
 
-static void flash_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_free)
 {
-        struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+        closure_type(d, struct bcache_device, cl);
 
         mutex_lock(&bch_register_lock);
         atomic_long_sub(bcache_dev_sectors_dirty(d),
@@ -1511,9 +1512,9 @@ static void flash_dev_free(struct closure *cl)
         kobject_put(&d->kobj);
 }
 
-static void flash_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_flush)
 {
-        struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+        closure_type(d, struct bcache_device, cl);
 
         mutex_lock(&bch_register_lock);
         bcache_device_unlink(d);
@@ -1669,9 +1670,9 @@ void bch_cache_set_release(struct kobject *kobj)
         module_put(THIS_MODULE);
 }
 
-static void cache_set_free(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_free)
 {
-        struct cache_set *c = container_of(cl, struct cache_set, cl);
+        closure_type(c, struct cache_set, cl);
         struct cache *ca;
 
         debugfs_remove(c->debug);
@@ -1710,9 +1711,9 @@ static void cache_set_free(struct closure *cl)
         kobject_put(&c->kobj);
 }
 
-static void cache_set_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_flush)
 {
-        struct cache_set *c = container_of(cl, struct cache_set, caching);
+        closure_type(c, struct cache_set, caching);
         struct cache *ca = c->cache;
         struct btree *b;
 
@@ -1807,9 +1808,9 @@ static void conditional_stop_bcache_device(struct cache_set *c,
         }
 }
 
-static void __cache_set_unregister(struct closure *cl)
+static CLOSURE_CALLBACK(__cache_set_unregister)
 {
-        struct cache_set *c = container_of(cl, struct cache_set, caching);
+        closure_type(c, struct cache_set, caching);
         struct cached_dev *dc;
         struct bcache_device *d;
         size_t i;
@@ -2218,8 +2219,8 @@ void bch_cache_release(struct kobject *kobj)
         if (ca->sb_disk)
                 put_page(virt_to_page(ca->sb_disk));
 
-        if (!IS_ERR_OR_NULL(ca->bdev))
-                blkdev_put(ca->bdev, ca);
+        if (ca->bdev_handle)
+                bdev_release(ca->bdev_handle);
 
         kfree(ca);
         module_put(THIS_MODULE);
@@ -2339,38 +2340,42 @@ err_free:
 }
 
 static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
-                                struct block_device *bdev, struct cache *ca)
+                                struct bdev_handle *bdev_handle,
+                                struct cache *ca)
 {
         const char *err = NULL; /* must be set for any error case */
         int ret = 0;
 
         memcpy(&ca->sb, sb, sizeof(struct cache_sb));
-        ca->bdev = bdev;
+        ca->bdev_handle = bdev_handle;
+        ca->bdev = bdev_handle->bdev;
         ca->sb_disk = sb_disk;
 
-        if (bdev_max_discard_sectors((bdev)))
+        if (bdev_max_discard_sectors((bdev_handle->bdev)))
                 ca->discard = CACHE_DISCARD(&ca->sb);
 
         ret = cache_alloc(ca);
         if (ret != 0) {
-                /*
-                 * If we failed here, it means ca->kobj is not initialized yet,
-                 * kobject_put() won't be called and there is no chance to
-                 * call blkdev_put() to bdev in bch_cache_release(). So we
-                 * explicitly call blkdev_put() here.
-                 */
-                blkdev_put(bdev, ca);
                 if (ret == -ENOMEM)
                         err = "cache_alloc(): -ENOMEM";
                 else if (ret == -EPERM)
                         err = "cache_alloc(): cache device is too small";
                 else
                         err = "cache_alloc(): unknown error";
-                goto err;
+                pr_notice("error %pg: %s\n", bdev_handle->bdev, err);
+                /*
+                 * If we failed here, it means ca->kobj is not initialized yet,
+                 * kobject_put() won't be called and there is no chance to
+                 * call bdev_release() to bdev in bch_cache_release(). So
+                 * we explicitly call bdev_release() here.
+                 */
+                bdev_release(bdev_handle);
+                return ret;
         }
 
-        if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
-                err = "error calling kobject_add";
+        if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
+                pr_notice("error %pg: error calling kobject_add\n",
+                          bdev_handle->bdev);
                 ret = -ENOMEM;
                 goto out;
         }
@@ -2384,15 +2389,10 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
                 goto out;
         }
 
-        pr_info("registered cache device %pg\n", ca->bdev);
+        pr_info("registered cache device %pg\n", ca->bdev_handle->bdev);
 
 out:
         kobject_put(&ca->kobj);
-
-err:
-        if (err)
-                pr_notice("error %pg: %s\n", ca->bdev, err);
-
         return ret;
 }
 
@@ -2447,7 +2447,7 @@ struct async_reg_args {
         char *path;
         struct cache_sb *sb;
         struct cache_sb_disk *sb_disk;
-        struct block_device *bdev;
+        struct bdev_handle *bdev_handle;
         void *holder;
 };
 
@@ -2458,8 +2458,8 @@ static void register_bdev_worker(struct work_struct *work)
                 container_of(work, struct async_reg_args, reg_work.work);
 
         mutex_lock(&bch_register_lock);
-        if (register_bdev(args->sb, args->sb_disk, args->bdev, args->holder)
-            < 0)
+        if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
+                          args->holder) < 0)
                 fail = true;
         mutex_unlock(&bch_register_lock);
 
@@ -2479,7 +2479,8 @@ static void register_cache_worker(struct work_struct *work)
                 container_of(work, struct async_reg_args, reg_work.work);
 
         /* blkdev_put() will be called in bch_cache_release() */
-        if (register_cache(args->sb, args->sb_disk, args->bdev, args->holder))
+        if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
+                           args->holder))
                 fail = true;
 
         if (fail)
@@ -2516,7 +2517,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
         char *path = NULL;
         struct cache_sb *sb;
         struct cache_sb_disk *sb_disk;
-        struct block_device *bdev, *bdev2;
+        struct bdev_handle *bdev_handle, *bdev_handle2;
         void *holder = NULL;
         ssize_t ret;
         bool async_registration = false;
@@ -2549,15 +2550,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 
         ret = -EINVAL;
         err = "failed to open device";
-        bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
-        if (IS_ERR(bdev))
+        bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+        if (IS_ERR(bdev_handle))
                 goto out_free_sb;
 
         err = "failed to set blocksize";
-        if (set_blocksize(bdev, 4096))
+        if (set_blocksize(bdev_handle->bdev, 4096))
                 goto out_blkdev_put;
 
-        err = read_super(sb, bdev, &sb_disk);
+        err = read_super(sb, bdev_handle->bdev, &sb_disk);
         if (err)
                 goto out_blkdev_put;
 
@@ -2569,13 +2570,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
         }
 
         /* Now reopen in exclusive mode with proper holder */
-        bdev2 = blkdev_get_by_dev(bdev->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
-                                  holder, NULL);
-        blkdev_put(bdev, NULL);
-        bdev = bdev2;
-        if (IS_ERR(bdev)) {
-                ret = PTR_ERR(bdev);
-                bdev = NULL;
+        bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
+                        BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
+        bdev_release(bdev_handle);
+        bdev_handle = bdev_handle2;
+        if (IS_ERR(bdev_handle)) {
+                ret = PTR_ERR(bdev_handle);
+                bdev_handle = NULL;
                 if (ret == -EBUSY) {
                         dev_t dev;
 
@@ -2610,7 +2611,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                 args->path      = path;
                 args->sb        = sb;
                 args->sb_disk   = sb_disk;
-                args->bdev      = bdev;
+                args->bdev_handle       = bdev_handle;
                 args->holder    = holder;
                 register_device_async(args);
                 /* No wait and returns to user space */
@@ -2619,14 +2620,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 
         if (SB_IS_BDEV(sb)) {
                 mutex_lock(&bch_register_lock);
-                ret = register_bdev(sb, sb_disk, bdev, holder);
+                ret = register_bdev(sb, sb_disk, bdev_handle, holder);
                 mutex_unlock(&bch_register_lock);
                 /* blkdev_put() will be called in cached_dev_free() */
                 if (ret < 0)
                         goto out_free_sb;
         } else {
                 /* blkdev_put() will be called in bch_cache_release() */
-                ret = register_cache(sb, sb_disk, bdev, holder);
+                ret = register_cache(sb, sb_disk, bdev_handle, holder);
                 if (ret)
                         goto out_free_sb;
         }
@@ -2642,8 +2643,8 @@ out_free_holder:
 out_put_sb_page:
         put_page(virt_to_page(sb_disk));
 out_blkdev_put:
-        if (bdev)
-                blkdev_put(bdev, holder);
+        if (bdev_handle)
+                bdev_release(bdev_handle);
 out_free_sb:
         kfree(sb);
 out_free_path:
@@ -2907,7 +2908,6 @@ static int __init bcache_init(void)
                 goto err;
 
         bch_debug_init();
-        closure_debug_init();
 
         bcache_is_reboot = false;
 
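
The hunks above apply two mechanical conversions: closure callbacks move from open-coded container_of() bodies to the CLOSURE_CALLBACK()/closure_type() helpers, and raw struct block_device pointers obtained with blkdev_get_by_path() and released with blkdev_put() are replaced by a struct bdev_handle obtained with bdev_open_by_path() and released with bdev_release(). The sketch below is not part of the patch; it is a minimal restatement of both idioms against a hypothetical struct my_dev (my_dev_free() and my_dev_open() are invented names), using only helpers that appear in the diff itself and assuming a tree where the closure helpers live in <linux/closure.h>.

/*
 * Illustrative only: the shape of the two conversions, not bcache code.
 */
#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_dev {
        struct closure          cl;             /* embedded closure */
        struct bdev_handle      *bdev_handle;   /* owns the open reference */
        struct block_device     *bdev;          /* cached for I/O helpers */
};

/* Old form: static void my_dev_free(struct closure *cl) + container_of(). */
static CLOSURE_CALLBACK(my_dev_free)
{
        closure_type(d, struct my_dev, cl);     /* declares struct my_dev *d */

        if (d->bdev_handle)                     /* was: blkdev_put(d->bdev, d) */
                bdev_release(d->bdev_handle);
        kfree(d);
}

/* Open by path, keep the handle; the raw bdev stays reachable via ->bdev. */
static int my_dev_open(struct my_dev *d, const char *path)
{
        struct bdev_handle *handle;

        handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
                                   d /* holder */, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        d->bdev_handle = handle;
        d->bdev = handle->bdev;
        return 0;
}

As in the patch, the handle owns the open reference, so every teardown path releases the handle instead of calling blkdev_put() on the raw block device.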