Diffstat (limited to 'fs/super.c')
-rw-r--r-- | fs/super.c | 102 |
1 file changed, 62 insertions, 40 deletions
diff --git a/fs/super.c b/fs/super.c
index 2d762ce67f..076392396e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -178,7 +178,7 @@ static void super_wake(struct super_block *sb, unsigned int flag)
  * One thing we have to be careful of with a per-sb shrinker is that we don't
  * drop the last active reference to the superblock from within the shrinker.
  * If that happens we could trigger unregistering the shrinker from within the
- * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
+ * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
  * take a passive reference to the superblock to avoid this from occurring.
  */
 static unsigned long super_cache_scan(struct shrinker *shrink,
@@ -191,7 +191,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 	long	dentries;
 	long	inodes;
 
-	sb = container_of(shrink, struct super_block, s_shrink);
+	sb = shrink->private_data;
 
 	/*
 	 * Deadlock avoidance. We may hold various FS locks, and we don't want
@@ -244,7 +244,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	struct super_block *sb;
 	long	total_objects = 0;
 
-	sb = container_of(shrink, struct super_block, s_shrink);
+	sb = shrink->private_data;
 
 	/*
 	 * We don't call super_trylock_shared() here as it is a scalability
@@ -306,7 +306,7 @@ static void destroy_unused_super(struct super_block *s)
 	security_sb_free(s);
 	put_user_ns(s->s_user_ns);
 	kfree(s->s_subtype);
-	free_prealloced_shrinker(&s->s_shrink);
+	shrinker_free(s->s_shrink);
 	/* no delays needed */
 	destroy_super_work(&s->destroy_work);
 }
@@ -383,16 +383,19 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 	s->s_time_min = TIME64_MIN;
 	s->s_time_max = TIME64_MAX;
 
-	s->s_shrink.seeks = DEFAULT_SEEKS;
-	s->s_shrink.scan_objects = super_cache_scan;
-	s->s_shrink.count_objects = super_cache_count;
-	s->s_shrink.batch = 1024;
-	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
-	if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
+	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
+				     "sb-%s", type->name);
+	if (!s->s_shrink)
 		goto fail;
-	if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
+
+	s->s_shrink->scan_objects = super_cache_scan;
+	s->s_shrink->count_objects = super_cache_count;
+	s->s_shrink->batch = 1024;
+	s->s_shrink->private_data = s;
+
+	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
 		goto fail;
-	if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
+	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
 		goto fail;
 	return s;
 
@@ -477,7 +480,7 @@ void deactivate_locked_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
 	if (atomic_dec_and_test(&s->s_active)) {
-		unregister_shrinker(&s->s_shrink);
+		shrinker_free(s->s_shrink);
 		fs->kill_sb(s);
 
 		kill_super_notify(s);
@@ -818,7 +821,7 @@ retry:
 	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
 	spin_unlock(&sb_lock);
 	get_filesystem(s->s_type);
-	register_shrinker_prepared(&s->s_shrink);
+	shrinker_register(s->s_shrink);
 	return s;
 
 share_extant_sb:
@@ -901,7 +904,7 @@ retry:
 	hlist_add_head(&s->s_instances, &type->fs_supers);
 	spin_unlock(&sb_lock);
 	get_filesystem(type);
-	register_shrinker_prepared(&s->s_shrink);
+	shrinker_register(s->s_shrink);
 	return s;
 }
 EXPORT_SYMBOL(sget);
@@ -1419,32 +1422,48 @@ EXPORT_SYMBOL(sget_dev);
 
 #ifdef CONFIG_BLOCK
 /*
- * Lock a super block that the callers holds a reference to.
+ * Lock the superblock that is holder of the bdev. Returns the superblock
+ * pointer if we successfully locked the superblock and it is alive. Otherwise
+ * we return NULL and just unlock bdev->bd_holder_lock.
  *
- * The caller needs to ensure that the super_block isn't being freed while
- * calling this function, e.g. by holding a lock over the call to this function
- * and the place that clears the pointer to the superblock used by this function
- * before freeing the superblock.
+ * The function must be called with bdev->bd_holder_lock and releases it.
  */
-static bool super_lock_shared_active(struct super_block *sb)
+static struct super_block *bdev_super_lock_shared(struct block_device *bdev)
+	__releases(&bdev->bd_holder_lock)
 {
-	bool born = super_lock_shared(sb);
+	struct super_block *sb = bdev->bd_holder;
+	bool born;
+
+	lockdep_assert_held(&bdev->bd_holder_lock);
+	lockdep_assert_not_held(&sb->s_umount);
+	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);
+	/* Make sure sb doesn't go away from under us */
+	spin_lock(&sb_lock);
+	sb->s_count++;
+	spin_unlock(&sb_lock);
+	mutex_unlock(&bdev->bd_holder_lock);
+
+	born = super_lock_shared(sb);
 
 	if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
 		super_unlock_shared(sb);
-		return false;
+		put_super(sb);
+		return NULL;
 	}
-	return true;
+	/*
+	 * The superblock is active and we hold s_umount, we can drop our
+	 * temporary reference now.
+	 */
+	put_super(sb);
+	return sb;
 }
 
 static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
 {
-	struct super_block *sb = bdev->bd_holder;
-
-	/* bd_holder_lock ensures that the sb isn't freed */
-	lockdep_assert_held(&bdev->bd_holder_lock);
+	struct super_block *sb;
 
-	if (!super_lock_shared_active(sb))
+	sb = bdev_super_lock_shared(bdev);
+	if (!sb)
 		return;
 
 	if (!surprise)
@@ -1459,11 +1478,10 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
 
 static void fs_bdev_sync(struct block_device *bdev)
 {
-	struct super_block *sb = bdev->bd_holder;
-
-	lockdep_assert_held(&bdev->bd_holder_lock);
+	struct super_block *sb;
 
-	if (!super_lock_shared_active(sb))
+	sb = bdev_super_lock_shared(bdev);
+	if (!sb)
 		return;
 	sync_filesystem(sb);
 	super_unlock_shared(sb);
@@ -1479,14 +1497,16 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
 		struct fs_context *fc)
 {
 	blk_mode_t mode = sb_open_mode(sb_flags);
+	struct bdev_handle *bdev_handle;
 	struct block_device *bdev;
 
-	bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
-	if (IS_ERR(bdev)) {
+	bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
+	if (IS_ERR(bdev_handle)) {
 		if (fc)
 			errorf(fc, "%s: Can't open blockdev", fc->source);
-		return PTR_ERR(bdev);
+		return PTR_ERR(bdev_handle);
 	}
+	bdev = bdev_handle->bdev;
 
 	/*
 	 * This really should be in blkdev_get_by_dev, but right now can't due
@@ -1494,7 +1514,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
 	 * writable from userspace even for a read-only block device.
 	 */
 	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
-		blkdev_put(bdev, sb);
+		bdev_release(bdev_handle);
 		return -EACCES;
 	}
 
@@ -1510,10 +1530,11 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
 		if (fc)
 			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
-		blkdev_put(bdev, sb);
+		bdev_release(bdev_handle);
 		return -EBUSY;
 	}
 	spin_lock(&sb_lock);
+	sb->s_bdev_handle = bdev_handle;
 	sb->s_bdev = bdev;
 	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
 	if (bdev_stable_writes(bdev))
@@ -1522,7 +1543,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
 
 	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
-	shrinker_debugfs_rename(&sb->s_shrink, "sb-%s:%s", sb->s_type->name,
+	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
 				sb->s_id);
 	sb_set_blocksize(sb, block_size(bdev));
 	return 0;
@@ -1646,7 +1667,7 @@ void kill_block_super(struct super_block *sb)
 	generic_shutdown_super(sb);
 	if (bdev) {
 		sync_blockdev(bdev);
-		blkdev_put(bdev, sb);
+		bdev_release(sb->s_bdev_handle);
 	}
 }
 
@@ -2139,3 +2160,4 @@ int sb_init_dio_done_wq(struct super_block *sb)
 		destroy_workqueue(wq);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
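
Note on the shrinker half of the patch: the embedded s_shrink structure is replaced by the dynamically allocated shrinker API. shrinker_alloc() takes over the field initialisation plus prealloc_shrinker(), the callbacks and private_data are filled in on the returned object, shrinker_register() replaces register_shrinker_prepared() once the superblock is hashed, and shrinker_free() handles both the never-registered (destroy_unused_super) and registered (deactivate_locked_super) teardown paths. A minimal sketch of that pattern, using hypothetical names (struct foo_sb, foo-sb, nr_cached) rather than the real superblock code:

#include <linux/shrinker.h>

/* Hypothetical stand-in for the superblock-side state. */
struct foo_sb {
	struct shrinker *shrink;
	unsigned long nr_cached;
};

static unsigned long foo_count(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	struct foo_sb *fsb = shrink->private_data;	/* instead of container_of() */

	return fsb->nr_cached;
}

static unsigned long foo_scan(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	struct foo_sb *fsb = shrink->private_data;
	unsigned long freed = min(fsb->nr_cached, sc->nr_to_scan);

	fsb->nr_cached -= freed;	/* real code would free objects here */
	return freed;
}

static int foo_attach_shrinker(struct foo_sb *fsb)
{
	fsb->shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "foo-sb");
	if (!fsb->shrink)
		return -ENOMEM;

	fsb->shrink->count_objects = foo_count;
	fsb->shrink->scan_objects = foo_scan;
	fsb->shrink->batch = 1024;
	fsb->shrink->private_data = fsb;

	/* publish only once the object is fully set up */
	shrinker_register(fsb->shrink);
	return 0;
}

static void foo_detach_shrinker(struct foo_sb *fsb)
{
	/* safe for both registered and never-registered shrinkers */
	shrinker_free(fsb->shrink);
}

The ordering mirrors the patch: allocation can still fail before the superblock is published (alloc_super() just frees everything again), and registration only happens after the sb is on the fs_supers list in sget()/sget_fc().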
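
Note on the block-device half: setup_bdev_super() and kill_block_super() move from blkdev_get_by_dev()/blkdev_put() to the handle-based API, storing the struct bdev_handle returned by bdev_open_by_dev() in sb->s_bdev_handle and releasing it with bdev_release() on every error path and at unmount. A minimal sketch of that open/close pairing against the same API level, with foo_open_bdev()/foo_close_bdev() as hypothetical helpers (the real code lives directly in setup_bdev_super() and kill_block_super()):

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical helpers mirroring the handle-based pairing in the patch. */
static int foo_open_bdev(struct super_block *sb, dev_t dev, blk_mode_t mode)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	sb->s_bdev_handle = handle;	/* kept so teardown can release the handle */
	sb->s_bdev = handle->bdev;	/* callers that only need the bdev are unchanged */
	return 0;
}

static void foo_close_bdev(struct super_block *sb)
{
	if (sb->s_bdev) {
		sync_blockdev(sb->s_bdev);
		bdev_release(sb->s_bdev_handle);	/* replaces blkdev_put(bdev, sb) */
	}
}

(bdev_open_by_dev()/bdev_release() were themselves replaced by file-based block device opens in later kernels, so this sketch only matches the API level this patch targets.)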