author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-03 05:08:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-03 05:08:49 +0000
commit    76047c0312414915035f4ebbaa533fe3817a21e0 (patch)
tree      86f79ba3c1f753b1d39be793455035794fade031 /fs
parent    Adding upstream version 6.8.11. (diff)
download  linux-76047c0312414915035f4ebbaa533fe3817a21e0.tar.xz
          linux-76047c0312414915035f4ebbaa533fe3817a21e0.zip

Adding upstream version 6.8.12. (tag: upstream/6.8.12)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fs')
 fs/btrfs/ioctl.c                  |  33
 fs/btrfs/qgroup.c                 |  21
 fs/dlm/ast.c                      |  14
 fs/dlm/dlm_internal.h             |   1
 fs/dlm/user.c                     |  15
 fs/ecryptfs/keystore.c            |   4
 fs/eventpoll.c                    |  38
 fs/exec.c                         |  11
 fs/ext4/inode.c                   |   3
 fs/ext4/mballoc.c                 |   1
 fs/ext4/namei.c                   |   2
 fs/gfs2/glock.c                   |  91
 fs/gfs2/glock.h                   |   1
 fs/gfs2/glops.c                   |   3
 fs/gfs2/incore.h                  |   1
 fs/gfs2/lock_dlm.c                |  32
 fs/gfs2/ops_fstype.c              |   1
 fs/gfs2/super.c                   |   3
 fs/gfs2/util.c                    |   1
 fs/jffs2/xattr.c                  |   3
 fs/libfs.c                        | 147
 fs/nfsd/nfsctl.c                  |   4
 fs/nilfs2/ioctl.c                 |   2
 fs/nilfs2/segment.c               |  63
 fs/ntfs3/dir.c                    |   1
 fs/ntfs3/index.c                  |   6
 fs/ntfs3/inode.c                  |   7
 fs/ntfs3/record.c                 |  11
 fs/ntfs3/super.c                  |   2
 fs/openpromfs/inode.c             |   8
 fs/smb/server/mgmt/share_config.c |   6
 fs/smb/server/oplock.c            |  21
 32 files changed, 405 insertions(+), 152 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 6b93fae744..8851ba7a1e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3722,15 +3722,43 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
goto drop_write;
}
- down_write(&fs_info->subvol_sem);
-
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA:
+ down_write(&fs_info->subvol_sem);
ret = btrfs_quota_enable(fs_info, sa);
+ up_write(&fs_info->subvol_sem);
break;
case BTRFS_QUOTA_CTL_DISABLE:
+ /*
+ * Lock the cleaner mutex to prevent races with concurrent
+ * relocation, because relocation may be building backrefs for
+ * blocks of the quota root while we are deleting the root. This
+ * is like dropping fs roots of deleted snapshots/subvolumes, we
+ * need the same protection.
+ *
+ * This also prevents races between concurrent tasks trying to
+ * disable quotas, because we will unlock and relock
+ * qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ *
+ * We take this here because we have the dependency of
+ *
+ * inode_lock -> subvol_sem
+ *
+ * because of rename. With relocation we can prealloc extents,
+ * so that makes the dependency chain
+ *
+ * cleaner_mutex -> inode_lock -> subvol_sem
+ *
+ * so we must take the cleaner_mutex here before we take the
+ * subvol_sem. The deadlock can't actually happen, but this
+ * quiets lockdep.
+ */
+ mutex_lock(&fs_info->cleaner_mutex);
+ down_write(&fs_info->subvol_sem);
ret = btrfs_quota_disable(fs_info);
+ up_write(&fs_info->subvol_sem);
+ mutex_unlock(&fs_info->cleaner_mutex);
break;
default:
ret = -EINVAL;
@@ -3738,7 +3766,6 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
}
kfree(sa);
- up_write(&fs_info->subvol_sem);
drop_write:
mnt_drop_write_file(file);
return ret;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 82d4559eb4..cacc12d0ff 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1342,16 +1342,10 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
lockdep_assert_held_write(&fs_info->subvol_sem);
/*
- * Lock the cleaner mutex to prevent races with concurrent relocation,
- * because relocation may be building backrefs for blocks of the quota
- * root while we are deleting the root. This is like dropping fs roots
- * of deleted snapshots/subvolumes, we need the same protection.
- *
- * This also prevents races between concurrent tasks trying to disable
- * quotas, because we will unlock and relock qgroup_ioctl_lock across
- * BTRFS_FS_QUOTA_ENABLED changes.
+	 * Relocation will mess with backrefs, so make sure the caller holds
+	 * cleaner_mutex to protect us from relocation.
*/
- mutex_lock(&fs_info->cleaner_mutex);
+ lockdep_assert_held(&fs_info->cleaner_mutex);
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
@@ -1373,9 +1367,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
+	/*
+	 * We have nothing held here and no trans handle; just return the
+	 * error if there is one.
+	 */
ret = flush_reservations(fs_info);
if (ret)
- goto out_unlock_cleaner;
+ return ret;
/*
* 1 For the root item
@@ -1439,9 +1437,6 @@ out:
btrfs_end_transaction(trans);
else if (trans)
ret = btrfs_commit_transaction(trans);
-out_unlock_cleaner:
- mutex_unlock(&fs_info->cleaner_mutex);
-
return ret;
}
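
Taken together, the two btrfs hunks above push cleaner_mutex acquisition out of btrfs_quota_disable() and into the ioctl path, with lockdep_assert_held() enforcing the new contract in the callee. The following is a minimal sketch of that push-the-lock-to-the-caller pattern; outer_mutex, inner_sem, do_disable() and ioctl_path() are hypothetical stand-ins for cleaner_mutex, subvol_sem and the real functions, not btrfs code.

#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/lockdep.h>

static DEFINE_MUTEX(outer_mutex);	/* plays the role of cleaner_mutex */
static DECLARE_RWSEM(inner_sem);	/* plays the role of subvol_sem */

/* The callee no longer takes the outer lock itself; it documents
 * and enforces the caller's obligation instead. */
static int do_disable(void)
{
	lockdep_assert_held(&outer_mutex);
	lockdep_assert_held_write(&inner_sem);
	/* ... the actual teardown work ... */
	return 0;
}

static long ioctl_path(void)
{
	long ret;

	/* The caller establishes the ordering outer -> inner once, so
	 * lockdep sees a single consistent dependency chain. */
	mutex_lock(&outer_mutex);
	down_write(&inner_sem);
	ret = do_disable();
	up_write(&inner_sem);
	mutex_unlock(&outer_mutex);
	return ret;
}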
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 1f2f70a1b8..decedc4ee1 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -12,6 +12,7 @@
#include <trace/events/dlm.h>
#include "dlm_internal.h"
+#include "lvb_table.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
@@ -42,6 +43,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
struct dlm_callback *cb;
+ int copy_lvb = 0;
int prev_mode;
if (flags & DLM_CB_BAST) {
@@ -73,6 +75,17 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
goto out;
}
}
+ } else if (flags & DLM_CB_CAST) {
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+ if (lkb->lkb_last_cast)
+ prev_mode = lkb->lkb_last_cb->mode;
+ else
+ prev_mode = -1;
+
+ if (!status && lkb->lkb_lksb->sb_lvbptr &&
+ dlm_lvb_operations[prev_mode + 1][mode + 1])
+ copy_lvb = 1;
+ }
}
cb = dlm_allocate_cb();
@@ -85,6 +98,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
cb->mode = mode;
cb->sb_status = status;
cb->sb_flags = (sbflags & 0x000000FF);
+ cb->copy_lvb = copy_lvb;
kref_init(&cb->ref);
if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index dfc444dad3..511d0b984f 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -222,6 +222,7 @@ struct dlm_callback {
int sb_status; /* copy to lksb status */
uint8_t sb_flags; /* copy to lksb flags */
int8_t mode; /* rq mode of bast, gr mode of cast */
+ int copy_lvb;
struct list_head list;
struct kref ref;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 9f9b684488..12a483deee 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -21,7 +21,6 @@
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
-#include "lvb_table.h"
#include "user.h"
#include "ast.h"
#include "config.h"
@@ -806,8 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
struct dlm_callback *cb;
- int rv, ret, copy_lvb = 0;
- int old_mode, new_mode;
+ int rv, ret;
if (count == sizeof(struct dlm_device_version)) {
rv = copy_version_to_user(buf, count);
@@ -864,9 +862,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
- /* rem_lkb_callback sets a new lkb_last_cast */
- old_mode = lkb->lkb_last_cast->mode;
-
rv = dlm_dequeue_lkb_callback(lkb, &cb);
switch (rv) {
case DLM_DEQUEUE_CALLBACK_EMPTY:
@@ -895,12 +890,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
if (cb->flags & DLM_CB_BAST) {
trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
} else if (cb->flags & DLM_CB_CAST) {
- new_mode = cb->mode;
-
- if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
- dlm_lvb_operations[old_mode + 1][new_mode + 1])
- copy_lvb = 1;
-
lkb->lkb_lksb->sb_status = cb->sb_status;
lkb->lkb_lksb->sb_flags = cb->sb_flags;
trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
@@ -908,7 +897,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
ret = copy_result_to_user(lkb->lkb_ua,
test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
- cb->flags, cb->mode, copy_lvb, buf, count);
+ cb->flags, cb->mode, cb->copy_lvb, buf, count);
kref_put(&cb->ref, dlm_release_callback);
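
The net effect of the dlm hunks: whether the LVB needs to be copied back to user space is now decided once, at callback enqueue time, and carried inside struct dlm_callback, instead of being recomputed in device_read() from lkb_last_cast, which a later callback may already have replaced. A minimal sketch of the capture-at-enqueue pattern follows; struct event, want_payload and event_enqueue() are hypothetical names, not dlm code.

#include <linux/list.h>
#include <linux/slab.h>

struct event {
	int mode;
	int copy_payload;		/* decision frozen at enqueue time */
	struct list_head list;
};

static int event_enqueue(struct list_head *queue, int prev_mode, int mode,
			 bool want_payload)
{
	struct event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;
	ev->mode = mode;
	/* Capture the decision against the state visible right now; by
	 * the time the consumer dequeues this event, prev_mode may
	 * describe a different, newer callback. */
	ev->copy_payload = want_payload && prev_mode != mode;
	list_add_tail(&ev->list, queue);
	return 0;
}

The consumer then reads only ev->copy_payload and never touches the mutable shared state the decision was derived from, which is exactly what the new cb->copy_lvb field achieves.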
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3fe41964c0..7f9f68c00e 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -300,9 +300,11 @@ write_tag_66_packet(char *signature, u8 cipher_code,
* | Key Identifier Size | 1 or 2 bytes |
* | Key Identifier | arbitrary |
* | File Encryption Key Size | 1 or 2 bytes |
+ * | Cipher Code | 1 byte |
* | File Encryption Key | arbitrary |
+ * | Checksum | 2 bytes |
*/
- data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
+ data_len = (8 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
*packet = kmalloc(data_len, GFP_KERNEL);
message = *packet;
if (!message) {
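
The bump from 5 to 8 follows directly from the corrected layout table above; a sketch of the worst-case accounting, assuming both variable-width size fields take their 2-byte form:

/* Tag 66 fixed overhead, worst case:
 *     1   packet type
 *   + 2   key identifier size field (1 or 2 bytes; assume 2)
 *   + 2   file encryption key size field (1 or 2 bytes; assume 2)
 *   + 1   cipher code
 *   + 2   checksum
 *   = 8   bytes, on top of the variable-length identifier
 *         (ECRYPTFS_SIG_SIZE_HEX) and key (crypt_stat->key_size).
 */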
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3534d36a14..c5a9a483fb 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -876,6 +876,34 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
}
/*
+ * The ffd.file pointer may be in the process of being torn down due to
+ * being closed, but we may not have finished eventpoll_release() yet.
+ *
+ * Normally, even with the atomic_long_inc_not_zero, the file may have
+ * been free'd and then gotten re-allocated to something else (since
+ * files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
+ *
+ * But for epoll, users hold the ep->mtx mutex, and as such any file in
+ * the process of being free'd will block in eventpoll_release_file()
+ * and thus the underlying file allocation will not be free'd, and the
+ * file re-use cannot happen.
+ *
+ * For the same reason we can avoid a rcu_read_lock() around the
+ * operation - 'ffd.file' cannot go away even if the refcount has
+ * reached zero (but we must still not call out to ->poll() functions
+ * etc).
+ */
+static struct file *epi_fget(const struct epitem *epi)
+{
+ struct file *file;
+
+ file = epi->ffd.file;
+ if (!atomic_long_inc_not_zero(&file->f_count))
+ file = NULL;
+ return file;
+}
+
+/*
* Differs from ep_eventpoll_poll() in that internal callers already have
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
@@ -883,14 +911,22 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
int depth)
{
- struct file *file = epi->ffd.file;
+ struct file *file = epi_fget(epi);
__poll_t res;
+ /*
+ * We could return EPOLLERR | EPOLLHUP or something, but let's
+ * treat this more as "file doesn't exist, poll didn't happen".
+ */
+ if (!file)
+ return 0;
+
pt->_key = epi->event.events;
if (!is_file_epoll(file))
res = vfs_poll(file, pt);
else
res = __ep_eventpoll_poll(file, pt, depth);
+ fput(file);
return res & epi->event.events;
}
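
epi_fget() is an instance of the conditional-get idiom for SLAB_TYPESAFE_BY_RCU objects: take a reference only if the count is still nonzero, and treat failure as "object already going away". A minimal sketch of the same idiom for a hypothetical refcounted object (struct obj, obj_tryget, obj_put are illustrative names):

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
	atomic_long_t refcount;
	/* ... payload ... */
};

/* Returns the object with an extra reference held, or NULL if the
 * refcount already hit zero and the object is being torn down. */
static struct obj *obj_tryget(struct obj *o)
{
	return atomic_long_inc_not_zero(&o->refcount) ? o : NULL;
}

static void obj_put(struct obj *o)
{
	if (atomic_long_dec_and_test(&o->refcount))
		kfree(o);	/* last reference dropped */
}

What lets the epoll case skip rcu_read_lock() is the extra guarantee spelled out in the comment above: holders of ep->mtx block eventpoll_release_file(), so the struct file memory cannot be recycled underneath epi_fget().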
diff --git a/fs/exec.c b/fs/exec.c
index 5ee2545c3e..f11cfd7bce 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -67,6 +67,7 @@
#include <linux/time_namespace.h>
#include <linux/user_events.h>
#include <linux/rseq.h>
+#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -268,6 +269,14 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
}
/*
+ * Needs to be called with the mmap write lock
+ * held, to avoid racing with ksmd.
+ */
+ err = ksm_execve(mm);
+ if (err)
+ goto err_ksm;
+
+ /*
* Place the stack at the largest stack address the architecture
* supports. Later, we'll move this to an appropriate place. We don't
* use STACK_TOP because that can depend on attributes which aren't
@@ -288,6 +297,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
+ ksm_exit(mm);
+err_ksm:
mmap_write_unlock(mm);
err_free:
bprm->vma = NULL;
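
The exec.c hunks follow the standard goto-unwind discipline: a new setup step (ksm_execve()) gets a matching cleanup (ksm_exit()) spliced into the error path in reverse order of setup. A minimal sketch, with step_a/step_b/undo_a as hypothetical stand-ins:

static int step_a(void) { return 0; }	/* think: ksm_execve() */
static int step_b(void) { return 0; }	/* think: mapping the stack vma */
static void undo_a(void) { }		/* think: ksm_exit() */

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;
	err = step_b();
	if (err)
		goto err_b;
	return 0;

err_b:
	undo_a();	/* tear down in reverse order of construction */
err_a:
	return err;
}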
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2ccf3b5e3a..31604907af 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2887,9 +2887,6 @@ retry:
if (IS_ERR(folio))
return PTR_ERR(folio);
- /* In case writeback began while the folio was unlocked */
- folio_wait_stable(folio);
-
#ifdef CONFIG_FS_ENCRYPTION
ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
#else
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index cf407425d0..00b0839b53 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -6119,6 +6119,7 @@ ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
ext4_mb_mark_bb(sb, block, 1, true);
ar->len = 1;
+ *errp = 0;
return block;
}
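
The single added line matters because callers judge success by *errp as much as by the returned block number, so the simple-mode success path must clear any stale value explicitly. A tiny sketch of the out-parameter convention, with hypothetical helpers:

static long find_free_block(void) { return 42; }	/* hypothetical */

static long alloc_one(int *errp)
{
	long block = find_free_block();

	if (block < 0) {
		*errp = (int)block;	/* failure: report the errno */
		return 0;
	}
	*errp = 0;	/* success: must overwrite any stale error */
	return block;
}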
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 05b647e6bc..58fee3c6fe 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2898,7 +2898,7 @@ retry:
inode = ext4_new_inode_start_handle(idmap, dir, mode,
NULL, 0, NULL,
EXT4_HT_DIR,
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
+ EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) +
4 + EXT4_XATTR_TRANS_BLOCKS);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 34540f9d01..2507fe34cb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -166,19 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
return true;
}
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void __gfs2_glock_free(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ __gfs2_glock_free(gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_kill_wait);
+}
+
+void gfs2_glock_free_later(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ spin_lock(&lru_lock);
+ list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
+ spin_unlock(&lru_lock);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_kill_wait);
}
+static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
+{
+ struct list_head *list = &sdp->sd_dead_glocks;
+
+	while (!list_empty(list)) {
+ struct gfs2_glock *gl;
+
+ gl = list_first_entry(list, struct gfs2_glock, gl_lru);
+ list_del_init(&gl->gl_lru);
+ __gfs2_glock_free(gl);
+ }
+}
+
/**
* gfs2_glock_hold() - increment reference count on glock
* @gl: The glock to hold
@@ -591,7 +617,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
- spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
state_change(gl, state);
gh = find_first_waiter(gl);
@@ -639,7 +664,6 @@ retry:
gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
- spin_unlock(&gl->gl_lockref.lock);
return;
}
@@ -662,7 +686,6 @@ retry:
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_lockref.lock);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -690,6 +713,7 @@ __acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
@@ -718,6 +742,9 @@ __acquires(&gl->gl_lockref.lock)
(gl->gl_state == LM_ST_EXCLUSIVE) ||
(lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
clear_bit(GLF_BLOCKING, &gl->gl_flags);
+ if (!glops->go_inval && !glops->go_sync)
+ goto skip_inval;
+
spin_unlock(&gl->gl_lockref.lock);
if (glops->go_sync) {
ret = glops->go_sync(gl);
@@ -730,6 +757,7 @@ __acquires(&gl->gl_lockref.lock)
fs_err(sdp, "Error %d syncing glock \n", ret);
gfs2_dump_glock(NULL, gl, true);
}
+ spin_lock(&gl->gl_lockref.lock);
goto skip_inval;
}
}
@@ -750,9 +778,10 @@ __acquires(&gl->gl_lockref.lock)
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
+ spin_lock(&gl->gl_lockref.lock);
skip_inval:
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
/*
* Check for an error encountered since we called go_sync and go_inval.
* If so, we can't withdraw from the glock code because the withdraw
@@ -794,31 +823,37 @@ skip_inval:
*/
clear_bit(GLF_LOCK, &gl->gl_flags);
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
- gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
- goto out;
+ __gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+ return;
} else {
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
}
- if (sdp->sd_lockstruct.ls_ops->lm_lock) {
- /* lock_dlm */
- ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
+ if (ls->ls_ops->lm_lock) {
+ spin_unlock(&gl->gl_lockref.lock);
+ ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+ spin_lock(&gl->gl_lockref.lock);
+
if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED &&
- test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
+ test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ /*
+ * The lockspace has been released and the lock has
+ * been unlocked implicitly.
+ */
} else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret);
- GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
+ target = gl->gl_state | LM_OUT_ERROR;
+ } else {
+ /* The operation will be completed asynchronously. */
+ return;
}
- } else { /* lock_nolock */
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
}
-out:
- spin_lock(&gl->gl_lockref.lock);
+
+ /* Complete the operation now. */
+ finish_xmote(gl, target);
+ __gfs2_glock_queue_work(gl, 0);
}
/**
@@ -1071,11 +1106,12 @@ static void glock_work_func(struct work_struct *work)
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
+ if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
- spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2148,8 +2184,11 @@ static void thaw_glock(struct gfs2_glock *gl)
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
+
+ spin_lock(&gl->gl_lockref.lock);
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
/**
@@ -2225,6 +2264,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
wait_event_timeout(sdp->sd_kill_wait,
atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600);
+ gfs2_lm_unmount(sdp);
+ gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
}
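
The glock.c changes introduce a classic deferred-free list: glocks whose dlm_unlock() is being skipped are parked on sd_dead_glocks and reaped only after gfs2_lm_unmount() has torn down the lockspace, so late DLM callbacks never touch freed memory. A minimal sketch of the pattern follows; struct item and the function names are hypothetical. The real code drains without the lock because unmount is single-threaded by then; the sketch keeps the lock for generality.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static LIST_HEAD(dead_list);
static DEFINE_SPINLOCK(dead_lock);

struct item {
	struct list_head entry;
};

/* Called while freeing immediately is still unsafe. */
static void item_free_later(struct item *it)
{
	spin_lock(&dead_lock);
	list_add(&it->entry, &dead_list);
	spin_unlock(&dead_lock);
}

/* Called once the external dependency (here: the lockspace) is gone. */
static void reap_dead_items(void)
{
	spin_lock(&dead_lock);
	while (!list_empty(&dead_list)) {
		struct item *it = list_first_entry(&dead_list,
						   struct item, entry);

		list_del_init(&it->entry);
		kfree(it);	/* safe: kfree() does not sleep */
	}
	spin_unlock(&dead_lock);
}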
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 0114f3e0eb..86987a59a0 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -252,6 +252,7 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
void gfs2_glock_free(struct gfs2_glock *gl);
+void gfs2_glock_free_later(struct gfs2_glock *gl);
int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 45653cbc8a..e0e8dfeee7 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
+
+ if (gfs2_withdrawing(sdp))
+ gfs2_withdraw(sdp);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 95a334d64d..60abd7050c 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -838,6 +838,7 @@ struct gfs2_sbd {
/* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh;
struct mutex sd_freeze_mutex;
+ struct list_head sd_dead_glocks;
char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
char sd_table_name[GFS2_FSNAME_LEN];
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index d1ac5d0679..e028e55e67 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg)
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
+ /* If the glock is dead, we only react to a dlm_unlock() reply. */
+ if (__lockref_is_dead(&gl->gl_lockref) &&
+ gl->gl_lksb.sb_status != -DLM_EUNLOCK)
+ return;
+
gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode)
{
struct gfs2_glock *gl = arg;
+ if (__lockref_is_dead(&gl->gl_lockref))
+ return;
+
switch (mode) {
case DLM_LOCK_EX:
gfs2_glock_cb(gl, LM_ST_UNLOCKED);
@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
- if (gl->gl_lksb.sb_lkid == 0)
- goto out_free;
+ BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
+
+ if (gl->gl_lksb.sb_lkid == 0) {
+ gfs2_glock_free(gl);
+ return;
+ }
clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -300,13 +312,17 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_update_request_times(gl);
/* don't want to call dlm if we've unmounted the lock protocol */
- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
- goto out_free;
+ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ gfs2_glock_free(gl);
+ return;
+ }
/* don't want to skip dlm_unlock writing the lvb when lock has one */
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- !gl->gl_lksb.sb_lvbptr)
- goto out_free;
+ !gl->gl_lksb.sb_lvbptr) {
+ gfs2_glock_free_later(gl);
+ return;
+ }
again:
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
@@ -321,10 +337,6 @@ again:
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error);
}
- return;
-
-out_free:
- gfs2_glock_free(gl);
}
static void gdlm_cancel(struct gfs2_glock *gl)
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1281e60be6..db0df091a6 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
atomic_set(&sdp->sd_log_in_flight, 0);
init_waitqueue_head(&sdp->sd_log_flush_wait);
mutex_init(&sdp->sd_freeze_mutex);
+ INIT_LIST_HEAD(&sdp->sd_dead_glocks);
return sdp;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e5f7946634..2d780b4701 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -646,10 +646,7 @@ restart:
gfs2_gl_hash_clear(sdp);
truncate_inode_pages_final(&sdp->sd_aspace);
gfs2_delete_debugfs_file(sdp);
- /* Unmount the locking protocol */
- gfs2_lm_unmount(sdp);
- /* At this point, we're through participating in the lockspace */
gfs2_sys_fs_del(sdp);
free_sbd(sdp);
}
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f52141ce94..fc3ecb180a 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
fs_err(sdp, "telling LM to unmount\n");
lm->lm_unmount(sdp);
}
- set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
fs_err(sdp, "File system withdrawn\n");
dump_stack();
clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 00224f3a8d..defb4162c3 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -1110,6 +1110,9 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
return rc;
request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
+ if (request > c->sector_size - c->cleanmarker_size)
+ return -ERANGE;
+
rc = jffs2_reserve_space(c, request, &length,
ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
if (rc) {
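
The new check fails oversized requests early instead of letting jffs2_reserve_space() loop over garbage collection that can never succeed. A quick worked bound, assuming for illustration a 64 KiB erase block and a 12-byte cleanmarker:

/* request <= sector_size - cleanmarker_size must hold, because a
 * node cannot span erase blocks:
 *   65536 - 12 = 65524 bytes is the largest writable request;
 * anything bigger gets -ERANGE immediately.
 */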
diff --git a/fs/libfs.c b/fs/libfs.c
index eec6031b01..ef700d39f0 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -240,17 +240,22 @@ const struct inode_operations simple_dir_inode_operations = {
};
EXPORT_SYMBOL(simple_dir_inode_operations);
-static void offset_set(struct dentry *dentry, u32 offset)
+/* 0 is '.', 1 is '..', so always start with offset 2 or more */
+enum {
+ DIR_OFFSET_MIN = 2,
+};
+
+static void offset_set(struct dentry *dentry, long offset)
{
- dentry->d_fsdata = (void *)((uintptr_t)(offset));
+ dentry->d_fsdata = (void *)offset;
}
-static u32 dentry2offset(struct dentry *dentry)
+static long dentry2offset(struct dentry *dentry)
{
- return (u32)((uintptr_t)(dentry->d_fsdata));
+ return (long)dentry->d_fsdata;
}
-static struct lock_class_key simple_offset_xa_lock;
+static struct lock_class_key simple_offset_lock_class;
/**
* simple_offset_init - initialize an offset_ctx
@@ -259,11 +264,9 @@ static struct lock_class_key simple_offset_xa_lock;
*/
void simple_offset_init(struct offset_ctx *octx)
{
- xa_init_flags(&octx->xa, XA_FLAGS_ALLOC1);
- lockdep_set_class(&octx->xa.xa_lock, &simple_offset_xa_lock);
-
- /* 0 is '.', 1 is '..', so always start with offset 2 */
- octx->next_offset = 2;
+ mt_init_flags(&octx->mt, MT_FLAGS_ALLOC_RANGE);
+ lockdep_set_class(&octx->mt.ma_lock, &simple_offset_lock_class);
+ octx->next_offset = DIR_OFFSET_MIN;
}
/**
@@ -271,20 +274,19 @@ void simple_offset_init(struct offset_ctx *octx)
* @octx: directory offset ctx to be updated
* @dentry: new dentry being added
*
- * Returns zero on success. @so_ctx and the dentry offset are updated.
+ * Returns zero on success. @octx and the dentry's offset are updated.
* Otherwise, a negative errno value is returned.
*/
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
{
- static const struct xa_limit limit = XA_LIMIT(2, U32_MAX);
- u32 offset;
+ unsigned long offset;
int ret;
if (dentry2offset(dentry) != 0)
return -EBUSY;
- ret = xa_alloc_cyclic(&octx->xa, &offset, dentry, limit,
- &octx->next_offset, GFP_KERNEL);
+ ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN,
+ LONG_MAX, &octx->next_offset, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -292,6 +294,18 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
return 0;
}
+static int simple_offset_replace(struct offset_ctx *octx, struct dentry *dentry,
+ long offset)
+{
+ int ret;
+
+ ret = mtree_store(&octx->mt, offset, dentry, GFP_KERNEL);
+ if (ret)
+ return ret;
+ offset_set(dentry, offset);
+ return 0;
+}
+
/**
* simple_offset_remove - Remove an entry from a directory's offset map
* @octx: directory offset ctx to be updated
@@ -300,23 +314,88 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
*/
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
{
- u32 offset;
+ long offset;
offset = dentry2offset(dentry);
if (offset == 0)
return;
- xa_erase(&octx->xa, offset);
+ mtree_erase(&octx->mt, offset);
offset_set(dentry, 0);
}
/**
+ * simple_offset_empty - Check if a dentry can be unlinked
+ * @dentry: dentry to be tested
+ *
+ * Returns 0 if @dentry is a non-empty directory; otherwise returns 1.
+ */
+int simple_offset_empty(struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+ struct offset_ctx *octx;
+ struct dentry *child;
+ unsigned long index;
+ int ret = 1;
+
+ if (!inode || !S_ISDIR(inode->i_mode))
+ return ret;
+
+ index = DIR_OFFSET_MIN;
+ octx = inode->i_op->get_offset_ctx(inode);
+ mt_for_each(&octx->mt, child, index, LONG_MAX) {
+ spin_lock(&child->d_lock);
+ if (simple_positive(child)) {
+ spin_unlock(&child->d_lock);
+ ret = 0;
+ break;
+ }
+ spin_unlock(&child->d_lock);
+ }
+
+ return ret;
+}
+
+/**
+ * simple_offset_rename - handle directory offsets for rename
+ * @old_dir: parent directory of source entry
+ * @old_dentry: dentry of source entry
+ * @new_dir: parent directory of destination entry
+ * @new_dentry: dentry of destination
+ *
+ * Caller provides appropriate serialization.
+ *
+ * User space expects the directory offset value of the replaced
+ * (new) directory entry to be unchanged after a rename.
+ *
+ * Returns zero on success, a negative errno value on failure.
+ */
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
+ struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
+ long new_offset = dentry2offset(new_dentry);
+
+ simple_offset_remove(old_ctx, old_dentry);
+
+ if (new_offset) {
+ offset_set(new_dentry, 0);
+ return simple_offset_replace(new_ctx, old_dentry, new_offset);
+ }
+ return simple_offset_add(new_ctx, old_dentry);
+}
+
+/**
* simple_offset_rename_exchange - exchange rename with directory offsets
* @old_dir: parent of dentry being moved
* @old_dentry: dentry being moved
* @new_dir: destination parent
* @new_dentry: destination dentry
*
+ * This API preserves the directory offset values. Caller provides
+ * appropriate serialization.
+ *
* Returns zero on success. Otherwise a negative errno is returned and the
* rename is rolled back.
*/
@@ -327,18 +406,18 @@ int simple_offset_rename_exchange(struct inode *old_dir,
{
struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
- u32 old_index = dentry2offset(old_dentry);
- u32 new_index = dentry2offset(new_dentry);
+ long old_index = dentry2offset(old_dentry);
+ long new_index = dentry2offset(new_dentry);
int ret;
simple_offset_remove(old_ctx, old_dentry);
simple_offset_remove(new_ctx, new_dentry);
- ret = simple_offset_add(new_ctx, old_dentry);
+ ret = simple_offset_replace(new_ctx, old_dentry, new_index);
if (ret)
goto out_restore;
- ret = simple_offset_add(old_ctx, new_dentry);
+ ret = simple_offset_replace(old_ctx, new_dentry, old_index);
if (ret) {
simple_offset_remove(new_ctx, old_dentry);
goto out_restore;
@@ -353,10 +432,8 @@ int simple_offset_rename_exchange(struct inode *old_dir,
return 0;
out_restore:
- offset_set(old_dentry, old_index);
- xa_store(&old_ctx->xa, old_index, old_dentry, GFP_KERNEL);
- offset_set(new_dentry, new_index);
- xa_store(&new_ctx->xa, new_index, new_dentry, GFP_KERNEL);
+ (void)simple_offset_replace(old_ctx, old_dentry, old_index);
+ (void)simple_offset_replace(new_ctx, new_dentry, new_index);
return ret;
}
@@ -369,7 +446,7 @@ out_restore:
*/
void simple_offset_destroy(struct offset_ctx *octx)
{
- xa_destroy(&octx->xa);
+ mtree_destroy(&octx->mt);
}
/**
@@ -399,15 +476,16 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
/* In this case, ->private_data is protected by f_pos_lock */
file->private_data = NULL;
- return vfs_setpos(file, offset, U32_MAX);
+ return vfs_setpos(file, offset, LONG_MAX);
}
-static struct dentry *offset_find_next(struct xa_state *xas)
+static struct dentry *offset_find_next(struct offset_ctx *octx, loff_t offset)
{
+ MA_STATE(mas, &octx->mt, offset, offset);
struct dentry *child, *found = NULL;
rcu_read_lock();
- child = xas_next_entry(xas, U32_MAX);
+ child = mas_find(&mas, LONG_MAX);
if (!child)
goto out;
spin_lock(&child->d_lock);
@@ -421,8 +499,8 @@ out:
static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
{
- u32 offset = dentry2offset(dentry);
struct inode *inode = d_inode(dentry);
+ long offset = dentry2offset(dentry);
return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset,
inode->i_ino, fs_umode_to_dtype(inode->i_mode));
@@ -430,12 +508,11 @@ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
{
- struct offset_ctx *so_ctx = inode->i_op->get_offset_ctx(inode);
- XA_STATE(xas, &so_ctx->xa, ctx->pos);
+ struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
struct dentry *dentry;
while (true) {
- dentry = offset_find_next(&xas);
+ dentry = offset_find_next(octx, ctx->pos);
if (!dentry)
return ERR_PTR(-ENOENT);
@@ -444,8 +521,8 @@ static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
break;
}
+ ctx->pos = dentry2offset(dentry) + 1;
dput(dentry);
- ctx->pos = xas.xa_index + 1;
}
return NULL;
}
@@ -481,7 +558,7 @@ static int offset_readdir(struct file *file, struct dir_context *ctx)
return 0;
/* In this case, ->private_data is protected by f_pos_lock */
- if (ctx->pos == 2)
+ if (ctx->pos == DIR_OFFSET_MIN)
file->private_data = NULL;
else if (file->private_data == ERR_PTR(-ENOENT))
return 0;
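
The libfs conversion swaps the xarray allocation API for the maple tree equivalents: mtree_alloc_cyclic() hands out new directory offsets, while mtree_store()/mtree_erase() back the new simple_offset_replace() helper and removal. A minimal standalone sketch of those calls; the tree and the demo functions are hypothetical, not part of offset_ctx.

#include <linux/gfp.h>
#include <linux/maple_tree.h>

static struct maple_tree tree;
static unsigned long next_offset = 2;

static void demo_init(void)
{
	/* MT_FLAGS_ALLOC_RANGE is required for the alloc_* API, just
	 * as XA_FLAGS_ALLOC1 was for xa_alloc_cyclic(). */
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
}

static int demo_add(void *entry)
{
	unsigned long offset;
	int ret;

	/* Hand out the next free index in [2, LONG_MAX], cycling
	 * through next_offset like xa_alloc_cyclic() did. */
	ret = mtree_alloc_cyclic(&tree, &offset, entry, 2, LONG_MAX,
				 &next_offset, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* Pin a specific index instead (the simple_offset_replace()
	 * case), then drop it again. */
	ret = mtree_store(&tree, offset, entry, GFP_KERNEL);
	if (ret)
		return ret;
	mtree_erase(&tree, offset);
	return 0;
}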
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index ea3c811424..d8f54eb745 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -48,12 +48,10 @@ enum {
NFSD_MaxBlkSize,
NFSD_MaxConnections,
NFSD_Filecache,
-#ifdef CONFIG_NFSD_V4
NFSD_Leasetime,
NFSD_Gracetime,
NFSD_RecoveryDir,
NFSD_V4EndGrace,
-#endif
NFSD_MaxReserved
};
@@ -1359,7 +1357,9 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
+#endif
[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
#endif
/* last one */ {""}
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index cfb6aca5ec..d6e3f20e88 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -60,7 +60,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
if (argv->v_nmembs == 0)
return 0;
- if (argv->v_size > PAGE_SIZE)
+ if ((size_t)argv->v_size > PAGE_SIZE)
return -EINVAL;
/*
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 2bfb08052d..5b3f530016 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2161,8 +2161,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
spin_lock(&sci->sc_state_lock);
if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
- sci->sc_timer.expires = jiffies + sci->sc_interval;
- add_timer(&sci->sc_timer);
+ if (sci->sc_task) {
+ sci->sc_timer.expires = jiffies + sci->sc_interval;
+ add_timer(&sci->sc_timer);
+ }
sci->sc_state |= NILFS_SEGCTOR_COMMIT;
}
spin_unlock(&sci->sc_state_lock);
@@ -2209,19 +2211,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
struct nilfs_segctor_wait_request wait_req;
int err = 0;
- spin_lock(&sci->sc_state_lock);
init_wait(&wait_req.wq);
wait_req.err = 0;
atomic_set(&wait_req.done, 0);
+ init_waitqueue_entry(&wait_req.wq, current);
+
+ /*
+ * To prevent a race issue where completion notifications from the
+ * log writer thread are missed, increment the request sequence count
+ * "sc_seq_request" and insert a wait queue entry using the current
+ * sequence number into the "sc_wait_request" queue at the same time
+ * within the lock section of "sc_state_lock".
+ */
+ spin_lock(&sci->sc_state_lock);
wait_req.seq = ++sci->sc_seq_request;
+ add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
spin_unlock(&sci->sc_state_lock);
- init_waitqueue_entry(&wait_req.wq, current);
- add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
- set_current_state(TASK_INTERRUPTIBLE);
wake_up(&sci->sc_wait_daemon);
for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /*
+ * Synchronize only while the log writer thread is alive.
+ * Leave flushing out after the log writer thread exits to
+ * the cleanup work in nilfs_segctor_destroy().
+ */
+ if (!sci->sc_task)
+ break;
+
if (atomic_read(&wait_req.done)) {
err = wait_req.err;
break;
@@ -2237,7 +2256,7 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
return err;
}
-static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
+static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
{
struct nilfs_segctor_wait_request *wrq, *n;
unsigned long flags;
@@ -2245,7 +2264,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
if (!atomic_read(&wrq->done) &&
- nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
+ (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
wrq->err = err;
atomic_set(&wrq->done, 1);
}
@@ -2363,10 +2382,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
*/
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
+ bool thread_is_alive;
+
spin_lock(&sci->sc_state_lock);
sci->sc_seq_accepted = sci->sc_seq_request;
+ thread_is_alive = (bool)sci->sc_task;
spin_unlock(&sci->sc_state_lock);
- del_timer_sync(&sci->sc_timer);
+
+ /*
+ * This function does not race with the log writer thread's
+ * termination. Therefore, deleting sc_timer, which should not be
+ * done after the log writer thread exits, can be done safely outside
+ * the area protected by sc_state_lock.
+ */
+ if (thread_is_alive)
+ del_timer_sync(&sci->sc_timer);
}
/**
@@ -2383,7 +2413,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
if (mode == SC_LSEG_SR) {
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
sci->sc_seq_done = sci->sc_seq_accepted;
- nilfs_segctor_wakeup(sci, err);
+ nilfs_segctor_wakeup(sci, err, false);
sci->sc_flush_request = 0;
} else {
if (mode == SC_FLUSH_FILE)
@@ -2392,7 +2422,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
- if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
+ if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
time_before(jiffies, sci->sc_timer.expires))
add_timer(&sci->sc_timer);
}
@@ -2582,6 +2612,7 @@ static int nilfs_segctor_thread(void *arg)
int timeout = 0;
sci->sc_timer_task = current;
+ timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
/* start sync. */
sci->sc_task = current;
@@ -2649,6 +2680,7 @@ static int nilfs_segctor_thread(void *arg)
end_thread:
/* end sync. */
sci->sc_task = NULL;
+ timer_shutdown_sync(&sci->sc_timer);
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
spin_unlock(&sci->sc_state_lock);
return 0;
@@ -2712,7 +2744,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
INIT_LIST_HEAD(&sci->sc_gc_inodes);
INIT_LIST_HEAD(&sci->sc_iput_queue);
INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
- timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
@@ -2766,6 +2797,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
+ /*
+ * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
+ * be called from delayed iput() via nilfs_evict_inode() and can race
+ * with the above log writer thread termination.
+ */
+ nilfs_segctor_wakeup(sci, 0, true);
+
if (flush_work(&sci->sc_iput_work))
flag = true;
@@ -2791,7 +2829,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
down_write(&nilfs->ns_segctor_sem);
- timer_shutdown_sync(&sci->sc_timer);
kfree(sci);
}
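
The segment.c fix is the textbook missed-wakeup discipline: the waiter must publish its wait-queue entry under the same lock the waker takes, and must set its task state before re-checking the condition. A minimal sketch of that ordering, with waitq, state_lock and done as hypothetical names:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(state_lock);
static DECLARE_WAIT_QUEUE_HEAD(waitq);
static bool done;

static void waiter(void)
{
	DECLARE_WAITQUEUE(wait, current);

	/* Publish the entry inside the waker's lock section, so a
	 * wakeup issued after this point is guaranteed to see us. */
	spin_lock(&state_lock);
	add_wait_queue(&waitq, &wait);
	spin_unlock(&state_lock);

	for (;;) {
		/* Go (logically) to sleep before checking: a wakeup
		 * racing in here merely makes schedule() return at
		 * once instead of being lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(done))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&waitq, &wait);
}

static void waker(void)
{
	spin_lock(&state_lock);
	WRITE_ONCE(done, true);
	spin_unlock(&state_lock);
	wake_up(&waitq);
}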
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 5cf3d9decf..45e556fd7c 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -475,6 +475,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
vbo = (u64)bit << index_bits;
if (vbo >= i_size) {
ntfs_inode_err(dir, "Looks like your dir is corrupt");
+ ctx->pos = eod;
err = -EINVAL;
goto out;
}
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index daabaad63a..14284f0ed4 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1533,6 +1533,11 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
goto out1;
}
+ if (data_size <= le64_to_cpu(alloc->nres.data_size)) {
+ /* Reuse index. */
+ goto out;
+ }
+
/* Increase allocation. */
err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
&indx->alloc_run, data_size, &data_size, true,
@@ -1546,6 +1551,7 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
if (in->name == I30_NAME)
i_size_write(&ni->vfs_inode, data_size);
+out:
*vbn = bit << indx->idx2vbn_bits;
return 0;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index eb7a8c9fba..05f169018c 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -37,7 +37,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
bool is_dir;
unsigned long ino = inode->i_ino;
u32 rp_fa = 0, asize, t32;
- u16 roff, rsize, names = 0;
+ u16 roff, rsize, names = 0, links = 0;
const struct ATTR_FILE_NAME *fname = NULL;
const struct INDEX_ROOT *root;
struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
@@ -200,11 +200,12 @@ next_attr:
rsize < SIZEOF_ATTRIBUTE_FILENAME)
goto out;
+ names += 1;
fname = Add2Ptr(attr, roff);
if (fname->type == FILE_NAME_DOS)
goto next_attr;
- names += 1;
+ links += 1;
if (name && name->len == fname->name_len &&
!ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
NULL, false))
@@ -429,7 +430,7 @@ end_enum:
ni->mi.dirty = true;
}
- set_nlink(inode, names);
+ set_nlink(inode, links);
if (S_ISDIR(mode)) {
ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
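
The names/links split mirrors Windows naming semantics; a concrete illustration of the difference, assuming a typical Win32 + DOS name pair:

/* A file carrying a paired Win32 + DOS short name plus one extra
 * hard link has three FILE_NAME attributes (names == 3), but the
 * DOS entry is only an alias of the Win32 one, so nlink must be
 * set from links == 2.
 */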
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 6aa3a9d44d..6c76503edc 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -534,16 +534,9 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
if (aoff + asize > used)
return false;
- if (ni && is_attr_indexed(attr)) {
+ if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
- struct ATTR_FILE_NAME *fname =
- attr->type != ATTR_NAME ?
- NULL :
- resident_data_ex(attr,
- SIZEOF_ATTRIBUTE_FILENAME);
- if (fname && fname->type == FILE_NAME_DOS) {
- /* Do not decrease links count deleting DOS name. */
- } else if (!links) {
+ if (!links) {
/* minor error. Not critical. */
} else {
ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index cef5467fd9..4643b06b15 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -1804,8 +1804,6 @@ static int __init init_ntfs_fs(void)
{
int err;
- pr_info("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
-
if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL))
pr_info("ntfs3: Enabled Linux POSIX ACLs support\n");
if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER))
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index c4b65a6d41..5cf1809d47 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -355,10 +355,10 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
return inode;
}
-static int openprom_remount(struct super_block *sb, int *flags, char *data)
+static int openpromfs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_NOATIME;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_NOATIME;
return 0;
}
@@ -366,7 +366,6 @@ static const struct super_operations openprom_sops = {
.alloc_inode = openprom_alloc_inode,
.free_inode = openprom_free_inode,
.statfs = simple_statfs,
- .remount_fs = openprom_remount,
};
static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
@@ -415,6 +414,7 @@ static int openpromfs_get_tree(struct fs_context *fc)
static const struct fs_context_operations openpromfs_context_ops = {
.get_tree = openpromfs_get_tree,
+ .reconfigure = openpromfs_reconfigure,
};
static int openpromfs_init_fs_context(struct fs_context *fc)
diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
index a2f0a2edce..e0a6b75809 100644
--- a/fs/smb/server/mgmt/share_config.c
+++ b/fs/smb/server/mgmt/share_config.c
@@ -165,8 +165,12 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
GFP_KERNEL);
- if (share->path)
+ if (share->path) {
share->path_sz = strlen(share->path);
+ while (share->path_sz > 1 &&
+ share->path[share->path_sz - 1] == '/')
+ share->path[--share->path_sz] = '\0';
+ }
share->create_mask = resp->create_mask;
share->directory_mask = resp->directory_mask;
share->force_create_mode = resp->force_create_mode;
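
The added loop canonicalizes the configured share path by trimming trailing slashes, while the path_sz > 1 guard keeps a bare root share intact. For illustration:

/*  "/srv/share///"  ->  "/srv/share"   (path_sz shrinks by 3)
 *  "/"              ->  "/"            (guard keeps the root share)
 */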
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index dc729ab980..2292ca6ff0 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -613,19 +613,24 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
if (opinfo->op_state == OPLOCK_CLOSING)
return -ENOENT;
else if (opinfo->level <= req_op_level) {
- if (opinfo->is_lease &&
- opinfo->o_lease->state !=
- (SMB2_LEASE_HANDLE_CACHING_LE |
- SMB2_LEASE_READ_CACHING_LE))
+			if (!opinfo->is_lease)
+ return 1;
+
+ if (opinfo->o_lease->state !=
+ (SMB2_LEASE_HANDLE_CACHING_LE |
+ SMB2_LEASE_READ_CACHING_LE))
return 1;
}
}
if (opinfo->level <= req_op_level) {
- if (opinfo->is_lease &&
- opinfo->o_lease->state !=
- (SMB2_LEASE_HANDLE_CACHING_LE |
- SMB2_LEASE_READ_CACHING_LE)) {
+		if (!opinfo->is_lease) {
+ wake_up_oplock_break(opinfo);
+ return 1;
+ }
+ if (opinfo->o_lease->state !=
+ (SMB2_LEASE_HANDLE_CACHING_LE |
+ SMB2_LEASE_READ_CACHING_LE)) {
wake_up_oplock_break(opinfo);
return 1;
}