path: root/debian/patches-rt/0112-shmem-Use-raw_spinlock_t-for-stat_lock.patch
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:06:00 +0000
commit    b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 (patch)
tree      1500f2f8f276908a36d8126cb632c0d6b1276764 /debian/patches-rt/0112-shmem-Use-raw_spinlock_t-for-stat_lock.patch
parent    Adding upstream version 5.10.209. (diff)
download  linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.tar.xz
          linux-b15a952c52a6825376d3e7f6c1bf5c886c6d8b74.zip
Adding debian version 5.10.209-2. (debian/5.10.209-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0112-shmem-Use-raw_spinlock_t-for-stat_lock.patch')
-rw-r--r--  debian/patches-rt/0112-shmem-Use-raw_spinlock_t-for-stat_lock.patch  147
1 file changed, 147 insertions, 0 deletions
diff --git a/debian/patches-rt/0112-shmem-Use-raw_spinlock_t-for-stat_lock.patch b/debian/patches-rt/0112-shmem-Use-raw_spinlock_t-for-stat_lock.patch
new file mode 100644
index 000000000..41738b8a2
--- /dev/null
+++ b/debian/patches-rt/0112-shmem-Use-raw_spinlock_t-for-stat_lock.patch
@@ -0,0 +1,147 @@
+From 35616363024adeffc42af4d2ca91e4550d6db59c Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 14 Aug 2020 18:53:34 +0200
+Subject: [PATCH 112/323] shmem: Use raw_spinlock_t for ->stat_lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Each CPU has SHMEM_INO_BATCH inodes available in `->ino_batch' which is
+per-CPU. Access here is serialized by disabling preemption. If the pool is
+empty, it gets reloaded from `->next_ino'. Access here is serialized by
+->stat_lock which is a spinlock_t and cannot be acquired with disabled
+preemption.
+One way around it would be to make the per-CPU ino_batch struct containing
+the inode number a local_lock_t.
+Another solution is to promote ->stat_lock to a raw_spinlock_t. The critical
+sections are short. The mpol_put() should be moved outside of the critical
+section to avoid invoking the destructor with disabled preemption.
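+
+For illustration only (not part of the change below), a minimal sketch of the
+constraint, with hypothetical names: on PREEMPT_RT a spinlock_t becomes a
+sleeping lock and therefore must not be taken inside a preemption-disabled
+region such as the one opened by get_cpu(), while a raw_spinlock_t keeps
+spinning and remains valid there.
+
+  static DEFINE_RAW_SPINLOCK(example_lock);  /* stays a spinning lock on RT */
+  static unsigned long example_pool;         /* hypothetical shared counter */
+
+  static void example_refill(void)
+  {
+          get_cpu();                          /* disables preemption */
+          /* spin_lock() here could sleep on PREEMPT_RT and is not allowed */
+          raw_spin_lock(&example_lock);       /* fine with preemption disabled */
+          example_pool++;
+          raw_spin_unlock(&example_lock);
+          put_cpu();                          /* re-enables preemption */
+  }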
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/shmem_fs.h | 2 +-
+ mm/shmem.c | 31 +++++++++++++++++--------------
+ 2 files changed, 18 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index a5a5d1d4d7b1..0470d1582b09 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -31,7 +31,7 @@ struct shmem_sb_info {
+ struct percpu_counter used_blocks; /* How many are allocated */
+ unsigned long max_inodes; /* How many inodes are allowed */
+ unsigned long free_inodes; /* How many are left for allocation */
+- spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
++ raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ umode_t mode; /* Mount mode for root directory */
+ unsigned char huge; /* Whether to try for hugepages */
+ kuid_t uid; /* Mount uid for root directory */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index e173d83b4448..94c0964f0d1f 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -278,10 +278,10 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
+ ino_t ino;
+
+ if (!(sb->s_flags & SB_KERNMOUNT)) {
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ if (sbinfo->max_inodes) {
+ if (!sbinfo->free_inodes) {
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ return -ENOSPC;
+ }
+ sbinfo->free_inodes--;
+@@ -304,7 +304,7 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
+ }
+ *inop = ino;
+ }
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ } else if (inop) {
+ /*
+ * __shmem_file_setup, one of our callers, is lock-free: it
+@@ -319,13 +319,14 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
+ * to worry about things like glibc compatibility.
+ */
+ ino_t *next_ino;
++
+ next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
+ ino = *next_ino;
+ if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ ino = sbinfo->next_ino;
+ sbinfo->next_ino += SHMEM_INO_BATCH;
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ if (unlikely(is_zero_ino(ino)))
+ ino++;
+ }
+@@ -341,9 +342,9 @@ static void shmem_free_inode(struct super_block *sb)
+ {
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ if (sbinfo->max_inodes) {
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ sbinfo->free_inodes++;
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ }
+ }
+
+@@ -1484,10 +1485,10 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+ {
+ struct mempolicy *mpol = NULL;
+ if (sbinfo->mpol) {
+- spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
++ raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
+ mpol = sbinfo->mpol;
+ mpol_get(mpol);
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ }
+ return mpol;
+ }
+@@ -3613,9 +3614,10 @@ static int shmem_reconfigure(struct fs_context *fc)
+ struct shmem_options *ctx = fc->fs_private;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
+ unsigned long inodes;
++ struct mempolicy *mpol = NULL;
+ const char *err;
+
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ inodes = sbinfo->max_inodes - sbinfo->free_inodes;
+ if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
+ if (!sbinfo->max_blocks) {
+@@ -3660,14 +3662,15 @@ static int shmem_reconfigure(struct fs_context *fc)
+ * Preserve previous mempolicy unless mpol remount option was specified.
+ */
+ if (ctx->mpol) {
+- mpol_put(sbinfo->mpol);
++ mpol = sbinfo->mpol;
+ sbinfo->mpol = ctx->mpol; /* transfers initial ref */
+ ctx->mpol = NULL;
+ }
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
++ mpol_put(mpol);
+ return 0;
+ out:
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ return invalfc(fc, "%s", err);
+ }
+
+@@ -3784,7 +3787,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
+ sbinfo->mpol = ctx->mpol;
+ ctx->mpol = NULL;
+
+- spin_lock_init(&sbinfo->stat_lock);
++ raw_spin_lock_init(&sbinfo->stat_lock);
+ if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
+ goto failed;
+ spin_lock_init(&sbinfo->shrinklist_lock);
+--
+2.43.0
+