Diffstat
-rw-r--r--  debian/patches-rt/0211-mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch | 150
1 file changed, 150 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0211-mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch b/debian/patches-rt/0211-mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
new file mode 100644
index 000000000..772cff44a
--- /dev/null
+++ b/debian/patches-rt/0211-mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
@@ -0,0 +1,150 @@
+From 5bfa3eb76d50818c90253df970e7afbc747211c2 Mon Sep 17 00:00:00 2001
+From: "Luis Claudio R. Goncalves" <lgoncalv@redhat.com>
+Date: Tue, 25 Jun 2019 11:28:04 -0300
+Subject: [PATCH 211/323] mm/zswap: Use local lock to protect per-CPU data
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+zswap uses per-CPU compression. The per-CPU data pointer is acquired with
+get_cpu_ptr() which implicitly disables preemption. It allocates
+memory inside the preempt disabled region which conflicts with the
+PREEMPT_RT semantics.
+
+Replace the implicit preemption control with an explicit local lock.
+This allows RT kernels to substitute it with a real per-CPU lock, which
+serializes the access but keeps the code section preemptible. On non-RT
+kernels this maps to preempt_disable() as before, i.e. no functional
+change.
+
+[bigeasy: Use local_lock(), additional hunks, patch description]
+
+Cc: Seth Jennings <sjenning@redhat.com>
+Cc: Dan Streetman <ddstreet@ieee.org>
+Cc: Vitaly Wool <vitaly.wool@konsulko.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mm@kvack.org
+Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/zswap.c | 43 ++++++++++++++++++++++++++++---------------
+ 1 file changed, 28 insertions(+), 15 deletions(-)
+
+diff --git a/mm/zswap.c b/mm/zswap.c
+index fbb782924ccc..b24f761b9241 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -18,6 +18,7 @@
+ #include <linux/highmem.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/local_lock.h>
+ #include <linux/types.h>
+ #include <linux/atomic.h>
+ #include <linux/frontswap.h>
+@@ -387,27 +388,37 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
+ /*********************************
+ * per-cpu code
+ **********************************/
+-static DEFINE_PER_CPU(u8 *, zswap_dstmem);
++struct zswap_comp {
++ /* Used for per-CPU dstmem and tfm */
++ local_lock_t lock;
++ u8 *dstmem;
++};
++
++static DEFINE_PER_CPU(struct zswap_comp, zswap_comp) = {
++ .lock = INIT_LOCAL_LOCK(lock),
++};
+
+ static int zswap_dstmem_prepare(unsigned int cpu)
+ {
++ struct zswap_comp *zcomp;
+ u8 *dst;
+
+ dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+ if (!dst)
+ return -ENOMEM;
+
+- per_cpu(zswap_dstmem, cpu) = dst;
++ zcomp = per_cpu_ptr(&zswap_comp, cpu);
++ zcomp->dstmem = dst;
+ return 0;
+ }
+
+ static int zswap_dstmem_dead(unsigned int cpu)
+ {
+- u8 *dst;
++ struct zswap_comp *zcomp;
+
+- dst = per_cpu(zswap_dstmem, cpu);
+- kfree(dst);
+- per_cpu(zswap_dstmem, cpu) = NULL;
++ zcomp = per_cpu_ptr(&zswap_comp, cpu);
++ kfree(zcomp->dstmem);
++ zcomp->dstmem = NULL;
+
+ return 0;
+ }
+@@ -919,10 +930,11 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
+ dlen = PAGE_SIZE;
+ src = (u8 *)zhdr + sizeof(struct zswap_header);
+ dst = kmap_atomic(page);
+- tfm = *get_cpu_ptr(entry->pool->tfm);
++ local_lock(&zswap_comp.lock);
++ tfm = *this_cpu_ptr(entry->pool->tfm);
+ ret = crypto_comp_decompress(tfm, src, entry->length,
+ dst, &dlen);
+- put_cpu_ptr(entry->pool->tfm);
++ local_unlock(&zswap_comp.lock);
+ kunmap_atomic(dst);
+ BUG_ON(ret);
+ BUG_ON(dlen != PAGE_SIZE);
+@@ -1074,12 +1086,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
+ }
+
+ /* compress */
+- dst = get_cpu_var(zswap_dstmem);
+- tfm = *get_cpu_ptr(entry->pool->tfm);
++ local_lock(&zswap_comp.lock);
++ dst = *this_cpu_ptr(&zswap_comp.dstmem);
++ tfm = *this_cpu_ptr(entry->pool->tfm);
+ src = kmap_atomic(page);
+ ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
+ kunmap_atomic(src);
+- put_cpu_ptr(entry->pool->tfm);
+ if (ret) {
+ ret = -EINVAL;
+ goto put_dstmem;
+@@ -1103,7 +1115,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
+ memcpy(buf, &zhdr, hlen);
+ memcpy(buf + hlen, dst, dlen);
+ zpool_unmap_handle(entry->pool->zpool, handle);
+- put_cpu_var(zswap_dstmem);
++ local_unlock(&zswap_comp.lock);
+
+ /* populate entry */
+ entry->offset = offset;
+@@ -1131,7 +1143,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
+ return 0;
+
+ put_dstmem:
+- put_cpu_var(zswap_dstmem);
++ local_unlock(&zswap_comp.lock);
+ zswap_pool_put(entry->pool);
+ freepage:
+ zswap_entry_cache_free(entry);
+@@ -1176,9 +1188,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
+ if (zpool_evictable(entry->pool->zpool))
+ src += sizeof(struct zswap_header);
+ dst = kmap_atomic(page);
+- tfm = *get_cpu_ptr(entry->pool->tfm);
++ local_lock(&zswap_comp.lock);
++ tfm = *this_cpu_ptr(entry->pool->tfm);
+ ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
+- put_cpu_ptr(entry->pool->tfm);
++ local_unlock(&zswap_comp.lock);
+ kunmap_atomic(dst);
+ zpool_unmap_handle(entry->pool->zpool, entry->handle);
+ BUG_ON(ret);
+--
+2.43.0
+
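
For reference, below is a minimal stand-alone sketch of the local_lock pattern the patch adopts (identifiers such as demo_pcpu and demo_use are hypothetical and not part of the patch). On !PREEMPT_RT kernels local_lock() boils down to preempt_disable(), while on PREEMPT_RT it takes a per-CPU spinlock, so the protected section remains preemptible.

/*
 * Sketch only: a hypothetical per-CPU buffer protected by a local lock,
 * mirroring the zswap_comp change in the patch above.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct demo_pcpu {
	local_lock_t lock;	/* serializes access to buf on this CPU */
	u8 *buf;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void demo_use(void)
{
	u8 *buf;

	local_lock(&demo_pcpu.lock);		/* replaces get_cpu_var()/get_cpu_ptr() */
	buf = this_cpu_ptr(&demo_pcpu)->buf;	/* this CPU's data, now serialized */
	/* ... work on buf ... */
	local_unlock(&demo_pcpu.lock);		/* replaces put_cpu_var()/put_cpu_ptr() */
}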