Diffstat (limited to 'debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch')
-rw-r--r--   debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch   84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch b/debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
new file mode 100644
index 0000000000..81f4f3c5dd
--- /dev/null
+++ b/debian/patches-rt/0001-zram-Replace-bit-spinlocks-with-a-spinlock_t.patch
@@ -0,0 +1,84 @@
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Thu, 31 Mar 2016 04:08:28 +0200
+Subject: [PATCH 1/3] zram: Replace bit spinlocks with a spinlock_t.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.10/older/patches-6.10.2-rt14.tar.xz
+
+The bit spinlock disables preemption. The spinlock_t lock becomes a sleeping
+lock on PREEMPT_RT and it can not be acquired in this context. In this locked
+section, zs_free() acquires a zs_pool::lock, and there is access to
+zram::wb_limit_lock.
+
+Add a spinlock_t for locking. Keep the set/ clear ZRAM_LOCK bit after
+the lock has been acquired/ dropped. The size of struct zram_table_entry
+increases by 4 bytes due to lock and additional 4 bytes padding with
+CONFIG_ZRAM_TRACK_ENTRY_ACTIME enabled.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Link: https://lore.kernel.org/r/20240620153556.777272-2-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
+Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
+Link: https://lore.kernel.org/20240619150814.BRAvaziM@linutronix.de
+---
+ drivers/block/zram/zram_drv.c | 22 +++++++++++++++++++---
+ drivers/block/zram/zram_drv.h |  1 +
+ 2 files changed, 20 insertions(+), 3 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -57,19 +57,34 @@ static void zram_free_page(struct zram *
+ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
+ 			  struct bio *parent);
+ 
++static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
++{
++	size_t index;
++
++	for (index = 0; index < num_pages; index++)
++		spin_lock_init(&zram->table[index].lock);
++}
++
+ static int zram_slot_trylock(struct zram *zram, u32 index)
+ {
+-	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
++	int ret;
++
++	ret = spin_trylock(&zram->table[index].lock);
++	if (ret)
++		__set_bit(ZRAM_LOCK, &zram->table[index].flags);
++	return ret;
+ }
+ 
+ static void zram_slot_lock(struct zram *zram, u32 index)
+ {
+-	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
++	spin_lock(&zram->table[index].lock);
++	__set_bit(ZRAM_LOCK, &zram->table[index].flags);
+ }
+ 
+ static void zram_slot_unlock(struct zram *zram, u32 index)
+ {
+-	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
++	__clear_bit(ZRAM_LOCK, &zram->table[index].flags);
++	spin_unlock(&zram->table[index].lock);
+ }
+ 
+ static inline bool init_done(struct zram *zram)
+@@ -1226,6 +1241,7 @@ static bool zram_meta_alloc(struct zram 
+ 
+ 	if (!huge_class_size)
+ 		huge_class_size = zs_huge_class_size(zram->mem_pool);
++	zram_meta_init_table_locks(zram, num_pages);
+ 	return true;
+ }
+ 
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -69,6 +69,7 @@ struct zram_table_entry {
+ 		unsigned long element;
+ 	};
+ 	unsigned long flags;
++	spinlock_t lock;
+ #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+ 	ktime_t ac_time;
+ #endif
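
The pattern the patch introduces is a per-entry lock paired with a status bit: the ZRAM_LOCK bit is set only after the lock has been acquired and cleared before it is released, so code that inspects the flags still sees a consistent "locked" indication while the real serialization moves to a spinlock_t that PREEMPT_RT can turn into a sleeping lock. The standalone C sketch below mirrors that pattern in userspace with POSIX threads; it is an analogy only, not the kernel code, and the struct and function names (slot_table_entry, SLOT_LOCK_BIT, slot_lock, slot_trylock, slot_unlock) are invented for illustration.

/*
 * Userspace sketch of the lock-plus-flag-bit pattern from the patch.
 * Not kernel code: pthread_spinlock_t stands in for the added spinlock_t,
 * and a plain flags word stands in for zram_table_entry::flags.
 * Build with: cc -o slot slot.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>

#define SLOT_LOCK_BIT (1UL << 0)

struct slot_table_entry {
	unsigned long flags;      /* status bits, mirrors zram_table_entry::flags */
	pthread_spinlock_t lock;  /* stands in for the added spinlock_t */
};

static void slot_init(struct slot_table_entry *e)
{
	e->flags = 0;
	pthread_spin_init(&e->lock, PTHREAD_PROCESS_PRIVATE);
}

static void slot_lock(struct slot_table_entry *e)
{
	pthread_spin_lock(&e->lock);
	e->flags |= SLOT_LOCK_BIT;   /* bit is set only after the lock is held */
}

static bool slot_trylock(struct slot_table_entry *e)
{
	if (pthread_spin_trylock(&e->lock) != 0)
		return false;
	e->flags |= SLOT_LOCK_BIT;
	return true;
}

static void slot_unlock(struct slot_table_entry *e)
{
	e->flags &= ~SLOT_LOCK_BIT;  /* bit is cleared before the lock is dropped */
	pthread_spin_unlock(&e->lock);
}

int main(void)
{
	struct slot_table_entry e;

	slot_init(&e);
	slot_lock(&e);
	/* ... operate on the entry while SLOT_LOCK_BIT is visible in flags ... */
	slot_unlock(&e);
	return 0;
}

As in the patch, the bit manipulation uses non-atomic updates (__set_bit/__clear_bit in the kernel, plain |=/&= here), which is safe because both happen strictly inside the lock's critical section.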