author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:38 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:38 +0000
commit     08b74a000942a380fe028845f92cd3a0dee827d5 (patch)
tree       aa78b4e12607c3e1fcce8d5cc42df4330792f118 /debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
parent     Adding upstream version 4.19.249. (diff)
Adding debian version 4.19.249-2. (debian/4.19.249-2, debian)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch')
-rw-r--r--   debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch   203
1 file changed, 203 insertions, 0 deletions
diff --git a/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch b/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
new file mode 100644
index 000000000..a3f158052
--- /dev/null
+++ b/debian/patches-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
@@ -0,0 +1,203 @@
+From 7041bc73c6e3c572934b9c42968151577521955e Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Tue, 22 Mar 2016 11:16:09 +0100
+Subject: [PATCH 086/347] mm/zsmalloc: copy with get_cpu_var() and locking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+get_cpu_var() disables preemption and triggers a might_sleep() splat later.
+This is replaced with get_locked_var().
+This bitspinlocks are replaced with a proper mutex which requires a slightly
+larger struct to allocate.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+[bigeasy: replace the bitspin_lock() with a mutex, get_locked_var(). Mike then
+fixed the size magic]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/zsmalloc.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 74 insertions(+), 6 deletions(-)
+
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 4d71356ea66a..50c5c3922b07 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -56,6 +56,7 @@
+ #include <linux/wait.h>
+ #include <linux/pagemap.h>
+ #include <linux/fs.h>
++#include <linux/locallock.h>
+ 
+ #define ZSPAGE_MAGIC	0x58
+ 
+@@ -73,9 +74,22 @@
+  */
+ #define ZS_MAX_ZSPAGE_ORDER 2
+ #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+-
+ #define ZS_HANDLE_SIZE (sizeof(unsigned long))
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++struct zsmalloc_handle {
++	unsigned long addr;
++	struct mutex lock;
++};
++
++#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
++
++#else
++
++#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
++#endif
++
+ /*
+  * Object location (<PFN>, <obj_idx>) is encoded as
+  * as single (unsigned long) handle value.
+@@ -325,7 +339,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
+ 
+ static int create_cache(struct zs_pool *pool)
+ {
+-	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
++	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
+ 					0, 0, NULL);
+ 	if (!pool->handle_cachep)
+ 		return 1;
+@@ -349,10 +363,27 @@ static void destroy_cache(struct zs_pool *pool)
+ 
+ static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
+ {
+-	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+-			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++	void *p;
++
++	p = kmem_cache_alloc(pool->handle_cachep,
++			     gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (p) {
++		struct zsmalloc_handle *zh = p;
++
++		mutex_init(&zh->lock);
++	}
++#endif
++	return (unsigned long)p;
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
++{
++	return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
++}
++#endif
++
+ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
+ {
+ 	kmem_cache_free(pool->handle_cachep, (void *)handle);
+@@ -371,12 +402,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+ 
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	WRITE_ONCE(zh->addr, obj);
++#else
+ 	/*
+ 	 * lsb of @obj represents handle lock while other bits
+ 	 * represent object value the handle is pointing so
+ 	 * updating shouldn't do store tearing.
+ 	 */
+ 	WRITE_ONCE(*(unsigned long *)handle, obj);
++#endif
+ }
+ 
+ /* zpool driver */
+@@ -458,6 +495,7 @@ MODULE_ALIAS("zpool-zsmalloc");
+ 
+ /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+ static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
++static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
+ 
+ static bool is_zspage_isolated(struct zspage *zspage)
+ {
+@@ -887,7 +925,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
+ 
+ static unsigned long handle_to_obj(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return zh->addr;
++#else
+ 	return *(unsigned long *)handle;
++#endif
+ }
+ 
+ static unsigned long obj_to_head(struct page *page, void *obj)
+@@ -901,22 +945,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
+ 
+ static inline int testpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_is_locked(&zh->lock);
++#else
+ 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static inline int trypin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_trylock(&zh->lock);
++#else
+ 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static void pin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_lock(&zh->lock);
++#else
+ 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static void unpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++	return mutex_unlock(&zh->lock);
++#else
+ 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+ 
+ static void reset_page(struct page *page)
+@@ -1342,7 +1410,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ 	class = pool->size_class[class_idx];
+ 	off = (class->size * obj_idx) & ~PAGE_MASK;
+ 
+-	area = &get_cpu_var(zs_map_area);
++	area = &get_locked_var(zs_map_area_lock, zs_map_area);
+ 	area->vm_mm = mm;
+ 	if (off + class->size <= PAGE_SIZE) {
+ 		/* this object is contained entirely within a page */
+@@ -1396,7 +1464,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+ 
+ 		__zs_unmap_object(area, pages, off, class->size);
+ 	}
+-	put_cpu_var(zs_map_area);
++	put_locked_var(zs_map_area_lock, zs_map_area);
+ 
+ 	migrate_read_unlock(zspage);
+ 	unpin_tag(handle);
+-- 
+2.36.1
+
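
The patch description is terse, so here is a minimal userspace sketch of the handle change it makes under CONFIG_PREEMPT_RT_FULL: instead of packing a bit spinlock into the low bit of a bare unsigned long handle (which busy-waits with preemption disabled), each handle allocation grows into a small struct carrying the stored value plus a real sleeping lock. This is an illustration only, not kernel code: it uses pthreads in place of the kernel's struct mutex, and the names zs_handle_demo, handle_pin() and friends are invented for the example (build with cc -pthread).

/* Illustration only: userspace analogue of the RT handle change.
 * Upstream zsmalloc keeps the handle as one unsigned long and uses its
 * lowest bit as a bit spinlock; the RT variant stores the value next to
 * a proper sleeping lock so a contended pin can block instead of spin.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct zs_handle_demo {			/* mirrors struct zsmalloc_handle */
	unsigned long addr;		/* the value the handle refers to */
	pthread_mutex_t lock;		/* stands in for struct mutex */
};

static struct zs_handle_demo *handle_alloc(void)
{
	struct zs_handle_demo *zh = calloc(1, sizeof(*zh));

	if (zh)
		pthread_mutex_init(&zh->lock, NULL);	/* like mutex_init() in cache_alloc_handle() */
	return zh;
}

static void handle_record(struct zs_handle_demo *zh, unsigned long obj)
{
	zh->addr = obj;			/* record_obj(): plain store, no lock bit to preserve */
}

static void handle_pin(struct zs_handle_demo *zh)
{
	pthread_mutex_lock(&zh->lock);	/* pin_tag(): may sleep, unlike bit_spin_lock() */
}

static void handle_unpin(struct zs_handle_demo *zh)
{
	pthread_mutex_unlock(&zh->lock);	/* unpin_tag() */
}

int main(void)
{
	struct zs_handle_demo *zh = handle_alloc();

	handle_record(zh, 0x1234);
	handle_pin(zh);
	printf("pinned handle -> %#lx\n", zh->addr);
	handle_unpin(zh);
	pthread_mutex_destroy(&zh->lock);
	free(zh);
	return 0;
}

The same motivation drives the get_cpu_var() to get_locked_var() change for zs_map_area in the patch: on RT the per-CPU mapping area is serialized by the zs_map_area_lock local lock rather than by disabling preemption, so zs_map_object()/zs_unmap_object() can be preempted without triggering might_sleep() splats.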