Diffstat (limited to 'debian/patches-rt/0062-highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch')
-rw-r--r--  debian/patches-rt/0062-highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch  |  71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0062-highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch b/debian/patches-rt/0062-highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
new file mode 100644
index 000000000..cecb34498
--- /dev/null
+++ b/debian/patches-rt/0062-highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
@@ -0,0 +1,71 @@
+From 09d7bcb6eee1e7c9351535a20f60929daca8632d Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 30 Oct 2020 13:59:06 +0100
+Subject: [PATCH 062/323] highmem: Don't disable preemption on RT in
+ kmap_atomic()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Disabling preemption makes it impossible to acquire sleeping locks within
+kmap_atomic() section.
+For PREEMPT_RT it is sufficient to disable migration.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
+index bd15bf9164c2..f9bc6acd3679 100644
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -90,7 +90,10 @@ static inline void __kunmap_local(void *vaddr)
+
+ static inline void *kmap_atomic(struct page *page)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return __kmap_local_page_prot(page, kmap_prot);
+ }
+@@ -99,7 +102,10 @@ static inline void __kunmap_atomic(void *addr)
+ {
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ unsigned int __nr_free_highpages(void);
+@@ -172,7 +178,10 @@ static inline void __kunmap_local(void *addr)
+
+ static inline void *kmap_atomic(struct page *page)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+ }
+@@ -183,7 +192,10 @@ static inline void __kunmap_atomic(void *addr)
+ kunmap_flush_on_unmap(addr);
+ #endif
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ static inline unsigned int nr_free_highpages(void) { return 0; }
+--
+2.43.0
+
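
Not part of the patch: a minimal, hypothetical caller-side sketch of the behaviour the commit message describes. With the change above, kmap_atomic() on a PREEMPT_RT kernel only disables migration (plus page faults), so a sleeping lock such as spinlock_t (which sleeps on RT) may be taken inside the mapped section; on a non-RT kernel the same code runs with preemption disabled and a real spinning lock. The function copy_into_page() and example_lock are invented for illustration and do not appear in the patch.

/*
 * Hypothetical example: copy a kernel buffer into a (possibly highmem)
 * page while holding a spinlock_t. On PREEMPT_RT, spinlock_t is a
 * sleeping lock; with this patch kmap_atomic() uses migrate_disable()
 * there instead of preempt_disable(), so taking the lock inside the
 * section is legal. On !RT it remains a spinning lock under
 * preempt_disable(), so the code is valid in both configurations.
 */
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

static void copy_into_page(struct page *page, const void *src, size_t len)
{
	void *vaddr;

	vaddr = kmap_atomic(page);	/* RT: migrate_disable(); !RT: preempt_disable() */
	spin_lock(&example_lock);	/* sleeps on RT; allowed now that preemption stays on */
	memcpy(vaddr, src, len);	/* src is kernel memory, so no page fault here */
	spin_unlock(&example_lock);
	kunmap_atomic(vaddr);		/* RT: migrate_enable(); !RT: preempt_enable() */
}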