From 4a710599720e4ca33cf2161432d93605dd091b8c Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 30 Oct 2020 13:59:06 +0100
Subject: [PATCH 062/323] highmem: Don't disable preemption on RT in
 kmap_atomic()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.215-rt107.tar.xz

Disabling preemption makes it impossible to acquire sleeping locks within
a kmap_atomic() section.

For PREEMPT_RT it is sufficient to disable migration.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/highmem-internal.h | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)
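
Note (not part of the patch itself): a minimal, illustrative sketch of what the
change below permits. On PREEMPT_RT, kmap_atomic() now only disables migration
(and page faults), so a spinlock_t, which is a sleeping lock on RT, may be
acquired inside the mapped section. The helper, lock and buffer names are made
up for this example and do not appear in the kernel.

/*
 * Illustration only -- assumes the kmap_atomic()/__kunmap_atomic()
 * behaviour introduced by the hunks below; demo_lock, demo_fill_page()
 * and their arguments are hypothetical.
 */
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static DEFINE_SPINLOCK(demo_lock);	/* sleeping lock on PREEMPT_RT */

static void demo_fill_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* RT: migrate_disable() only */

	/*
	 * With preempt_disable() this spin_lock() would be illegal on
	 * PREEMPT_RT (sleeping lock in a non-preemptible section); with
	 * migrate_disable() it is fine.
	 */
	spin_lock(&demo_lock);
	memcpy(vaddr, src, len);
	spin_unlock(&demo_lock);

	kunmap_atomic(vaddr);	/* RT: migrate_enable() */
}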

diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index bd15bf9164c23..f9bc6acd36798 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -90,7 +90,10 @@ static inline void __kunmap_local(void *vaddr)
 
 static inline void *kmap_atomic(struct page *page)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
 	pagefault_disable();
 	return __kmap_local_page_prot(page, kmap_prot);
 }
@@ -99,7 +102,10 @@ static inline void __kunmap_atomic(void *addr)
 {
 	kunmap_local_indexed(addr);
 	pagefault_enable();
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_enable();
+	else
+		preempt_enable();
 }
 
 unsigned int __nr_free_highpages(void);
@@ -172,7 +178,10 @@ static inline void __kunmap_local(void *addr)
 
 static inline void *kmap_atomic(struct page *page)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
 	pagefault_disable();
 	return page_address(page);
 }
@@ -183,7 +192,10 @@ static inline void __kunmap_atomic(void *addr)
 	kunmap_flush_on_unmap(addr);
 #endif
 	pagefault_enable();
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_enable();
+	else
+		preempt_enable();
 }
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
--
2.44.0