From 1b3711a9fe0b85e927a131441083be32b7879496 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:36:39 +0200
Subject: [PATCH 141/347] rtmutex: export lockdep-less version of rt_mutex's
 lock, trylock and unlock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz

Required for lock implementations built on top of rtmutex.
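
For illustration, a sleeping lock built on top of rtmutex can call these
entry points directly and perform its own lockdep annotation at the
outer level. The demo_lock wrapper below is a hypothetical sketch of
such a user, not part of this patch; only the __rt_mutex_*() functions
are provided here:

  struct demo_lock {
  	struct rt_mutex	rtmutex;
  };

  /* Sleeps in TASK_INTERRUPTIBLE; returns 0 on success, -EINTR on signal. */
  static inline int demo_lock_interruptible(struct demo_lock *dl)
  {
  	return __rt_mutex_lock_state(&dl->rtmutex, TASK_INTERRUPTIBLE);
  }

  /* Non-sleeping attempt; returns 1 if the lock was taken, 0 otherwise. */
  static inline int demo_trylock(struct demo_lock *dl)
  {
  	return __rt_mutex_trylock(&dl->rtmutex);
  }

  static inline void demo_unlock(struct demo_lock *dl)
  {
  	__rt_mutex_unlock(&dl->rtmutex);
  }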

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/locking/rtmutex.c        | 67 +++++++++++++++++++++------------
 kernel/locking/rtmutex_common.h |  3 ++
 2 files changed, 46 insertions(+), 24 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e9255b28de8f..e24699290bb7 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1494,12 +1494,33 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 		rt_mutex_postunlock(&wake_q);
 }
 
-static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
 {
 	might_sleep();
+	return rt_mutex_fastlock(lock, state, rt_mutex_slowlock);
+}
+
+/**
+ * rt_mutex_lock_state - lock a rt_mutex with a given state
+ *
+ * @lock:      The rt_mutex to be locked
+ * @state:     The state to set when blocking on the rt_mutex
+ */
+static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
+					      unsigned int subclass, int state)
+{
+	int ret;
 
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	ret = __rt_mutex_lock_state(lock, state);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+	rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1540,16 +1561,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, 1, _RET_IP_);
-
-	return ret;
+	return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1575,13 +1587,10 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
  * Returns:
  *  0          on success
  * -EINTR      when interrupted by a signal
- * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
  */
 int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
 {
-	might_sleep();
-
-	return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
+	return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
 
@@ -1616,6 +1625,18 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (WARN_ON_ONCE(in_irq() || in_nmi()))
+#else
+	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+#endif
+		return 0;
+
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+}
+
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
@@ -1631,14 +1652,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
 	int ret;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (WARN_ON_ONCE(in_irq() || in_nmi()))
-#else
-	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-#endif
-		return 0;
-
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = __rt_mutex_trylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -1646,6 +1660,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
+void __sched __rt_mutex_unlock(struct rt_mutex *lock)
+{
+	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+}
+
 /**
  * rt_mutex_unlock - unlock a rt_mutex
  *
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 66deb94765fb..53a8c5d03d28 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -161,6 +161,9 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
 extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 /* RW semaphore special interface */
 
+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+extern int __rt_mutex_trylock(struct rt_mutex *lock);
+extern void __rt_mutex_unlock(struct rt_mutex *lock);
 int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
 				     struct hrtimer_sleeper *timeout,
 				     enum rtmutex_chainwalk chwalk,
-- 
2.36.1