From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jun 2011 09:21:04 +0200
Subject: [PATCH 109/353] sched: Add saved_state for tasks blocked on sleeping locks
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=30034b98c640a6a9c7b194126aece2584e08ed87

Spinlocks are state-preserving in !RT. RT changes the task state when a
task gets blocked on a lock, so we need to remember the state before the
lock contention. If a regular wakeup (not an rtmutex-related wakeup)
happens while the task is blocked, saved_state is updated to TASK_RUNNING.
When the sleep on the lock is done, the saved state is restored.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h |  3 +++
 kernel/sched/core.c   | 33 ++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h  |  1 +
 3 files changed, 36 insertions(+), 1 deletion(-)

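As a quick illustration of the mechanism described above, here is a minimal
userspace C sketch (not kernel code, and not part of this patch) of the
saved_state handshake: the lock slowpath preserves the caller's sleep state,
a regular wakeup that arrives while the task sleeps on the lock is redirected
into saved_state, and the preserved state is restored once the lock sleep
ends. struct task, the lock_slowpath_*() helpers and model_try_to_wake_up()
are simplified stand-ins for the real task_struct, rtmutex slowpath and
try_to_wake_up().

#include <stdio.h>

#define TASK_RUNNING		0x0000
#define TASK_INTERRUPTIBLE	0x0001
#define TASK_UNINTERRUPTIBLE	0x0002
#define WF_LOCK_SLEEPER		0x08

struct task {
	long state;		/* what the scheduler acts on */
	long saved_state;	/* caller state preserved across lock sleeps */
};

/* Blocking on a "sleeping spinlock": preserve the caller's state. */
static void lock_slowpath_block(struct task *p)
{
	p->saved_state = p->state;
	p->state = TASK_UNINTERRUPTIBLE;	/* sleep for the lock itself */
}

/* Lock acquired: restore whatever state the caller had (or a wakeup left). */
static void lock_slowpath_unblock(struct task *p)
{
	p->state = p->saved_state;
	p->saved_state = TASK_RUNNING;
}

/* Simplified model of the try_to_wake_up() hunk in this patch. */
static int model_try_to_wake_up(struct task *p, long state, int wake_flags)
{
	if (!(p->state & state)) {
		/* Regular wakeup while blocked on the lock: record it. */
		if (!(wake_flags & WF_LOCK_SLEEPER) &&
		    (p->saved_state & state)) {
			p->saved_state = TASK_RUNNING;
			return 1;
		}
		return 0;
	}
	/* Regular wakeups may unconditionally clear the saved state. */
	if (!(wake_flags & WF_LOCK_SLEEPER))
		p->saved_state = TASK_RUNNING;
	p->state = TASK_RUNNING;
	return 1;
}

int main(void)
{
	struct task p = { .state = TASK_INTERRUPTIBLE,
			  .saved_state = TASK_RUNNING };

	/* Task sleeps interruptibly, then contends on a sleeping lock. */
	lock_slowpath_block(&p);

	/* A regular wakeup arrives while the task sleeps on the lock. */
	model_try_to_wake_up(&p, TASK_INTERRUPTIBLE, 0);

	/* Lock release wakes the waiter with WF_LOCK_SLEEPER. */
	model_try_to_wake_up(&p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
	lock_slowpath_unblock(&p);

	printf("final state: %ld (0 == TASK_RUNNING, wakeup not lost)\n",
	       p.state);
	return 0;
}

With this handshake the TASK_INTERRUPTIBLE wakeup that arrived during the
lock contention is not lost: once the lock sleep finishes, the task is left
TASK_RUNNING instead of going back to sleep.
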
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d627469bab35..44a5e80481bb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -604,6 +604,8 @@ struct task_struct {
 #endif
 	/* -1 unrunnable, 0 runnable, >0 stopped: */
 	volatile long			state;
+	/* saved state for "spinlock sleepers" */
+	volatile long			saved_state;
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
@@ -1625,6 +1627,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 61b5db727d1b..b84f883d052c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2005,8 +2005,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	smp_mb__after_spinlock();
-	if (!(p->state & state))
+	if (!(p->state & state)) {
+		/*
+		 * The task might be running due to a spinlock sleeper
+		 * wakeup. Check the saved state and set it to running
+		 * if the wakeup condition is true.
+		 */
+		if (!(wake_flags & WF_LOCK_SLEEPER)) {
+			if (p->saved_state & state) {
+				p->saved_state = TASK_RUNNING;
+				success = 1;
+			}
+		}
 		goto out;
+	}
+
+	/*
+	 * If this is a regular wakeup, then we can unconditionally
+	 * clear the saved state of a "lock sleeper".
+	 */
+	if (!(wake_flags & WF_LOCK_SLEEPER))
+		p->saved_state = TASK_RUNNING;
 
 	trace_sched_waking(p);
 
@@ -2170,6 +2189,18 @@ int wake_up_process(struct task_struct *p)
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER is passed
+ * to indicate the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+	return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
+}
+
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 55e695080fc6..637c408fb2dc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1448,6 +1448,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 #define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
 #define WF_FORK			0x02		/* Child wakeup after fork */
 #define WF_MIGRATED		0x4		/* Internal use, task got migrated */
+#define WF_LOCK_SLEEPER		0x08		/* wakeup spinlock "sleeper" */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution