Diffstat (limited to 'debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch')
-rw-r--r--  debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 56d89ace2..1732dfed8 100644
--- a/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/debian/patches-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -1,11 +1,11 @@
 From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 Date: Tue, 4 Apr 2017 12:50:16 +0200
-Subject: [PATCH 026/342] kernel: sched: Provide a pointer to the valid CPU
+Subject: [PATCH 026/351] kernel: sched: Provide a pointer to the valid CPU
  mask
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
-Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=3a742715efe880a8606d4b57cd2d1ee4715afa3c
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=b52972c9d7da09d15e44aad1448655ebdbb87553
 
 In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed()
 wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not
@@ -270,10 +270,10 @@ index 9eb99a43f849..e4d0cfebaac5 100644
 
  static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index f92d5ae6d04e..fc5f476c2aca 100644
+index fd4899236037..6d5937fc782e 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -660,7 +660,8 @@ struct task_struct {
+@@ -664,7 +664,8 @@ struct task_struct {
 
  	unsigned int policy;
  	int nr_cpus_allowed;
@@ -283,7 +283,7 @@ index f92d5ae6d04e..fc5f476c2aca 100644
 
  #ifdef CONFIG_PREEMPT_RCU
  	int rcu_read_lock_nesting;
-@@ -1398,7 +1399,7 @@ extern struct pid *cad_pid;
+@@ -1402,7 +1403,7 @@ extern struct pid *cad_pid;
  #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
  #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
  #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
@@ -333,7 +333,7 @@ index b65871600507..a18d695259af 100644
 	/*
 	 * One for us, one for whoever does the "release_task()" (usually
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 32af895bd86b..3fb7638a8863 100644
+index a03464249771..985cfa10fda5 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -878,7 +878,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
@@ -456,7 +456,7 @@
 	 *  - any previously selected CPU might disappear through hotplug
 	 *
 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4322,7 +4322,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -4323,7 +4323,7 @@ static int __sched_setscheduler(struct task_struct *p,
 			 * the entire root_domain to become SCHED_DEADLINE. We
 			 * will also fail if there's no bandwidth available.
 			 */
@@ -465,7 +465,7 @@
 -			if (!cpumask_subset(span, &p->cpus_allowed) ||
 +			if (!cpumask_subset(span, p->cpus_ptr) ||
 			    rq->rd->dl_bw.bw == 0) {
 				task_rq_unlock(rq, p, &rf);
 				return -EPERM;
-@@ -4921,7 +4921,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
+@@ -4922,7 +4922,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 		goto out_unlock;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -474,7 +474,7 @@
 -	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
 +	cpumask_and(mask, p->cpus_ptr, cpu_online_mask);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
-@@ -5498,7 +5498,7 @@ int task_can_attach(struct task_struct *p,
+@@ -5499,7 +5499,7 @@ int task_can_attach(struct task_struct *p,
 	 * allowed nodes is unnecessary.  Thus, cpusets are not
 	 * applicable for such threads.  This prevents checking for
 	 * success of set_cpus_allowed_ptr() on all attached tasks
@@ -483,7 +483,7 @@
 	 */
 	if (p->flags & PF_NO_SETAFFINITY) {
 		ret = -EINVAL;
-@@ -5525,7 +5525,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
+@@ -5526,7 +5526,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
 	if (curr_cpu == target_cpu)
 		return 0;
 
@@ -492,7 +492,7 @@
 -	if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
 +	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
 		return -EINVAL;
 
 	/* TODO: This is not properly updating schedstats */
-@@ -5664,7 +5664,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
+@@ -5665,7 +5665,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
 	put_prev_task(rq, next);
 
 	/*
@@ -541,10 +541,10 @@ index daaadf939ccb..f7d2c10b4c92 100644
 
 	/*
 	 * We have to ensure that we have at least one bit
 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index beec5081a55a..95ebbb2074c7 100644
+index 29ed5d8d30d6..9243d0049714 100644
 --- a/kernel/sched/deadline.c
 +++ b/kernel/sched/deadline.c
-@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
+@@ -561,7 +561,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 		 * If we cannot preempt any rq, fall back to pick any
 		 * online CPU:
@@ -553,7 +553,7 @@
 		 */
 -		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 +		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 		if (cpu >= nr_cpu_ids) {
 			/*
 			 * Failed to find any suitable CPU.
-@@ -1857,7 +1857,7 @@ static void set_curr_task_dl(struct rq *rq)
+@@ -1887,7 +1887,7 @@ static void set_curr_task_dl(struct rq *rq)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
@@ -562,7 +562,7 @@
 -	    cpumask_test_cpu(cpu, &p->cpus_allowed))
 +	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 	return 0;
 }
-@@ -2007,7 +2007,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+@@ -2037,7 +2037,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
@@ -719,10 +719,10 @@ index e84a056f783f..16940416d526 100644
 						    flags);
 			env.flags |= LBF_ALL_PINNED;
 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 70e8cd395474..52b55144d8ad 100644
+index 9c6c3572b131..a068884c369f 100644
 --- a/kernel/sched/rt.c
 +++ b/kernel/sched/rt.c
-@@ -1620,7 +1620,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+@@ -1623,7 +1623,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
@@ -731,7 +731,7 @@
 -	    cpumask_test_cpu(cpu, &p->cpus_allowed))
 +	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 
 	return 0;
-@@ -1757,7 +1757,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
+@@ -1760,7 +1760,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	 * Also make sure that it wasn't scheduled on its rq.
 	 */
 	if (unlikely(task_rq(task) != rq ||
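Taken together, the hunks above all make the same substitution: readers of a task's affinity go through p->cpus_ptr (a const cpumask_t *) instead of touching the cpus_allowed storage directly, which the patch renames to cpus_mask. The sketch below is a minimal user-space model of that indirection, not kernel code: the struct task type, the unsigned long standing in for cpumask_t, and the pin_to_cpu()/unpin() helpers are illustrative assumptions, chosen only to show why the extra pointer is useful.

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long cpumask_t;    /* toy mask: one bit per CPU, 64 CPUs max */

struct task {
    int nr_cpus_allowed;
    const cpumask_t *cpus_ptr;      /* what every reader dereferences */
    cpumask_t cpus_mask;            /* the task's own affinity storage */
};

/* Readers test the pointer, never the storage directly. */
static bool can_run_on(const struct task *p, int cpu)
{
    return (*p->cpus_ptr >> cpu) & 1UL;
}

/* A temporary restriction is just a pointer swap; cpus_mask is untouched. */
static void pin_to_cpu(struct task *p, cpumask_t *scratch, int cpu)
{
    *scratch = 1UL << cpu;
    p->cpus_ptr = scratch;
}

/* Lifting the restriction restores the user-visible mask. */
static void unpin(struct task *p)
{
    p->cpus_ptr = &p->cpus_mask;
}

int main(void)
{
    struct task p = { .nr_cpus_allowed = 2, .cpus_mask = 0x3UL }; /* CPUs 0-1 */
    cpumask_t scratch;

    p.cpus_ptr = &p.cpus_mask;
    printf("cpu1 allowed: %d\n", can_run_on(&p, 1));        /* prints 1 */

    pin_to_cpu(&p, &scratch, 0);
    printf("cpu1 while pinned: %d\n", can_run_on(&p, 1));   /* prints 0 */

    unpin(&p);
    printf("cpu1 after unpin: %d\n", can_run_on(&p, 1));    /* prints 1 */
    return 0;
}

The design point is that the pointer swap is cheap and fully reversible: a mechanism such as the RT series' migrate_disable() can point cpus_ptr at a one-CPU scratch mask and later restore &p->cpus_mask, leaving the affinity that user space configured via sched_setaffinity() untouched.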