Diffstat (limited to 'debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch')
 debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch b/debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
index 148b94e4b..90a497e14 100644
--- a/debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
+++ b/debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 6 Apr 2010 16:51:31 +0200
-Subject: [PATCH 181/342] md: raid5: Make raid5_percpu handling RT aware
-Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ee5b72719d2eaaef95179f02ba27140ecc010e87
+Subject: [PATCH 181/351] md: raid5: Make raid5_percpu handling RT aware
+Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=215c75ad291ac658b1b6756853fcf92b7e781daa
__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes scheduling while atomic
@@ -19,10 +19,10 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index dad426cc0f90..73083d826cb5 100644
+index 7fe0619c487a..fdbe17d1b1c4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+@@ -2070,8 +2070,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -33,7 +33,7 @@ index dad426cc0f90..73083d826cb5 100644
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -2129,7 +2130,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+@@ -2130,7 +2131,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -43,7 +43,7 @@ index dad426cc0f90..73083d826cb5 100644
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -6816,6 +6818,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
+@@ -6828,6 +6830,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
__func__, cpu);
return -ENOMEM;
}
@@ -51,7 +51,7 @@ index dad426cc0f90..73083d826cb5 100644
return 0;
}
-@@ -6826,7 +6829,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+@@ -6838,7 +6841,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
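
Note: the refresh above only updates the patch metadata (series numbering, Origin commit id, index hashes and hunk offsets); the substance of the change it carries is the same. The patch makes the raid5_percpu access preemptible on -RT by replacing the preempt-disabling get_cpu()/put_cpu() pair in raid_run_ops() with the RT tree's get_cpu_light()/put_cpu_light() and serializing the per-CPU scratch state with a spinlock initialized in raid456_cpu_up_prepare(). The sketch below illustrates that pattern only; the struct and function names are simplified placeholders, not the actual raid5.c code.

#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Simplified stand-in for struct raid5_percpu: per-CPU scratch state that
 * must not be used by two contexts on the same CPU at once. Names are
 * illustrative placeholders, not the real raid5.c symbols.
 */
struct example_percpu {
	spinlock_t lock;	/* the lock the RT patch adds for serialization */
	void *scratch;
};

static struct example_percpu __percpu *example_state;

static int example_cpu_up_prepare(unsigned int cpu)
{
	struct example_percpu *percpu = per_cpu_ptr(example_state, cpu);

	/* mirrors the spin_lock_init() added in raid456_cpu_up_prepare() */
	spin_lock_init(&percpu->lock);
	return 0;
}

static void example_run_ops(void)
{
	struct example_percpu *percpu;
	unsigned long cpu;

	/* get_cpu_light() (an RT-tree API, not mainline) pins the task to a
	 * CPU without disabling preemption, so the work below stays
	 * preemptible on -RT.
	 */
	cpu = get_cpu_light();
	percpu = per_cpu_ptr(example_state, cpu);
	spin_lock(&percpu->lock);	/* serialize the per-CPU data instead */

	/* ... perform the stripe operations using percpu->scratch ... */

	spin_unlock(&percpu->lock);
	put_cpu_light();
}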