Diffstat (limited to 'debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch')
-rw-r--r-- | debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch | 77 |
1 file changed, 77 insertions, 0 deletions
diff --git a/debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch b/debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
new file mode 100644
index 000000000..e1f7c3fce
--- /dev/null
+++ b/debian/patches-rt/0181-md-raid5-Make-raid5_percpu-handling-RT-aware.patch
@@ -0,0 +1,77 @@
+From 17db75361d2ecbea4026acde76a744fdecc4c475 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 6 Apr 2010 16:51:31 +0200
+Subject: [PATCH 181/347] md: raid5: Make raid5_percpu handling RT aware
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+__raid_run_ops() disables preemption with get_cpu() around the access
+to the raid5_percpu variables. That causes scheduling while atomic
+spews on RT.
+
+Serialize the access to the percpu data with a lock and keep the code
+preemptible.
+
+Reported-by: Udo van den Heuvel <udovdh@xs4all.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
+---
+ drivers/md/raid5.c | 8 +++++---
+ drivers/md/raid5.h | 1 +
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index c7bda4b0bced..8eb234732c9b 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ 	struct raid5_percpu *percpu;
+ 	unsigned long cpu;
+ 
+-	cpu = get_cpu();
++	cpu = get_cpu_light();
+ 	percpu = per_cpu_ptr(conf->percpu, cpu);
++	spin_lock(&percpu->lock);
+ 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
+ 		ops_run_biofill(sh);
+ 		overlap_clear++;
+@@ -2129,7 +2130,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ 		if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ 			wake_up(&sh->raid_conf->wait_for_overlap);
+ 	}
+-	put_cpu();
++	spin_unlock(&percpu->lock);
++	put_cpu_light();
+ }
+ 
+ static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
+@@ -6816,6 +6818,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
+ 			__func__, cpu);
+ 		return -ENOMEM;
+ 	}
++	spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ 	return 0;
+ }
+ 
+@@ -6826,7 +6829,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+ 	conf->percpu = alloc_percpu(struct raid5_percpu);
+ 	if (!conf->percpu)
+ 		return -ENOMEM;
+-
+ 	err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
+ 	if (!err) {
+ 		conf->scribble_disks = max(conf->raid_disks,
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index 8474c224127b..a3bf907ab2af 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -637,6 +637,7 @@ struct r5conf {
+ 	int			recovery_disabled;
+ 	/* per cpu variables */
+ 	struct raid5_percpu {
++		spinlock_t	lock;		/* Protection for -RT */
+ 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
+ 		struct flex_array *scribble;   /* space for constructing buffer
+ 					      * lists and performing address
+-- 
+2.36.1
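
For readers who want to poke at the locking pattern outside a kernel tree, below is a minimal user-space analogue using pthreads. Everything in it (percpu_scratch, scratch_init, worker, MAX_CPUS, the counter) is a made-up stand-in, not kernel API: pthread_mutex_t plays the role of the new percpu->lock, sched_getcpu() loosely stands in for get_cpu_light(), and the locked increment stands in for the ops_run_*() work. It illustrates the changelog's point: once preemption (here, thread scheduling) is no longer disabled around the per-CPU access, two contexts can reach the same slot, so correctness must come from the lock rather than from exclusive ownership of the CPU.

/* percpu_lock_demo.c - hypothetical user-space sketch, not kernel code. */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS   64
#define NTHREADS    4
#define ITERATIONS  100000

/* Stand-in for struct raid5_percpu: the lock now guards the scratch data. */
struct percpu_scratch {
	pthread_mutex_t lock;    /* plays the role of percpu->lock */
	unsigned long counter;   /* plays the role of spare_page/scribble */
};

static struct percpu_scratch scratch[MAX_CPUS];

/* Analogue of raid456_cpu_up_prepare(): initialize each slot's lock up front. */
static void scratch_init(void)
{
	for (int i = 0; i < MAX_CPUS; i++) {
		pthread_mutex_init(&scratch[i].lock, NULL);
		scratch[i].counter = 0;
	}
}

/*
 * Analogue of raid_run_ops(): pick "this CPU's" slot, then take its lock.
 * Nothing pins the thread here, so by the time the slot is used the thread
 * may have migrated, and another thread may be working on the same slot
 * concurrently; the mutex, not CPU exclusivity, makes the access safe.
 */
static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERATIONS; i++) {
		int cpu = sched_getcpu();           /* ~ get_cpu_light() */
		if (cpu < 0 || cpu >= MAX_CPUS)
			cpu = 0;
		struct percpu_scratch *p = &scratch[cpu];

		pthread_mutex_lock(&p->lock);       /* ~ spin_lock(&percpu->lock) */
		p->counter++;                       /* ~ the ops_run_*() work */
		pthread_mutex_unlock(&p->lock);     /* ~ spin_unlock(); put_cpu_light() */
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[NTHREADS];

	scratch_init();
	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(threads[i], NULL);

	unsigned long total = 0;
	for (int i = 0; i < MAX_CPUS; i++)
		total += scratch[i].counter;
	printf("total = %lu (expected %d)\n", total, NTHREADS * ITERATIONS);
	return 0;
}

Build with gcc -pthread percpu_lock_demo.c. Removing the mutex calls makes the final total come up short under load, the user-space equivalent of the corruption that the preemption-disabled region used to prevent. In the kernel patch itself, spin_lock() on PREEMPT_RT is a sleeping lock, which is why the serialized section stays preemptible: that is the point of swapping get_cpu() for get_cpu_light() plus a per-CPU lock.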