Diffstat (limited to 'debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch')
-rw-r--r-- | debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch | 203
1 file changed, 203 insertions, 0 deletions
diff --git a/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch b/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch
new file mode 100644
index 000000000..2d5f5508f
--- /dev/null
+++ b/debian/patches-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch
@@ -0,0 +1,203 @@
+From 3384b42fd998f06c79b4536609c48e3147adb1eb Mon Sep 17 00:00:00 2001
+From: Julia Cartwright <julia@ni.com>
+Date: Fri, 28 Sep 2018 21:03:51 +0000
+Subject: [PATCH 009/347] kthread: convert worker lock to raw spinlock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+In order to enable the queuing of kthread work items from hardirq
+context even when PREEMPT_RT_FULL is enabled, convert the worker
+spin_lock to a raw_spin_lock.
+
+This is only acceptable to do because the work performed under the lock
+is well-bounded and minimal.
+
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
+Reported-by: Tim Sander <tim@krieglstein.org>
+Signed-off-by: Julia Cartwright <julia@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/kthread.h |  2 +-
+ kernel/kthread.c        | 42 ++++++++++++++++++++---------------------
+ 2 files changed, 22 insertions(+), 22 deletions(-)
+
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index 72308c38e06c..e0498e46d642 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -88,7 +88,7 @@ enum {
+ 
+ struct kthread_worker {
+ 	unsigned int		flags;
+-	spinlock_t		lock;
++	raw_spinlock_t		lock;
+ 	struct list_head	work_list;
+ 	struct list_head	delayed_work_list;
+ 	struct task_struct	*task;
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 9750f4f7f901..c8cf4731ced8 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -637,7 +637,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
+ 				struct lock_class_key *key)
+ {
+ 	memset(worker, 0, sizeof(struct kthread_worker));
+-	spin_lock_init(&worker->lock);
++	raw_spin_lock_init(&worker->lock);
+ 	lockdep_set_class_and_name(&worker->lock, key, name);
+ 	INIT_LIST_HEAD(&worker->work_list);
+ 	INIT_LIST_HEAD(&worker->delayed_work_list);
+@@ -679,21 +679,21 @@ int kthread_worker_fn(void *worker_ptr)
+ 
+ 	if (kthread_should_stop()) {
+ 		__set_current_state(TASK_RUNNING);
+-		spin_lock_irq(&worker->lock);
++		raw_spin_lock_irq(&worker->lock);
+ 		worker->task = NULL;
+-		spin_unlock_irq(&worker->lock);
++		raw_spin_unlock_irq(&worker->lock);
+ 		return 0;
+ 	}
+ 
+ 	work = NULL;
+-	spin_lock_irq(&worker->lock);
++	raw_spin_lock_irq(&worker->lock);
+ 	if (!list_empty(&worker->work_list)) {
+ 		work = list_first_entry(&worker->work_list,
+ 					struct kthread_work, node);
+ 		list_del_init(&work->node);
+ 	}
+ 	worker->current_work = work;
+-	spin_unlock_irq(&worker->lock);
++	raw_spin_unlock_irq(&worker->lock);
+ 
+ 	if (work) {
+ 		__set_current_state(TASK_RUNNING);
+@@ -850,12 +850,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
+ 	bool ret = false;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&worker->lock, flags);
++	raw_spin_lock_irqsave(&worker->lock, flags);
+ 	if (!queuing_blocked(worker, work)) {
+ 		kthread_insert_work(worker, work, &worker->work_list);
+ 		ret = true;
+ 	}
+-	spin_unlock_irqrestore(&worker->lock, flags);
++	raw_spin_unlock_irqrestore(&worker->lock, flags);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(kthread_queue_work);
+@@ -881,7 +881,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
+ 	if (WARN_ON_ONCE(!worker))
+ 		return;
+ 
+-	spin_lock(&worker->lock);
++	raw_spin_lock(&worker->lock);
+ 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+ 	WARN_ON_ONCE(work->worker != worker);
+ 
+@@ -891,7 +891,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
+ 	if (!work->canceling)
+ 		kthread_insert_work(worker, work, &worker->work_list);
+ 
+-	spin_unlock(&worker->lock);
++	raw_spin_unlock(&worker->lock);
+ }
+ EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
+ 
+@@ -947,14 +947,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
+ 	unsigned long flags;
+ 	bool ret = false;
+ 
+-	spin_lock_irqsave(&worker->lock, flags);
++	raw_spin_lock_irqsave(&worker->lock, flags);
+ 
+ 	if (!queuing_blocked(worker, work)) {
+ 		__kthread_queue_delayed_work(worker, dwork, delay);
+ 		ret = true;
+ 	}
+ 
+-	spin_unlock_irqrestore(&worker->lock, flags);
++	raw_spin_unlock_irqrestore(&worker->lock, flags);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
+@@ -990,7 +990,7 @@ void kthread_flush_work(struct kthread_work *work)
+ 	if (!worker)
+ 		return;
+ 
+-	spin_lock_irq(&worker->lock);
++	raw_spin_lock_irq(&worker->lock);
+ 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+ 	WARN_ON_ONCE(work->worker != worker);
+ 
+@@ -1002,7 +1002,7 @@ void kthread_flush_work(struct kthread_work *work)
+ 	else
+ 		noop = true;
+ 
+-	spin_unlock_irq(&worker->lock);
++	raw_spin_unlock_irq(&worker->lock);
+ 
+ 	if (!noop)
+ 		wait_for_completion(&fwork.done);
+@@ -1030,9 +1030,9 @@ static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+ 	 * any queuing is blocked by setting the canceling counter.
+ 	 */
+ 	work->canceling++;
+-	spin_unlock_irqrestore(&worker->lock, *flags);
++	raw_spin_unlock_irqrestore(&worker->lock, *flags);
+ 	del_timer_sync(&dwork->timer);
+-	spin_lock_irqsave(&worker->lock, *flags);
++	raw_spin_lock_irqsave(&worker->lock, *flags);
+ 	work->canceling--;
+ }
+ 
+@@ -1094,7 +1094,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ 	unsigned long flags;
+ 	int ret = false;
+ 
+-	spin_lock_irqsave(&worker->lock, flags);
++	raw_spin_lock_irqsave(&worker->lock, flags);
+ 
+ 	/* Do not bother with canceling when never queued. */
+ 	if (!work->worker)
+@@ -1123,7 +1123,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ fast_queue:
+ 	__kthread_queue_delayed_work(worker, dwork, delay);
+ out:
+-	spin_unlock_irqrestore(&worker->lock, flags);
++	raw_spin_unlock_irqrestore(&worker->lock, flags);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
+@@ -1137,7 +1137,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+ 	if (!worker)
+ 		goto out;
+ 
+-	spin_lock_irqsave(&worker->lock, flags);
++	raw_spin_lock_irqsave(&worker->lock, flags);
+ 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
+ 	WARN_ON_ONCE(work->worker != worker);
+ 
+@@ -1154,13 +1154,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+ 	 * In the meantime, block any queuing by setting the canceling counter.
+ 	 */
+ 	work->canceling++;
+-	spin_unlock_irqrestore(&worker->lock, flags);
++	raw_spin_unlock_irqrestore(&worker->lock, flags);
+ 	kthread_flush_work(work);
+-	spin_lock_irqsave(&worker->lock, flags);
++	raw_spin_lock_irqsave(&worker->lock, flags);
+ 	work->canceling--;
+ 
+ out_fast:
+-	spin_unlock_irqrestore(&worker->lock, flags);
++	raw_spin_unlock_irqrestore(&worker->lock, flags);
+ out:
+ 	return ret;
+ }
+-- 
+2.36.1
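Editor's note, not part of the patch: the commit message says the point of the conversion is to allow kthread work to be queued from hardirq context on PREEMPT_RT_FULL, where an ordinary spinlock_t becomes a sleeping lock. The following minimal sketch illustrates the kind of caller this enables; all demo_* names are hypothetical, only the kthread_* and IRQ APIs are real mainline interfaces, and IRQ registration is omitted.

/*
 * Hypothetical driver sketch (illustration only).  kthread_queue_work()
 * takes worker->lock internally, so calling it from a hardirq handler on
 * PREEMPT_RT_FULL is only valid once that lock is a raw_spinlock_t, as
 * raw spinlocks are never replaced by sleeping rt_mutexes.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>

static struct kthread_worker *demo_worker;	/* hypothetical names */
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
	/* Deferred, preemptible processing runs in the worker kthread. */
}

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	/*
	 * Hardirq context: only a short, well-bounded list insertion
	 * happens under worker->lock, which is what makes the raw lock
	 * acceptable here.
	 */
	kthread_queue_work(demo_worker, &demo_work);
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	demo_worker = kthread_create_worker(0, "demo-worker");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);
	kthread_init_work(&demo_work, demo_work_fn);
	/* request_irq(..., demo_irq_handler, ...) would be done here. */
	return 0;
}

The handler defers all real work to the worker thread, which is the pattern the commit message calls "well-bounded and minimal" work under the lock.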