Diffstat (limited to 'debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch')
-rw-r--r--   debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch   246
1 file changed, 246 insertions, 0 deletions
diff --git a/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch b/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch
new file mode 100644
index 000000000..4c0335469
--- /dev/null
+++ b/debian/patches-rt/0093-work-simple-Simple-work-queue-implemenation.patch
@@ -0,0 +1,246 @@
+From fd404eb0247d5c8b5574eb138485984a7e7568ef Mon Sep 17 00:00:00 2001
+From: Daniel Wagner <daniel.wagner@bmw-carit.de>
+Date: Fri, 11 Jul 2014 15:26:11 +0200
+Subject: [PATCH 093/347] work-simple: Simple work queue implemenation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Provides a framework for enqueuing callbacks from irq context in a
+PREEMPT_RT_FULL safe manner. The callbacks are executed in kthread context.
+
+Based on wait-simple.
+
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
+---
+ include/linux/swork.h | 24 ++++++
+ kernel/sched/Makefile | 2 +-
+ kernel/sched/swork.c | 173 ++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 198 insertions(+), 1 deletion(-)
+ create mode 100644 include/linux/swork.h
+ create mode 100644 kernel/sched/swork.c
+
+diff --git a/include/linux/swork.h b/include/linux/swork.h
+new file mode 100644
+index 000000000000..f175fa9a6016
+--- /dev/null
++++ b/include/linux/swork.h
+@@ -0,0 +1,24 @@
++#ifndef _LINUX_SWORK_H
++#define _LINUX_SWORK_H
++
++#include <linux/list.h>
++
++struct swork_event {
++ struct list_head item;
++ unsigned long flags;
++ void (*func)(struct swork_event *);
++};
++
++static inline void INIT_SWORK(struct swork_event *event,
++ void (*func)(struct swork_event *))
++{
++ event->flags = 0;
++ event->func = func;
++}
++
++bool swork_queue(struct swork_event *sev);
++
++int swork_get(void);
++void swork_put(void);
++
++#endif /* _LINUX_SWORK_H */
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 7fe183404c38..2b765aa4e2c4 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -18,7 +18,7 @@ endif
+
+ obj-y += core.o loadavg.o clock.o cputime.o
+ obj-y += idle.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o
++obj-y += wait.o wait_bit.o swait.o swork.o completion.o
+
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
+new file mode 100644
+index 000000000000..a5b89fdacf19
+--- /dev/null
++++ b/kernel/sched/swork.c
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
++ *
++ * Provides a framework for enqueuing callbacks from irq context in a
++ * PREEMPT_RT_FULL safe manner. The callbacks are executed in kthread context.
++ */
++
++#include <linux/swait.h>
++#include <linux/swork.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/export.h>
++
++#define SWORK_EVENT_PENDING (1 << 0)
++
++static DEFINE_MUTEX(worker_mutex);
++static struct sworker *glob_worker;
++
++struct sworker {
++ struct list_head events;
++ struct swait_queue_head wq;
++
++ raw_spinlock_t lock;
++
++ struct task_struct *task;
++ int refs;
++};
++
++static bool swork_readable(struct sworker *worker)
++{
++ bool r;
++
++ if (kthread_should_stop())
++ return true;
++
++ raw_spin_lock_irq(&worker->lock);
++ r = !list_empty(&worker->events);
++ raw_spin_unlock_irq(&worker->lock);
++
++ return r;
++}
++
++static int swork_kthread(void *arg)
++{
++ struct sworker *worker = arg;
++
++ for (;;) {
++ swait_event_interruptible_exclusive(worker->wq,
++ swork_readable(worker));
++ if (kthread_should_stop())
++ break;
++
++ raw_spin_lock_irq(&worker->lock);
++ while (!list_empty(&worker->events)) {
++ struct swork_event *sev;
++
++ sev = list_first_entry(&worker->events,
++ struct swork_event, item);
++ list_del(&sev->item);
++ raw_spin_unlock_irq(&worker->lock);
++
++ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
++ &sev->flags));
++ sev->func(sev);
++ raw_spin_lock_irq(&worker->lock);
++ }
++ raw_spin_unlock_irq(&worker->lock);
++ }
++ return 0;
++}
++
++static struct sworker *swork_create(void)
++{
++ struct sworker *worker;
++
++ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
++ if (!worker)
++ return ERR_PTR(-ENOMEM);
++
++ INIT_LIST_HEAD(&worker->events);
++ raw_spin_lock_init(&worker->lock);
++ init_swait_queue_head(&worker->wq);
++
++ worker->task = kthread_run(swork_kthread, worker, "kswork");
++ if (IS_ERR(worker->task)) {
++ kfree(worker);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return worker;
++}
++
++static void swork_destroy(struct sworker *worker)
++{
++ kthread_stop(worker->task);
++
++ WARN_ON(!list_empty(&worker->events));
++ kfree(worker);
++}
++
++/**
++ * swork_queue - queue swork
++ *
++ * Returns %false if @sev was already on a queue, %true otherwise.
++ *
++ * The work is queued and processed by the kswork kthread on no particular CPU.
++ */
++bool swork_queue(struct swork_event *sev)
++{
++ unsigned long flags;
++
++ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
++ return false;
++
++ raw_spin_lock_irqsave(&glob_worker->lock, flags);
++ list_add_tail(&sev->item, &glob_worker->events);
++ raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
++
++ swake_up_one(&glob_worker->wq);
++ return true;
++}
++EXPORT_SYMBOL_GPL(swork_queue);
++
++/**
++ * swork_get - get an instance of the sworker
++ *
++ * Returns a negative error code if initialization of the worker failed,
++ * %0 otherwise.
++ *
++ */
++int swork_get(void)
++{
++ struct sworker *worker;
++
++ mutex_lock(&worker_mutex);
++ if (!glob_worker) {
++ worker = swork_create();
++ if (IS_ERR(worker)) {
++ mutex_unlock(&worker_mutex);
++ return -ENOMEM;
++ }
++
++ glob_worker = worker;
++ }
++
++ glob_worker->refs++;
++ mutex_unlock(&worker_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(swork_get);
++
++/**
++ * swork_put - puts an instance of the sworker
++ *
++ * Drops a reference; the sworker thread is destroyed once the last reference
++ * is put. Must not be called until all queued events have been completed.
++ */
++void swork_put(void)
++{
++ mutex_lock(&worker_mutex);
++
++ glob_worker->refs--;
++ if (glob_worker->refs > 0)
++ goto out;
++
++ swork_destroy(glob_worker);
++ glob_worker = NULL;
++out:
++ mutex_unlock(&worker_mutex);
++}
++EXPORT_SYMBOL_GPL(swork_put);
+--
+2.36.1
+
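For reference, a minimal sketch of how a driver might use the swork API introduced by this patch (swork_get(), INIT_SWORK(), swork_queue(), swork_put()), assuming the patch is applied. All names below (my_irq_work, my_work_func, my_driver_init, my_irq_handler, my_driver_exit) are illustrative and not part of the patch or of any in-tree user.

/* Illustrative swork usage; the identifiers here are hypothetical. */
#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/swork.h>

static struct swork_event my_irq_work;

/* Runs in the kswork kthread, so it is allowed to sleep. */
static void my_work_func(struct swork_event *sev)
{
	pr_info("deferred work running in kthread context\n");
}

static int my_driver_init(void)
{
	int ret;

	/* Take a reference on the global worker; the kswork thread is
	 * created on the first swork_get() call. */
	ret = swork_get();
	if (ret)
		return ret;

	INIT_SWORK(&my_irq_work, my_work_func);
	return 0;
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* Safe from hard irq context under PREEMPT_RT_FULL; returns false
	 * if the event is already pending. */
	swork_queue(&my_irq_work);
	return IRQ_HANDLED;
}

static void my_driver_exit(void)
{
	/* Drop the reference once all queued events have completed. */
	swork_put();
}

Note that the kthread clears the PENDING bit before invoking the callback, so the callback itself may re-queue its own event.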