Diffstat
-rw-r--r--  debian/patches-rt/0119-softirq-Check-preemption-after-reenabling-interrupts.patch  180
1 file changed, 180 insertions, 0 deletions
diff --git a/debian/patches-rt/0119-softirq-Check-preemption-after-reenabling-interrupts.patch b/debian/patches-rt/0119-softirq-Check-preemption-after-reenabling-interrupts.patch
new file mode 100644
index 000000000..fbf761018
--- /dev/null
+++ b/debian/patches-rt/0119-softirq-Check-preemption-after-reenabling-interrupts.patch
@@ -0,0 +1,180 @@
+From 45feb7680d31346f7a6da9926cdb23a37b640f47 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 13 Nov 2011 17:17:09 +0100
+Subject: [PATCH 119/347] softirq: Check preemption after reenabling interrupts
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+raise_softirq_irqoff() disables interrupts and wakes the softirq
+daemon, but after reenabling interrupts there is no preemption check,
+so the execution of the softirq thread might be delayed arbitrarily.
+
+In principle we could add that check to local_irq_enable/restore, but
+that's overkill as the raise_softirq_irqoff() sections are the only
+ones which show this behaviour.
+
+Reported-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ block/blk-softirq.c     | 3 +++
+ include/linux/preempt.h | 3 +++
+ lib/irq_poll.c          | 5 +++++
+ net/core/dev.c          | 7 +++++++
+ 4 files changed, 18 insertions(+)
+
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 15c1f5e12eb8..1628277885a1 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -53,6 +53,7 @@ static void trigger_softirq(void *data)
+ 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ 
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ 
+ /*
+@@ -91,6 +92,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
+ 			 this_cpu_ptr(&blk_cpu_done));
+ 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ 	local_irq_enable();
++	preempt_check_resched_rt();
+ 
+ 	return 0;
+ }
+@@ -143,6 +145,7 @@ void __blk_complete_request(struct request *req)
+ 		goto do_local;
+ 
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__blk_complete_request);
+ 
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index b7fe717eb1f4..9984f2b75b73 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -187,8 +187,10 @@ do { \
+ 
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt() preempt_check_resched()
+ #else
+ # define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() barrier();
+ #endif
+ 
+ #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+@@ -275,6 +277,7 @@ do { \
+ #define preempt_disable_notrace()		barrier()
+ #define preempt_enable_no_resched_notrace()	barrier()
+ #define preempt_enable_notrace()		barrier()
++#define preempt_check_resched_rt()		barrier()
+ #define preemptible()				0
+ 
+ #define migrate_disable()			barrier()
+diff --git a/lib/irq_poll.c b/lib/irq_poll.c
+index 86a709954f5a..9c069ef83d6d 100644
+--- a/lib/irq_poll.c
++++ b/lib/irq_poll.c
+@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop)
+ 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
+ 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_sched);
+ 
+@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop)
+ 	local_irq_save(flags);
+ 	__irq_poll_complete(iop);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+ 
+@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+ 	}
+ 
+ 	local_irq_enable();
++	preempt_check_resched_rt();
+ 
+ 	/* Even though interrupts have been re-enabled, this
+ 	 * access is safe because interrupts can only add new
+@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+ 		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ 
+ 	local_irq_enable();
++	preempt_check_resched_rt();
+ }
+ 
+ /**
+@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
+ 			 this_cpu_ptr(&blk_cpu_iopoll));
+ 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ 	local_irq_enable();
++	preempt_check_resched_rt();
+ 
+ 	return 0;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 42f6ff8b9703..b8e3ae050bcc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2725,6 +2725,7 @@ static void __netif_reschedule(struct Qdisc *q)
+ 	sd->output_queue_tailp = &q->next_sched;
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ 
+ void __netif_schedule(struct Qdisc *q)
+@@ -2787,6 +2788,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+ 	__this_cpu_write(softnet_data.completion_queue, skb);
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
+ 
+@@ -4269,6 +4271,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ 	rps_unlock(sd);
+ 
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ 
+ 	atomic_long_inc(&skb->dev->rx_dropped);
+ 	kfree_skb(skb);
+@@ -5820,12 +5823,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+ 		sd->rps_ipi_list = NULL;
+ 
+ 		local_irq_enable();
++		preempt_check_resched_rt();
+ 
+ 		/* Send pending IPI's to kick RPS processing on remote cpus. */
+ 		net_rps_send_ipi(remsd);
+ 	} else
+ #endif
+ 		local_irq_enable();
++	preempt_check_resched_rt();
+ }
+ 
+ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+@@ -5903,6 +5908,7 @@ void __napi_schedule(struct napi_struct *n)
+ 	local_irq_save(flags);
+ 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ 	local_irq_restore(flags);
++	preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+ 
+@@ -9527,6 +9533,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+ 
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 	local_irq_enable();
++	preempt_check_resched_rt();
+ 
+ #ifdef CONFIG_RPS
+ 	remsd = oldsd->rps_ipi_list;
+-- 
+2.36.1
+
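For context, the call-site pattern this patch applies throughout the block, irq_poll and networking code can be sketched in isolation. The snippet below is a minimal illustration, not code from the patch: it assumes an RT-patched 4.19 tree where <linux/preempt.h> provides preempt_check_resched_rt(), and kick_softirq_example() is a hypothetical helper, not a kernel function.

	/*
	 * Illustrative sketch of the pattern the patch adds after every
	 * raise_softirq_irqoff() section. Assumes an RT-patched tree where
	 * <linux/preempt.h> defines preempt_check_resched_rt().
	 */
	#include <linux/interrupt.h>
	#include <linux/preempt.h>

	static void kick_softirq_example(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* Mark the softirq pending; on RT this wakes the softirq thread. */
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);

		/*
		 * The wakeup above may have made a higher-priority softirq
		 * thread runnable. Under CONFIG_PREEMPT_RT_BASE this expands
		 * to preempt_check_resched() and reschedules right away;
		 * otherwise it is just barrier(), so non-RT builds pay nothing.
		 */
		preempt_check_resched_rt();
	}

This mirrors the reasoning in the commit message: only the raise_softirq_irqoff() sections exhibit the delayed-preemption problem, so the check is placed at each call site rather than folded into local_irq_enable()/local_irq_restore().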