path: root/debian/patches-rt/0282-revert-block.patch
blob: 64822be0fcdb2dedd87fce59a83fd338cd33f1dc
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Date: Fri, 20 Sep 2019 17:50:54 -0400
Subject: [PATCH 282/353] revert-block
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=2f49ff6cc5e6ab3714c16d7862f65881092c8f64

Revert swork version of: block: blk-mq: move blk_queue_usage_counter_release() into process context

In order to switch to the upstream version of this change, we first need to revert the swork-based code.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 block/blk-core.c       | 14 +-------------
 include/linux/blkdev.h |  2 --
 2 files changed, 1 insertion(+), 15 deletions(-)
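
Note for reviewers (below the diffstat, so it is dropped when the patch is
applied): the hunks that follow restore the plain, upstream-style form of the
q_usage_counter release callback. As a condensed before/after sketch of that
callback, using only identifiers that appear in the hunks and intended as an
illustration rather than a verbatim copy of either tree:

	/* After the revert (upstream form): wake the freeze waitqueue
	 * directly from whatever context drops the last reference.
	 */
	static void blk_queue_usage_counter_release(struct percpu_ref *ref)
	{
		struct request_queue *q =
			container_of(ref, struct request_queue, q_usage_counter);

		wake_up_all(&q->mq_freeze_wq);
	}

	/* Before the revert (RT swork variant being removed): only queue a
	 * swork item here, so the actual wake_up_all() runs later from the
	 * swork worker in process context.
	 */
	static void blk_queue_usage_counter_release(struct percpu_ref *ref)
	{
		struct request_queue *q =
			container_of(ref, struct request_queue, q_usage_counter);

		if (wq_has_sleeper(&q->mq_freeze_wq))
			swork_queue(&q->mq_pcpu_wake);
	}

	static void blk_queue_usage_counter_release_swork(struct swork_event *sev)
	{
		struct request_queue *q =
			container_of(sev, struct request_queue, mq_pcpu_wake);

		wake_up_all(&q->mq_freeze_wq);
	}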

diff --git a/block/blk-core.c b/block/blk-core.c
index 3872abb222be..24b1d8b195d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -973,21 +973,12 @@ void blk_queue_exit(struct request_queue *q)
 	percpu_ref_put(&q->q_usage_counter);
 }
 
-static void blk_queue_usage_counter_release_swork(struct swork_event *sev)
-{
-	struct request_queue *q =
-		container_of(sev, struct request_queue, mq_pcpu_wake);
-
-	wake_up_all(&q->mq_freeze_wq);
-}
-
 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 {
 	struct request_queue *q =
 		container_of(ref, struct request_queue, q_usage_counter);
 
-	if (wq_has_sleeper(&q->mq_freeze_wq))
-		swork_queue(&q->mq_pcpu_wake);
+	wake_up_all(&q->mq_freeze_wq);
 }
 
 static void blk_rq_timed_out_timer(struct timer_list *t)
@@ -1087,7 +1078,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
-	INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
@@ -3974,8 +3964,6 @@ int __init blk_dev_init(void)
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
 
-	BUG_ON(swork_get());
-
 	request_cachep = kmem_cache_create("blkdev_requests",
 			sizeof(struct request), 0, SLAB_PANIC, NULL);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0313098a428d..39d90bf9b5fa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,7 +27,6 @@
 #include <linux/percpu-refcount.h>
 #include <linux/scatterlist.h>
 #include <linux/blkzoned.h>
-#include <linux/swork.h>
 
 struct module;
 struct scsi_ioctl_command;
@@ -665,7 +664,6 @@ struct request_queue {
 #endif
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
-	struct swork_event	mq_pcpu_wake;
 	struct percpu_ref	q_usage_counter;
 	struct list_head	all_q_node;