From: Sebastian Andrzej Siewior
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: [PATCH 180/353] block/mq: don't complete requests via IPI
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=ddc500f1fab91f0709b7c883b688b12a63366890

The IPI used to complete a request on a remote CPU runs in hardirq
context, but on PREEMPT_RT the completion callback may take sleeping
locks. This patch moves the remote completion into a workqueue instead.

Signed-off-by: Sebastian Andrzej Siewior
---
 block/blk-core.c       |  3 +++
 block/blk-mq.c         | 23 +++++++++++++++++++++++
 include/linux/blk-mq.h |  2 +-
 include/linux/blkdev.h |  3 +++
 4 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 4fbf915d9cb0..24b1d8b195d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ea41d67e4db5..0f8dcb68b8a8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->extra_len = 0;
 	rq->__deadline = 0;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
 
@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+	struct request *rq = container_of(work, struct request, work);
+
+	rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
 
 	rq->q->softirq_done_fn(rq);
 }
+#endif
 
 static void __blk_mq_complete_request(struct request *rq)
 {
@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(struct request *rq)
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		/*
+		 * We could force QUEUE_FLAG_SAME_FORCE, then we would not get in
+		 * here. But we could try to invoke it on the CPU like this.
+		 */
+		schedule_work_on(ctx->cpu, &rq->work);
+#else
 		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2885dce1ad49..8dbb9ecf9993 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -256,7 +256,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 56fe682d9beb..39d90bf9b5fa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -157,6 +157,9 @@ enum mq_rq_state {
  */
 struct request {
 	struct request_queue *q;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct work_struct work;
+#endif
 	struct blk_mq_ctx *mq_ctx;
 
 	int cpu;
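
As an aside, the pattern the CONFIG_PREEMPT_RT_FULL branches rely on is small
enough to show in isolation. The sketch below is illustrative only and not part
of the patch: struct foo_request and the foo_* functions are made-up names,
while INIT_WORK(), schedule_work_on() and container_of() are the same kernel
APIs used above. It defers a completion from IPI/hardirq context to a work item
that runs in process context on the submitting CPU, where sleeping locks are
allowed on PREEMPT_RT.

/* Illustrative sketch only -- not from the patch. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_request {
	int submit_cpu;			/* CPU the request was submitted on */
	struct work_struct work;	/* completion deferred to a worker */
};

/* Completion handler; on PREEMPT_RT it may take sleeping locks, so it
 * must not be called from hardirq (IPI) context.
 */
static void foo_softirq_done(struct foo_request *rq)
{
	/* driver-specific completion work would go here */
}

/* Worker callback: runs in process context on the CPU it was queued on. */
static void foo_complete_work(struct work_struct *work)
{
	struct foo_request *rq = container_of(work, struct foo_request, work);

	foo_softirq_done(rq);
}

/* Initialise the per-request work item once, at request setup time. */
static void foo_request_init(struct foo_request *rq, int cpu)
{
	rq->submit_cpu = cpu;
	INIT_WORK(&rq->work, foo_complete_work);
}

/* Instead of smp_call_function_single_async() (an IPI), queue the
 * completion on the submitting CPU via the system workqueue.
 */
static void foo_complete_on_submit_cpu(struct foo_request *rq)
{
	schedule_work_on(rq->submit_cpu, &rq->work);
}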