Diffstat
-rw-r--r-- | debian/patches-rt/0224-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch | 85
1 file changed, 85 insertions, 0 deletions
diff --git a/debian/patches-rt/0224-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/debian/patches-rt/0224-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
new file mode 100644
index 000000000..85f584f79
--- /dev/null
+++ b/debian/patches-rt/0224-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
@@ -0,0 +1,85 @@
+From 7a1d28b6b70837f1554c28799195274823fc1e72 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 18:52:00 +0200
+Subject: [PATCH 224/347] crypto: cryptd - add a lock instead
+ preempt_disable/local_bh_disable
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+cryptd has a per-CPU lock which is protected with local_bh_disable() and
+preempt_disable().
+Add an explicit spin_lock to make the locking context more obvious and
+visible to lockdep. Since it is a per-CPU lock, there should be no lock
+contention on the actual spinlock.
+There is a small race window where we could be migrated to another CPU
+after the cpu_queue has been obtained. This is not a problem because the
+actual resource is protected by the spinlock.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ crypto/cryptd.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index e0c8e907b086..e079f9a70201 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
+ struct cryptd_cpu_queue {
+ 	struct crypto_queue queue;
+ 	struct work_struct work;
++	spinlock_t qlock;
+ };
+ 
+ struct cryptd_queue {
+@@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
+ 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
++		spin_lock_init(&cpu_queue->qlock);
+ 	}
+ 	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
+ 	return 0;
+@@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ 	struct cryptd_cpu_queue *cpu_queue;
+ 	atomic_t *refcnt;
+ 
+-	cpu = get_cpu();
+-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
++	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
++	spin_lock_bh(&cpu_queue->qlock);
++	cpu = smp_processor_id();
++
+ 	err = crypto_enqueue_request(&cpu_queue->queue, request);
+ 
+ 	refcnt = crypto_tfm_ctx(request->tfm);
+@@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ 	atomic_inc(refcnt);
+ 
+ out_put_cpu:
+-	put_cpu();
++	spin_unlock_bh(&cpu_queue->qlock);
+ 
+ 	return err;
+ }
+@@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work)
+ 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+ 	/*
+ 	 * Only handle one request at a time to avoid hogging crypto workqueue.
+-	 * preempt_disable/enable is used to prevent being preempted by
+-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+-	 * cryptd_enqueue_request() being accessed from software interrupts.
+ 	 */
+-	local_bh_disable();
+-	preempt_disable();
++	spin_lock_bh(&cpu_queue->qlock);
+ 	backlog = crypto_get_backlog(&cpu_queue->queue);
+ 	req = crypto_dequeue_request(&cpu_queue->queue);
+-	preempt_enable();
+-	local_bh_enable();
++	spin_unlock_bh(&cpu_queue->qlock);
+ 
+ 	if (!req)
+ 		return;
+-- 
+2.36.1
+
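For context on the pattern this patch introduces, below is a minimal sketch of the same per-CPU-queue-plus-spinlock scheme as a stand-alone kernel module. It is not part of the patch: the demo_* names are hypothetical, but alloc_percpu(), per_cpu_ptr(), raw_cpu_ptr(), spin_lock_init() and spin_lock_bh() are the same kernel APIs the patch itself uses.

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Per-CPU queue guarded by its own spinlock, like cryptd_cpu_queue. */
struct demo_cpu_queue {
	struct list_head items;
	spinlock_t qlock;		/* protects items */
};

static struct demo_cpu_queue __percpu *demo_queues;

static int __init demo_init(void)
{
	int cpu;

	demo_queues = alloc_percpu(struct demo_cpu_queue);
	if (!demo_queues)
		return -ENOMEM;

	/* Mirrors cryptd_init_queue(): set up each CPU's lock up front. */
	for_each_possible_cpu(cpu) {
		struct demo_cpu_queue *q = per_cpu_ptr(demo_queues, cpu);

		INIT_LIST_HEAD(&q->items);
		spin_lock_init(&q->qlock);
	}
	return 0;
}

/* Mirrors the locking in the new cryptd_enqueue_request(). */
static void __maybe_unused demo_enqueue(struct list_head *item)
{
	/*
	 * raw_cpu_ptr() instead of get_cpu(): being migrated afterwards
	 * is fine because qlock, not CPU pinning, protects the list.
	 */
	struct demo_cpu_queue *q = raw_cpu_ptr(demo_queues);

	/* The _bh variant stands in for the old local_bh_disable(). */
	spin_lock_bh(&q->qlock);
	list_add_tail(item, &q->items);
	spin_unlock_bh(&q->qlock);
}

static void __exit demo_exit(void)
{
	free_percpu(demo_queues);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

As the commit message notes, demo_enqueue() may be migrated to another CPU right after raw_cpu_ptr() returns; that is harmless because the list is protected by the spinlock rather than by staying on one CPU, and spin_lock_bh() still keeps softirq users from re-entering the queue on the local CPU, which is what local_bh_disable() used to guarantee.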