From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 18:52:00 +0200
Subject: [PATCH 224/351] crypto: cryptd - add a lock instead of
 preempt_disable/local_bh_disable
Origin: https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit?id=728b5a4f8b9b88f5fc53c480dcaedc107740554c
cryptd has a per-CPU queue which is protected with local_bh_disable() and
preempt_disable().
Add an explicit spin_lock to make the locking context more obvious and
visible to lockdep. Since it is a per-CPU lock, there should be no lock
contention on the actual spinlock.
There is a small race window where we could be migrated to another CPU
after the cpu_queue has been obtained. This is not a problem because the
actual resource is protected by the spinlock.
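
For illustration only, a minimal sketch of the new locking pattern with
hypothetical names (demo_cpu_queue, demo_enqueue); this is not code from
the tree, just the shape of the change:

	#include <linux/percpu.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Hypothetical per-CPU queue mirroring the shape of the cryptd change. */
	struct demo_cpu_queue {
		struct list_head	list;
		spinlock_t		qlock;	/* explicit lock, visible to lockdep */
	};

	static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queues);

	static void demo_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			struct demo_cpu_queue *q = per_cpu_ptr(&demo_queues, cpu);

			INIT_LIST_HEAD(&q->list);
			spin_lock_init(&q->qlock);
		}
	}

	static void demo_enqueue(struct list_head *item)
	{
		struct demo_cpu_queue *q;

		/*
		 * raw_cpu_ptr() may race with migration to another CPU, but
		 * that is harmless: the queue is protected by q->qlock, not
		 * by staying on the CPU it belongs to.
		 */
		q = raw_cpu_ptr(&demo_queues);
		spin_lock_bh(&q->qlock);
		list_add_tail(item, &q->list);
		spin_unlock_bh(&q->qlock);
	}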
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
crypto/cryptd.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index e0c8e907b086..e079f9a70201 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
struct cryptd_cpu_queue {
struct crypto_queue queue;
struct work_struct work;
+ spinlock_t qlock;
};
struct cryptd_queue {
@@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+ spin_lock_init(&cpu_queue->qlock);
}
pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
@@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct cryptd_cpu_queue *cpu_queue;
atomic_t *refcnt;
- cpu = get_cpu();
- cpu_queue = this_cpu_ptr(queue->cpu_queue);
+ cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+ spin_lock_bh(&cpu_queue->qlock);
+ cpu = smp_processor_id();
+
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
@@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
atomic_inc(refcnt);
out_put_cpu:
- put_cpu();
+ spin_unlock_bh(&cpu_queue->qlock);
return err;
}
@@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work)
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
/*
* Only handle one request at a time to avoid hogging crypto workqueue.
- * preempt_disable/enable is used to prevent being preempted by
- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
- * cryptd_enqueue_request() being accessed from software interrupts.
*/
- local_bh_disable();
- preempt_disable();
+ spin_lock_bh(&cpu_queue->qlock);
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
- preempt_enable();
- local_bh_enable();
+ spin_unlock_bh(&cpu_queue->qlock);
if (!req)
return;