crypto: cryptd - Protect per-CPU resource by disabling BH.
Author:     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate: Wed, 4 May 2022 15:07:36 +0000 (17:07 +0200)
Committer:  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 9 Jun 2022 08:23:11 +0000 (10:23 +0200)
[ Upstream commit 91e8bcd7b4da182e09ea19a2c73167345fe14c98 ]

The access to cryptd_queue::cpu_queue is synchronized by disabling
preemption in cryptd_enqueue_request() and disabling BH in
cryptd_queue_worker(). This implies that access is allowed from BH.

If cryptd_enqueue_request() is invoked from preemptible context _and_
soft interrupt, then this can lead to list corruption, since
cryptd_enqueue_request() is not protected against concurrent access from
soft interrupt.

Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable()
to ensure BH is always disabled.
Remove preempt_disable() from cryptd_queue_worker() since it is not
needed because local_bh_disable() ensures synchronisation.

Fixes: 254eff771441 ("crypto: cryptd - Per-CPU thread implementation based on kcrypto_wq")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
crypto/cryptd.c

index a1bea0f..668095e 100644 (file)
@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
+       /*
+        * Protected by disabling BH to allow enqueueing from softinterrupt and
+        * dequeuing from kworker (cryptd_queue_worker()).
+        */
        struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
 {
-       int cpu, err;
+       int err;
        struct cryptd_cpu_queue *cpu_queue;
        refcount_t *refcnt;
 
-       cpu = get_cpu();
+       local_bh_disable();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
 
        refcnt = crypto_tfm_ctx(request->tfm);
 
        if (err == -ENOSPC)
-               goto out_put_cpu;
+               goto out;
 
-       queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
+       queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
        if (!refcount_read(refcnt))
-               goto out_put_cpu;
+               goto out;
 
        refcount_inc(refcnt);
 
-out_put_cpu:
-       put_cpu();
+out:
+       local_bh_enable();
 
        return err;
 }
@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging crypto workqueue.
-        * preempt_disable/enable is used to prevent being preempted by
-        * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-        * cryptd_enqueue_request() being accessed from software interrupts.
         */
        local_bh_disable();
-       preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
-       preempt_enable();
        local_bh_enable();
 
        if (!req)