crypto: qat - remove dma_free_coherent() for DH
authorGiovanni Cabiddu <giovanni.cabiddu@intel.com>
Mon, 9 May 2022 13:34:13 +0000 (14:34 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 29 Jul 2022 15:25:28 +0000 (17:25 +0200)
[ Upstream commit 029aa4624a7fe35233bdd3d1354dc7be260380bf ]

The function qat_dh_compute_value() allocates memory with
dma_alloc_coherent() if the source or the destination buffers are made
of multiple flat buffers or of a size that is not compatible with the
hardware.
This memory is then freed with dma_free_coherent() in the context of a
tasklet invoked to handle the response for the corresponding request.

According to Documentation/core-api/dma-api-howto.rst, the function
dma_free_coherent() cannot be called in an interrupt context.

Replace the dma_alloc_coherent() allocations in the function
qat_dh_compute_value() with kzalloc() + dma_map_single().

Cc: stable@vger.kernel.org
Fixes: c9839143ebbf ("crypto: qat - Add DH support")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/crypto/qat/qat_common/qat_asym_algs.c

index b31372b..25bbd22 100644 (file)
@@ -164,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
        err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
        if (areq->src) {
-               if (req->src_align)
-                       dma_free_coherent(dev, req->ctx.dh->p_size,
-                                         req->src_align, req->in.dh.in.b);
-               else
-                       dma_unmap_single(dev, req->in.dh.in.b,
-                                        req->ctx.dh->p_size, DMA_TO_DEVICE);
+               dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
+                                DMA_TO_DEVICE);
+               kfree_sensitive(req->src_align);
        }
 
        areq->dst_len = req->ctx.dh->p_size;
        if (req->dst_align) {
                scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
                                         areq->dst_len, 1);
-
-               dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
-                                 req->out.dh.r);
-       } else {
-               dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
-                                DMA_FROM_DEVICE);
+               kfree_sensitive(req->dst_align);
        }
 
+       dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+                        DMA_FROM_DEVICE);
+
        dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, req->phy_out,
@@ -231,6 +226,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret;
        int n_input_params = 0;
+       u8 *vaddr;
 
        if (unlikely(!ctx->xa))
                return -EINVAL;
@@ -287,27 +283,24 @@ static int qat_dh_compute_value(struct kpp_request *req)
                 */
                if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
                        qat_req->src_align = NULL;
-                       qat_req->in.dh.in.b = dma_map_single(dev,
-                                                            sg_virt(req->src),
-                                                            req->src_len,
-                                                            DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(dev,
-                                                      qat_req->in.dh.in.b)))
-                               return ret;
-
+                       vaddr = sg_virt(req->src);
                } else {
                        int shift = ctx->p_size - req->src_len;
 
-                       qat_req->src_align = dma_alloc_coherent(dev,
-                                                               ctx->p_size,
-                                                               &qat_req->in.dh.in.b,
-                                                               GFP_KERNEL);
+                       qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
                        if (unlikely(!qat_req->src_align))
                                return ret;
 
                        scatterwalk_map_and_copy(qat_req->src_align + shift,
                                                 req->src, 0, req->src_len, 0);
+
+                       vaddr = qat_req->src_align;
                }
+
+               qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
+                                                    DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
+                       goto unmap_src;
        }
        /*
         * dst can be of any size in valid range, but HW expects it to be the
@@ -318,20 +311,18 @@ static int qat_dh_compute_value(struct kpp_request *req)
         */
        if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
                qat_req->dst_align = NULL;
-               qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
-                                                  req->dst_len,
-                                                  DMA_FROM_DEVICE);
-
-               if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
-                       goto unmap_src;
-
+               vaddr = sg_virt(req->dst);
        } else {
-               qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
-                                                       &qat_req->out.dh.r,
-                                                       GFP_KERNEL);
+               qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;
+
+               vaddr = qat_req->dst_align;
        }
+       qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
+                                          DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+               goto unmap_dst;
 
        qat_req->in.dh.in_tab[n_input_params] = 0;
        qat_req->out.dh.out_tab[1] = 0;
@@ -371,23 +362,17 @@ unmap_in_params:
                                 sizeof(struct qat_dh_input_params),
                                 DMA_TO_DEVICE);
 unmap_dst:
-       if (qat_req->dst_align)
-               dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
-                                 qat_req->out.dh.r);
-       else
-               if (!dma_mapping_error(dev, qat_req->out.dh.r))
-                       dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
-                                        DMA_FROM_DEVICE);
+       if (!dma_mapping_error(dev, qat_req->out.dh.r))
+               dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+                                DMA_FROM_DEVICE);
+       kfree_sensitive(qat_req->dst_align);
 unmap_src:
        if (req->src) {
-               if (qat_req->src_align)
-                       dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
-                                         qat_req->in.dh.in.b);
-               else
-                       if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
-                               dma_unmap_single(dev, qat_req->in.dh.in.b,
-                                                ctx->p_size,
-                                                DMA_TO_DEVICE);
+               if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+                       dma_unmap_single(dev, qat_req->in.dh.in.b,
+                                        ctx->p_size,
+                                        DMA_TO_DEVICE);
+               kfree_sensitive(qat_req->src_align);
        }
        return ret;
 }