
crypto: rockchip - use a rk_crypto_info variable instead of lots of indirection
author	Corentin Labbe <clabbe@baylibre.com>	Tue, 27 Sep 2022 07:55:00 +0000 (07:55 +0000)
committer	Herbert Xu <herbert@gondor.apana.org.au>	Fri, 28 Oct 2022 04:36:33 +0000 (12:36 +0800)
Instead of using lots of ctx->dev->xx indirections, use an intermediate
variable for rk_crypto_info.
This will help later, when two different rk_crypto_info structures will
be used.

Reviewed-by: John Keeping <john@metanate.com>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
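
For context, the change is purely mechanical: dereference the ctx->dev pointer chain once into a local and use the local everywhere after that. A minimal standalone sketch of the pattern, with hypothetical stand-in types rather than the driver's real structs:

	struct hw_info { int status; };		/* stand-in for struct rk_crypto_info */
	struct op_ctx { struct hw_info *dev; };	/* stand-in for the tfm context */

	/* Before: every access repeats the ctx->dev indirection. */
	static void run_before(struct op_ctx *ctx)
	{
		ctx->dev->status = 0;
		/* ... many more ctx->dev->... accesses ... */
	}

	/* After: one dereference up front. */
	static void run_after(struct op_ctx *ctx)
	{
		struct hw_info *rkc = ctx->dev;

		rkc->status = 0;
		/* ... the same accesses via rkc-> ... */
	}

The generated code is the same either way; the win is readability plus a single point to change when, as the message says, the value may come from one of two rk_crypto_info instances.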
drivers/crypto/rockchip/rk3288_crypto_ahash.c
drivers/crypto/rockchip/rk3288_crypto_skcipher.c

diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index fae779d..636dbcd 100644
@@ -226,9 +226,10 @@ static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+       struct rk_crypto_info *rkc = tctx->dev;
        int ret;
 
-       ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+       ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
        if (ret <= 0)
                return -EINVAL;
 
@@ -243,8 +244,9 @@ static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+       struct rk_crypto_info *rkc = tctx->dev;
 
-       dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+       dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
        return 0;
 }
 
@@ -257,6 +259,7 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
        struct scatterlist *sg = areq->src;
+       struct rk_crypto_info *rkc = tctx->dev;
        int err = 0;
        int i;
        u32 v;
@@ -283,13 +286,13 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
        rk_ahash_reg_init(areq);
 
        while (sg) {
-               reinit_completion(&tctx->dev->complete);
-               tctx->dev->status = 0;
-               crypto_ahash_dma_start(tctx->dev, sg);
-               wait_for_completion_interruptible_timeout(&tctx->dev->complete,
+               reinit_completion(&rkc->complete);
+               rkc->status = 0;
+               crypto_ahash_dma_start(rkc, sg);
+               wait_for_completion_interruptible_timeout(&rkc->complete,
                                                          msecs_to_jiffies(2000));
-               if (!tctx->dev->status) {
-                       dev_err(tctx->dev->dev, "DMA timeout\n");
+               if (!rkc->status) {
+                       dev_err(rkc->dev, "DMA timeout\n");
                        err = -EFAULT;
                        goto theend;
                }
@@ -306,10 +309,10 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
         * efficiency, and make it response quickly when dma
         * complete.
         */
-       readl_poll_timeout(tctx->dev->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);
+       readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);
 
        for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
-               v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
+               v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
                put_unaligned_le32(v, areq->result + i * 4);
        }
 
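For orientation, the loop this hunk rewrites follows the kernel's completion-based DMA wait pattern: re-arm the completion, clear the status flag, start the transfer, wait with a timeout, and treat a still-zero status (set by the interrupt handler on success) as a DMA timeout. A hedged sketch of that shape; the fields mirror the diff, but this struct definition and start_dma() are illustrative stand-ins, not the driver's real code:

	#include <linux/completion.h>
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/scatterlist.h>

	/* Illustrative stand-in; the real definition lives in the driver headers. */
	struct rk_crypto_info {
		struct device *dev;
		struct completion complete;
		int status;
	};

	static void start_dma(struct rk_crypto_info *rkc,
			      struct scatterlist *sg); /* hypothetical: kicks the engine */

	static int wait_one_sg(struct rk_crypto_info *rkc, struct scatterlist *sg)
	{
		reinit_completion(&rkc->complete);
		rkc->status = 0;	/* the IRQ handler sets this on completion */
		start_dma(rkc, sg);
		wait_for_completion_interruptible_timeout(&rkc->complete,
							  msecs_to_jiffies(2000));
		if (!rkc->status) {
			dev_err(rkc->dev, "DMA timeout\n");
			return -EFAULT;
		}
		return 0;
	}
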
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 3187869..6a1bea9 100644
@@ -303,6 +303,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
        unsigned int todo;
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+       struct rk_crypto_info *rkc = ctx->dev;
 
        algt->stat_req++;
 
@@ -330,49 +331,49 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
                        scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
                }
                if (sgs == sgd) {
-                       err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+                       err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
                        if (err <= 0) {
                                err = -EINVAL;
                                goto theend_iv;
                        }
                } else {
-                       err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
+                       err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
                        if (err <= 0) {
                                err = -EINVAL;
                                goto theend_iv;
                        }
-                       err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+                       err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
                        if (err <= 0) {
                                err = -EINVAL;
                                goto theend_sgs;
                        }
                }
                err = 0;
-               rk_cipher_hw_init(ctx->dev, areq);
+               rk_cipher_hw_init(rkc, areq);
                if (ivsize) {
                        if (ivsize == DES_BLOCK_SIZE)
-                               memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
+                               memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
                        else
-                               memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
+                               memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
                }
-               reinit_completion(&ctx->dev->complete);
-               ctx->dev->status = 0;
+               reinit_completion(&rkc->complete);
+               rkc->status = 0;
 
                todo = min(sg_dma_len(sgs), len);
                len -= todo;
-               crypto_dma_start(ctx->dev, sgs, sgd, todo / 4);
-               wait_for_completion_interruptible_timeout(&ctx->dev->complete,
+               crypto_dma_start(rkc, sgs, sgd, todo / 4);
+               wait_for_completion_interruptible_timeout(&rkc->complete,
                                                          msecs_to_jiffies(2000));
-               if (!ctx->dev->status) {
-                       dev_err(ctx->dev->dev, "DMA timeout\n");
+               if (!rkc->status) {
+                       dev_err(rkc->dev, "DMA timeout\n");
                        err = -EFAULT;
                        goto theend;
                }
                if (sgs == sgd) {
-                       dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+                       dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
                } else {
-                       dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
-                       dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+                       dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+                       dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
                }
                if (rctx->mode & RK_CRYPTO_DEC) {
                        memcpy(iv, biv, ivsize);
@@ -405,10 +406,10 @@ theend:
 
 theend_sgs:
        if (sgs == sgd) {
-               dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+               dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
        } else {
-               dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
-               dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+               dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+               dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
        }
 theend_iv:
        return err;
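
Worth noting in this file: the sgs == sgd test distinguishes in-place from out-of-place requests. A shared scatterlist is mapped once as DMA_BIDIRECTIONAL, while distinct source and destination lists are mapped DMA_TO_DEVICE and DMA_FROM_DEVICE and must be unmapped with the same direction. A small sketch of that choice using the real DMA API, with a hypothetical helper name:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/scatterlist.h>

	/* Hypothetical helper mirroring the mapping logic in rk_cipher_run(). */
	static int map_cipher_sgs(struct device *dev, struct scatterlist *sgs,
				  struct scatterlist *sgd)
	{
		if (sgs == sgd)	/* in-place: one entry, mapped for both directions */
			return dma_map_sg(dev, sgs, 1, DMA_BIDIRECTIONAL) <= 0 ?
				-EINVAL : 0;

		if (dma_map_sg(dev, sgs, 1, DMA_TO_DEVICE) <= 0)
			return -EINVAL;
		if (dma_map_sg(dev, sgd, 1, DMA_FROM_DEVICE) <= 0) {
			/* undo the source mapping, as theend_sgs does above */
			dma_unmap_sg(dev, sgs, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}
		return 0;
	}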