crypto: arm/aes-ce - yield the SIMD unit between scatterwalk steps
author     Ard Biesheuvel <ard.biesheuvel@linaro.org>
           Tue, 3 Sep 2019 16:43:24 +0000 (09:43 -0700)
committer  Herbert Xu <herbert@gondor.apana.org.au>
           Mon, 9 Sep 2019 07:35:28 +0000 (17:35 +1000)

Reduce the scope of the kernel_neon_begin/end regions so that the SIMD
unit is released (and thus preemption re-enabled) if the crypto operation
cannot be completed in a single scatterwalk step. This avoids scheduling
blackouts caused by preemption being disabled for unbounded periods,
resulting in a more responsive system.

After this change, we can also permit the skcipher walk infrastructure to
sleep, so pass false for the 'atomic' parameter of skcipher_walk_virt() as
well.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm/crypto/aes-ce-glue.c
arch/arm/crypto/aes-neonbs-glue.c
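
For orientation before the diff, here is a minimal sketch of the pattern the
patch applies throughout both files. It is illustrative only, not the kernel's
exact code: do_one_step() is a hypothetical stand-in for the per-mode
assembler helpers (ce_aes_ecb_encrypt() and friends).

    #include <asm/neon.h>
    #include <crypto/internal/skcipher.h>

    /* Hypothetical per-step worker, standing in for the assembler helpers. */
    static void do_one_step(struct skcipher_walk *walk);

    /* Old shape: SIMD claimed (and preemption disabled) for the whole walk. */
    static int crypt_old(struct skcipher_walk *walk)
    {
            int err = 0;

            kernel_neon_begin();
            while (walk->nbytes) {
                    do_one_step(walk);
                    /* second argument: bytes left unprocessed in this step */
                    err = skcipher_walk_done(walk, 0);
            }
            kernel_neon_end();
            return err;
    }

    /* New shape: SIMD held only across one scatterwalk step at a time. */
    static int crypt_new(struct skcipher_walk *walk)
    {
            int err = 0;

            while (walk->nbytes) {
                    kernel_neon_begin();
                    do_one_step(walk);
                    kernel_neon_end();      /* preemption possible again here */
                    err = skcipher_walk_done(walk, 0);
            }
            return err;
    }

The per-step begin/end pair costs a little extra overhead on each scatterwalk
step, but it bounds every non-preemptible region by the size of a single step
instead of the size of the whole request.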

arch/arm/crypto/aes-ce-glue.c
index 75d2ff0..486e862 100644
@@ -177,15 +177,15 @@ static int ecb_encrypt(struct skcipher_request *req)
        unsigned int blocks;
        int err;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_enc, num_rounds(ctx), blocks);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
 }
 
@@ -197,15 +197,15 @@ static int ecb_decrypt(struct skcipher_request *req)
        unsigned int blocks;
        int err;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_dec, num_rounds(ctx), blocks);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
 }
 
@@ -217,16 +217,16 @@ static int cbc_encrypt(struct skcipher_request *req)
        unsigned int blocks;
        int err;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_enc, num_rounds(ctx), blocks,
                                   walk.iv);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
 }
 
@@ -238,16 +238,16 @@ static int cbc_decrypt(struct skcipher_request *req)
        unsigned int blocks;
        int err;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_dec, num_rounds(ctx), blocks,
                                   walk.iv);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
 }
 
@@ -258,13 +258,14 @@ static int ctr_encrypt(struct skcipher_request *req)
        struct skcipher_walk walk;
        int err, blocks;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_enc, num_rounds(ctx), blocks,
                                   walk.iv);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        if (walk.nbytes) {
@@ -278,13 +279,13 @@ static int ctr_encrypt(struct skcipher_request *req)
                 */
                blocks = -1;
 
+               kernel_neon_begin();
                ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx),
                                   blocks, walk.iv);
+               kernel_neon_end();
                crypto_xor_cpy(tdst, tsrc, tail, nbytes);
                err = skcipher_walk_done(&walk, 0);
        }
-       kernel_neon_end();
-
        return err;
 }
 
@@ -319,17 +320,16 @@ static int xts_encrypt(struct skcipher_request *req)
        struct skcipher_walk walk;
        unsigned int blocks;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+               kernel_neon_begin();
                ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key1.key_enc, rounds, blocks, walk.iv,
                                   ctx->key2.key_enc, first);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
-
        return err;
 }
 
@@ -341,17 +341,16 @@ static int xts_decrypt(struct skcipher_request *req)
        struct skcipher_walk walk;
        unsigned int blocks;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+               kernel_neon_begin();
                ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key1.key_dec, rounds, blocks, walk.iv,
                                   ctx->key2.key_enc, first);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
-
        return err;
 }
 
arch/arm/crypto/aes-neonbs-glue.c
index 45cd981..9000d07 100644
@@ -90,9 +90,8 @@ static int __ecb_crypt(struct skcipher_request *req,
        struct skcipher_walk walk;
        int err;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -100,12 +99,13 @@ static int __ecb_crypt(struct skcipher_request *req,
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);
 
+               kernel_neon_begin();
                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
                   ctx->rounds, blocks);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
 
        return err;
 }
@@ -159,9 +159,8 @@ static int cbc_decrypt(struct skcipher_request *req)
        struct skcipher_walk walk;
        int err;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -169,13 +168,14 @@ static int cbc_decrypt(struct skcipher_request *req)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);
 
+               kernel_neon_begin();
                aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                  ctx->key.rk, ctx->key.rounds, blocks,
                                  walk.iv);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
 
        return err;
 }
@@ -223,9 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
        u8 buf[AES_BLOCK_SIZE];
        int err;
 
-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);
 
-       kernel_neon_begin();
        while (walk.nbytes > 0) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
                u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
@@ -236,8 +235,10 @@ static int ctr_encrypt(struct skcipher_request *req)
                        final = NULL;
                }
 
+               kernel_neon_begin();
                aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                  ctx->rk, ctx->rounds, blocks, walk.iv, final);
+               kernel_neon_end();
 
                if (final) {
                        u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -252,7 +253,6 @@ static int ctr_encrypt(struct skcipher_request *req)
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
 
        return err;
 }
@@ -329,7 +329,6 @@ static int __xts_crypt(struct skcipher_request *req,
 
        crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
 
-       kernel_neon_begin();
        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -337,12 +336,13 @@ static int __xts_crypt(struct skcipher_request *req,
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);
 
+               kernel_neon_begin();
                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
                   ctx->key.rounds, blocks, walk.iv);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
 
        return err;
 }