OSDN Git Service

crypto: inside-secure - fix hash when length is a multiple of a block
author: Antoine Ténart <antoine.tenart@free-electrons.com>
Tue, 26 Dec 2017 16:21:17 +0000 (17:21 +0100)
committer: Herbert Xu <herbert@gondor.apana.org.au>
Fri, 5 Jan 2018 07:43:06 +0000 (18:43 +1100)
This patch fixes the hash support in the SafeXcel driver when the update
size is a multiple of a block size, and when a final call is made just
after with a size of 0. In such cases the driver should cache the last
block from the update to avoid handling 0 length data on the final call
(that's a hardware limitation).

Cc: stable@vger.kernel.org
Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/inside-secure/safexcel_hash.c

index fbc03d6..122a2a5 100644
@@ -189,17 +189,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
        else
                cache_len = queued - areq->nbytes;
 
-       /*
-        * If this is not the last request and the queued data does not fit
-        * into full blocks, cache it for the next send() call.
-        */
-       extra = queued & (crypto_ahash_blocksize(ahash) - 1);
-       if (!req->last_req && extra) {
-               sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
-                                  req->cache_next, extra, areq->nbytes - extra);
+       if (!req->last_req) {
+               /* If this is not the last request and the queued data does not
+                * fit into full blocks, cache it for the next send() call.
+                */
+               extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+               if (!extra)
+                       /* If this is not the last request and the queued data
+                        * is a multiple of a block, cache the last one for now.
+                        */
+                       extra = queued - crypto_ahash_blocksize(ahash);
 
-               queued -= extra;
-               len -= extra;
+               if (extra) {
+                       sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+                                          req->cache_next, extra,
+                                          areq->nbytes - extra);
+
+                       queued -= extra;
+                       len -= extra;
+
+                       if (!queued) {
+                               *commands = 0;
+                               *results = 0;
+                               return 0;
+                       }
+               }
        }
 
        spin_lock_bh(&priv->ring[ring].egress_lock);