// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}
static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}
static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}
static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}
static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}
static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}
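/*
 * Worked example for the padding length above: for creq->len == 55,
 * index = 55 and padlen = 1, so data (55) + pad (1, the 0x80 marker)
 * + length field (8) is exactly one 64-byte block. For creq->len == 56,
 * index = 56 and padlen = 64, pushing the length field into the next
 * block, as Merkle-Damgard padding requires.
 */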
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
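/*
 * PIO ("std") processing path: each step copies the cached left-over
 * bytes plus as much new data as fits into the engine SRAM, fixes up
 * the fragment mode (FIRST/MID/LAST/NOT_FRAG) in the operation
 * descriptor, and then kicks the accelerator for one SRAM payload.
 */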
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}
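/*
 * The completion handler below has two result paths: when the TDMA
 * chain ends in a CESA_TDMA_RESULT descriptor, the digest was already
 * copied out of the SRAM in the right byte order; otherwise the state
 * is read back from the IVDIG registers and, on the last request,
 * converted to the algorithm's endianness (MD5 is little-endian,
 * SHA-1/SHA-256 are big-endian).
 */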
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};
static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}
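/*
 * Updates smaller than one hash block cannot be fed to the engine on
 * their own; they are accumulated in creq->cache and prepended to the
 * data of a later request (or padded and flushed with the final one).
 */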
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl,
					  frag_len + len, flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
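/*
 * Build the full TDMA chain for one request: transfer the cached
 * left-over bytes, then the new payload (one operation block plus
 * dummy-launch descriptor per full SRAM payload), and finally the
 * closing operation built by mv_cesa_ahash_dma_last_req() when this
 * is the last request.
 */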
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}
static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
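/*
 * Note on the import above: do_div() divides 'len' in place and
 * returns the remainder, so cache_ptr ends up holding the number of
 * bytes of the partial block that must be restored into creq->cache.
 */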
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}
static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}
static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}
static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}
static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}
static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}
struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};
static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}
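/*
 * HMAC key preprocessing (RFC 2104): a key longer than the block size
 * is replaced by its digest, the result is zero-padded to a full
 * block, and the inner/outer pads are derived by XORing every byte
 * with HMAC_IPAD_VALUE (0x36) and HMAC_OPAD_VALUE (0x5c).
 */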
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
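/*
 * mv_cesa_ahmac_setkey() allocates a transform for hash_alg_name
 * (this driver's own "mv-*" ahash), hashes one block of ipad and one
 * of opad, and exports the two intermediate states; those become the
 * precomputed inner/outer IVs loaded into the engine for every HMAC
 * operation on this key.
 */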
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}
static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}
static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		},
	},
};