// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
bool caam_imx;
EXPORT_SYMBOL(caam_imx);
#endif
/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;
struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};
/**
 * caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}
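/*
 * Note (added for clarity, not in the original source): frame descriptors
 * carry I/O virtual addresses. When the DPSECI object sits behind an IOMMU
 * (priv->domain != NULL), the IOVA must first be translated to a physical
 * address; otherwise the IOVA already is the physical address.
 */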
/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kzalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}
/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
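/*
 * Usage sketch (illustrative only, not part of the original driver):
 * hotpath callers pair these helpers the same way they would pair
 * kzalloc()/kfree(), e.g. when building an extended descriptor:
 *
 *	struct aead_edesc *edesc = qi_cache_zalloc(GFP_DMA | flags);
 *
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 *
 * Every buffer is CAAM_QI_MEMCACHE_SIZE bytes regardless of the request,
 * so callers must check that their layout fits (see aead_edesc_alloc()).
 */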
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
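/*
 * Key layout sketch (informational, not in the original source): after a
 * successful aead_setkey(), ctx->key holds the two keys back to back, and
 * aead_set_sh_desc() later points adata/cdata at the two regions, either
 * inline (key_virt) or by DMA address (key_dma):
 *
 *	ctx->key: | auth key, padded to adata.keylen_pad | enc key |
 *	          0                     adata.keylen_pad ^  + cdata.keylen
 */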
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

	return edesc;
}
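/*
 * Input frame list layout sketch (informational, not in the original
 * source), matching the qm_sg_nents computation above:
 *
 *	sg_table[0]             -> edesc->assoclen (4 bytes)
 *	sg_table[1]             -> IV, only when ivsize != 0
 *	sg_table[..]            -> req->src entries
 *	sg_table[qm_sg_index..] -> req->dst entries, only when dst != src
 *	                           and mapped_dst_nents > 1
 *
 * in_fle points at the start of this table; out_fle points either at a
 * single buffer or into the table at the destination offset computed above.
 */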
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}
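/*
 * Key layout sketch (informational, not in the original source): for
 * rfc4106(gcm(aes)) the API passes {AES key, 4-byte salt}. Only the AES
 * key length is recorded in cdata.keylen; the trailing salt stays in
 * ctx->key and is folded into the nonce by the shared descriptors.
 */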
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(dev, "key size mismatch\n");
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
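/*
 * Note (added for clarity, not in the original source): XTS consumes two
 * concatenated AES keys of equal size, so the only keylen values accepted
 * above are 32 bytes (2 x AES-128) and 64 bytes (2 x AES-256).
 */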
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	if (req->src == req->dst) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	} else if (mapped_dst_nents > 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	}

	return edesc;
}
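/*
 * Input frame list layout sketch (informational, not in the original
 * source): the IV is prepended so the hardware sees one logical input of
 * req->cryptlen + ivsize bytes:
 *
 *	sg_table[0]            -> IV
 *	sg_table[1..]          -> req->src entries
 *	sg_table[dst_sg_idx..] -> req->dst entries, only when dst != src
 *	                          and mapped_dst_nents > 1
 */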
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
		     JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
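/*
 * Usage sketch (illustrative only, not part of this driver): callers reach
 * this entry point through the generic crypto API, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	...
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *
 * dpaa2_caam_enqueue() returning -EINPROGRESS is the normal asynchronous
 * path; completion is then signalled from aead_encrypt_done() via
 * aead_request_complete().
 */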
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_decrypt(req);
}
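/*
 * Note (added for clarity, not in the original source): for the IPsec GCM
 * variants the associated data starts with the ESP header, so a well-formed
 * request carries at least 8 bytes (SPI + sequence number) of assoclen;
 * anything shorter is rejected before reaching the accelerator.
 */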
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
				 ivsize, 0);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
				 ivsize, 0);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
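/*
 * Note (added for clarity, not in the original source): on decrypt the last
 * ciphertext block is saved from req->src *before* the job is enqueued,
 * because the operation may run in place and overwrite it; on encrypt the
 * equivalent copy happens after completion, in skcipher_encrypt_done().
 */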
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
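/*
 * DMA layout sketch (informational, not in the original source): one
 * mapping covers the leading members of struct caam_ctx, so the per-op
 * Flow Context and key addresses are plain offsets into it:
 *
 *	dma_addr: | flc[0] | ... | flc[NUM_OP - 1] | key[] |
 *	            ^ flc_dma[i] = dma_addr + i * sizeof(flc[i])
 *	                                            ^ key_dma
 *
 * This relies on flc[] and key[] being laid out back to back at the start
 * of struct caam_ctx, up to offsetof(struct caam_ctx, flc_dma).
 */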
static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
}
static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
1827 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1828 .cra_driver_name = "authenc-hmac-md5-"
1829 "cbc-des3_ede-caam-qi2",
1830 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1832 .setkey = aead_setkey,
1833 .setauthsize = aead_setauthsize,
1834 .encrypt = aead_encrypt,
1835 .decrypt = aead_decrypt,
1836 .ivsize = DES3_EDE_BLOCK_SIZE,
1837 .maxauthsize = MD5_DIGEST_SIZE,
1840 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1841 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1842 OP_ALG_AAI_HMAC_PRECOMP,
1848 .cra_name = "echainiv(authenc(hmac(md5),"
1850 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1851 "cbc-des3_ede-caam-qi2",
1852 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1854 .setkey = aead_setkey,
1855 .setauthsize = aead_setauthsize,
1856 .encrypt = aead_encrypt,
1857 .decrypt = aead_decrypt,
1858 .ivsize = DES3_EDE_BLOCK_SIZE,
1859 .maxauthsize = MD5_DIGEST_SIZE,
1862 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1863 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1864 OP_ALG_AAI_HMAC_PRECOMP,
1871 .cra_name = "authenc(hmac(sha1),"
1873 .cra_driver_name = "authenc-hmac-sha1-"
1874 "cbc-des3_ede-caam-qi2",
1875 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1877 .setkey = aead_setkey,
1878 .setauthsize = aead_setauthsize,
1879 .encrypt = aead_encrypt,
1880 .decrypt = aead_decrypt,
1881 .ivsize = DES3_EDE_BLOCK_SIZE,
1882 .maxauthsize = SHA1_DIGEST_SIZE,
1885 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1886 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1887 OP_ALG_AAI_HMAC_PRECOMP,
1893 .cra_name = "echainiv(authenc(hmac(sha1),"
1895 .cra_driver_name = "echainiv-authenc-"
1897 "cbc-des3_ede-caam-qi2",
1898 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1900 .setkey = aead_setkey,
1901 .setauthsize = aead_setauthsize,
1902 .encrypt = aead_encrypt,
1903 .decrypt = aead_decrypt,
1904 .ivsize = DES3_EDE_BLOCK_SIZE,
1905 .maxauthsize = SHA1_DIGEST_SIZE,
1908 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1909 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1910 OP_ALG_AAI_HMAC_PRECOMP,
1917 .cra_name = "authenc(hmac(sha224),"
1919 .cra_driver_name = "authenc-hmac-sha224-"
1920 "cbc-des3_ede-caam-qi2",
1921 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1923 .setkey = aead_setkey,
1924 .setauthsize = aead_setauthsize,
1925 .encrypt = aead_encrypt,
1926 .decrypt = aead_decrypt,
1927 .ivsize = DES3_EDE_BLOCK_SIZE,
1928 .maxauthsize = SHA224_DIGEST_SIZE,
1931 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1932 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1933 OP_ALG_AAI_HMAC_PRECOMP,
1939 .cra_name = "echainiv(authenc(hmac(sha224),"
1941 .cra_driver_name = "echainiv-authenc-"
1943 "cbc-des3_ede-caam-qi2",
1944 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1946 .setkey = aead_setkey,
1947 .setauthsize = aead_setauthsize,
1948 .encrypt = aead_encrypt,
1949 .decrypt = aead_decrypt,
1950 .ivsize = DES3_EDE_BLOCK_SIZE,
1951 .maxauthsize = SHA224_DIGEST_SIZE,
1954 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1955 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1956 OP_ALG_AAI_HMAC_PRECOMP,
1963 .cra_name = "authenc(hmac(sha256),"
1965 .cra_driver_name = "authenc-hmac-sha256-"
1966 "cbc-des3_ede-caam-qi2",
1967 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1969 .setkey = aead_setkey,
1970 .setauthsize = aead_setauthsize,
1971 .encrypt = aead_encrypt,
1972 .decrypt = aead_decrypt,
1973 .ivsize = DES3_EDE_BLOCK_SIZE,
1974 .maxauthsize = SHA256_DIGEST_SIZE,
1977 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1978 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1979 OP_ALG_AAI_HMAC_PRECOMP,
1985 .cra_name = "echainiv(authenc(hmac(sha256),"
1987 .cra_driver_name = "echainiv-authenc-"
1989 "cbc-des3_ede-caam-qi2",
1990 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1992 .setkey = aead_setkey,
1993 .setauthsize = aead_setauthsize,
1994 .encrypt = aead_encrypt,
1995 .decrypt = aead_decrypt,
1996 .ivsize = DES3_EDE_BLOCK_SIZE,
1997 .maxauthsize = SHA256_DIGEST_SIZE,
2000 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2001 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2002 OP_ALG_AAI_HMAC_PRECOMP,
2009 .cra_name = "authenc(hmac(sha384),"
2011 .cra_driver_name = "authenc-hmac-sha384-"
2012 "cbc-des3_ede-caam-qi2",
2013 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2015 .setkey = aead_setkey,
2016 .setauthsize = aead_setauthsize,
2017 .encrypt = aead_encrypt,
2018 .decrypt = aead_decrypt,
2019 .ivsize = DES3_EDE_BLOCK_SIZE,
2020 .maxauthsize = SHA384_DIGEST_SIZE,
2023 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2024 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2025 OP_ALG_AAI_HMAC_PRECOMP,
2031 .cra_name = "echainiv(authenc(hmac(sha384),"
2033 .cra_driver_name = "echainiv-authenc-"
2035 "cbc-des3_ede-caam-qi2",
2036 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2038 .setkey = aead_setkey,
2039 .setauthsize = aead_setauthsize,
2040 .encrypt = aead_encrypt,
2041 .decrypt = aead_decrypt,
2042 .ivsize = DES3_EDE_BLOCK_SIZE,
2043 .maxauthsize = SHA384_DIGEST_SIZE,
2046 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2047 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2048 OP_ALG_AAI_HMAC_PRECOMP,
2055 .cra_name = "authenc(hmac(sha512),"
2057 .cra_driver_name = "authenc-hmac-sha512-"
2058 "cbc-des3_ede-caam-qi2",
2059 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2061 .setkey = aead_setkey,
2062 .setauthsize = aead_setauthsize,
2063 .encrypt = aead_encrypt,
2064 .decrypt = aead_decrypt,
2065 .ivsize = DES3_EDE_BLOCK_SIZE,
2066 .maxauthsize = SHA512_DIGEST_SIZE,
2069 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2070 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2071 OP_ALG_AAI_HMAC_PRECOMP,
2077 .cra_name = "echainiv(authenc(hmac(sha512),"
2079 .cra_driver_name = "echainiv-authenc-"
2081 "cbc-des3_ede-caam-qi2",
2082 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2084 .setkey = aead_setkey,
2085 .setauthsize = aead_setauthsize,
2086 .encrypt = aead_encrypt,
2087 .decrypt = aead_decrypt,
2088 .ivsize = DES3_EDE_BLOCK_SIZE,
2089 .maxauthsize = SHA512_DIGEST_SIZE,
2092 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2093 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2094 OP_ALG_AAI_HMAC_PRECOMP,
2101 .cra_name = "authenc(hmac(md5),cbc(des))",
2102 .cra_driver_name = "authenc-hmac-md5-"
2104 .cra_blocksize = DES_BLOCK_SIZE,
2106 .setkey = aead_setkey,
2107 .setauthsize = aead_setauthsize,
2108 .encrypt = aead_encrypt,
2109 .decrypt = aead_decrypt,
2110 .ivsize = DES_BLOCK_SIZE,
2111 .maxauthsize = MD5_DIGEST_SIZE,
2114 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2115 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2116 OP_ALG_AAI_HMAC_PRECOMP,
2122 .cra_name = "echainiv(authenc(hmac(md5),"
2124 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2126 .cra_blocksize = DES_BLOCK_SIZE,
2128 .setkey = aead_setkey,
2129 .setauthsize = aead_setauthsize,
2130 .encrypt = aead_encrypt,
2131 .decrypt = aead_decrypt,
2132 .ivsize = DES_BLOCK_SIZE,
2133 .maxauthsize = MD5_DIGEST_SIZE,
2136 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2137 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2138 OP_ALG_AAI_HMAC_PRECOMP,
2145 .cra_name = "authenc(hmac(sha1),cbc(des))",
2146 .cra_driver_name = "authenc-hmac-sha1-"
2148 .cra_blocksize = DES_BLOCK_SIZE,
2150 .setkey = aead_setkey,
2151 .setauthsize = aead_setauthsize,
2152 .encrypt = aead_encrypt,
2153 .decrypt = aead_decrypt,
2154 .ivsize = DES_BLOCK_SIZE,
2155 .maxauthsize = SHA1_DIGEST_SIZE,
2158 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2159 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2160 OP_ALG_AAI_HMAC_PRECOMP,
2166 .cra_name = "echainiv(authenc(hmac(sha1),"
2168 .cra_driver_name = "echainiv-authenc-"
2169 "hmac-sha1-cbc-des-caam-qi2",
2170 .cra_blocksize = DES_BLOCK_SIZE,
2172 .setkey = aead_setkey,
2173 .setauthsize = aead_setauthsize,
2174 .encrypt = aead_encrypt,
2175 .decrypt = aead_decrypt,
2176 .ivsize = DES_BLOCK_SIZE,
2177 .maxauthsize = SHA1_DIGEST_SIZE,
2180 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2181 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2182 OP_ALG_AAI_HMAC_PRECOMP,
2189 .cra_name = "authenc(hmac(sha224),cbc(des))",
2190 .cra_driver_name = "authenc-hmac-sha224-"
2192 .cra_blocksize = DES_BLOCK_SIZE,
2194 .setkey = aead_setkey,
2195 .setauthsize = aead_setauthsize,
2196 .encrypt = aead_encrypt,
2197 .decrypt = aead_decrypt,
2198 .ivsize = DES_BLOCK_SIZE,
2199 .maxauthsize = SHA224_DIGEST_SIZE,
2202 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2203 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2204 OP_ALG_AAI_HMAC_PRECOMP,
2210 .cra_name = "echainiv(authenc(hmac(sha224),"
2212 .cra_driver_name = "echainiv-authenc-"
2213 "hmac-sha224-cbc-des-"
2215 .cra_blocksize = DES_BLOCK_SIZE,
2217 .setkey = aead_setkey,
2218 .setauthsize = aead_setauthsize,
2219 .encrypt = aead_encrypt,
2220 .decrypt = aead_decrypt,
2221 .ivsize = DES_BLOCK_SIZE,
2222 .maxauthsize = SHA224_DIGEST_SIZE,
2225 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2226 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2227 OP_ALG_AAI_HMAC_PRECOMP,
2234 .cra_name = "authenc(hmac(sha256),cbc(des))",
2235 .cra_driver_name = "authenc-hmac-sha256-"
2237 .cra_blocksize = DES_BLOCK_SIZE,
2239 .setkey = aead_setkey,
2240 .setauthsize = aead_setauthsize,
2241 .encrypt = aead_encrypt,
2242 .decrypt = aead_decrypt,
2243 .ivsize = DES_BLOCK_SIZE,
2244 .maxauthsize = SHA256_DIGEST_SIZE,
2247 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2248 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2249 OP_ALG_AAI_HMAC_PRECOMP,
2255 .cra_name = "echainiv(authenc(hmac(sha256),"
2257 .cra_driver_name = "echainiv-authenc-"
2258 "hmac-sha256-cbc-desi-"
2260 .cra_blocksize = DES_BLOCK_SIZE,
2262 .setkey = aead_setkey,
2263 .setauthsize = aead_setauthsize,
2264 .encrypt = aead_encrypt,
2265 .decrypt = aead_decrypt,
2266 .ivsize = DES_BLOCK_SIZE,
2267 .maxauthsize = SHA256_DIGEST_SIZE,
2270 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2271 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2272 OP_ALG_AAI_HMAC_PRECOMP,
2279 .cra_name = "authenc(hmac(sha384),cbc(des))",
2280 .cra_driver_name = "authenc-hmac-sha384-"
2282 .cra_blocksize = DES_BLOCK_SIZE,
2284 .setkey = aead_setkey,
2285 .setauthsize = aead_setauthsize,
2286 .encrypt = aead_encrypt,
2287 .decrypt = aead_decrypt,
2288 .ivsize = DES_BLOCK_SIZE,
2289 .maxauthsize = SHA384_DIGEST_SIZE,
2292 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2293 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2294 OP_ALG_AAI_HMAC_PRECOMP,
2300 .cra_name = "echainiv(authenc(hmac(sha384),"
2302 .cra_driver_name = "echainiv-authenc-"
2303 "hmac-sha384-cbc-des-"
2305 .cra_blocksize = DES_BLOCK_SIZE,
2307 .setkey = aead_setkey,
2308 .setauthsize = aead_setauthsize,
2309 .encrypt = aead_encrypt,
2310 .decrypt = aead_decrypt,
2311 .ivsize = DES_BLOCK_SIZE,
2312 .maxauthsize = SHA384_DIGEST_SIZE,
2315 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2316 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2317 OP_ALG_AAI_HMAC_PRECOMP,
2324 .cra_name = "authenc(hmac(sha512),cbc(des))",
2325 .cra_driver_name = "authenc-hmac-sha512-"
2327 .cra_blocksize = DES_BLOCK_SIZE,
2329 .setkey = aead_setkey,
2330 .setauthsize = aead_setauthsize,
2331 .encrypt = aead_encrypt,
2332 .decrypt = aead_decrypt,
2333 .ivsize = DES_BLOCK_SIZE,
2334 .maxauthsize = SHA512_DIGEST_SIZE,
2337 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2338 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2339 OP_ALG_AAI_HMAC_PRECOMP,
2345 .cra_name = "echainiv(authenc(hmac(sha512),"
2347 .cra_driver_name = "echainiv-authenc-"
2348 "hmac-sha512-cbc-des-"
2350 .cra_blocksize = DES_BLOCK_SIZE,
2352 .setkey = aead_setkey,
2353 .setauthsize = aead_setauthsize,
2354 .encrypt = aead_encrypt,
2355 .decrypt = aead_decrypt,
2356 .ivsize = DES_BLOCK_SIZE,
2357 .maxauthsize = SHA512_DIGEST_SIZE,
2360 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2361 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2362 OP_ALG_AAI_HMAC_PRECOMP,
2369 .cra_name = "authenc(hmac(md5),"
2370 "rfc3686(ctr(aes)))",
2371 .cra_driver_name = "authenc-hmac-md5-"
2372 "rfc3686-ctr-aes-caam-qi2",
2375 .setkey = aead_setkey,
2376 .setauthsize = aead_setauthsize,
2377 .encrypt = aead_encrypt,
2378 .decrypt = aead_decrypt,
2379 .ivsize = CTR_RFC3686_IV_SIZE,
2380 .maxauthsize = MD5_DIGEST_SIZE,
2383 .class1_alg_type = OP_ALG_ALGSEL_AES |
2384 OP_ALG_AAI_CTR_MOD128,
2385 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2386 OP_ALG_AAI_HMAC_PRECOMP,
2393 .cra_name = "seqiv(authenc("
2394 "hmac(md5),rfc3686(ctr(aes))))",
2395 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2396 "rfc3686-ctr-aes-caam-qi2",
2399 .setkey = aead_setkey,
2400 .setauthsize = aead_setauthsize,
2401 .encrypt = aead_encrypt,
2402 .decrypt = aead_decrypt,
2403 .ivsize = CTR_RFC3686_IV_SIZE,
2404 .maxauthsize = MD5_DIGEST_SIZE,
2407 .class1_alg_type = OP_ALG_ALGSEL_AES |
2408 OP_ALG_AAI_CTR_MOD128,
2409 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2410 OP_ALG_AAI_HMAC_PRECOMP,
2418 .cra_name = "authenc(hmac(sha1),"
2419 "rfc3686(ctr(aes)))",
2420 .cra_driver_name = "authenc-hmac-sha1-"
2421 "rfc3686-ctr-aes-caam-qi2",
2424 .setkey = aead_setkey,
2425 .setauthsize = aead_setauthsize,
2426 .encrypt = aead_encrypt,
2427 .decrypt = aead_decrypt,
2428 .ivsize = CTR_RFC3686_IV_SIZE,
2429 .maxauthsize = SHA1_DIGEST_SIZE,
2432 .class1_alg_type = OP_ALG_ALGSEL_AES |
2433 OP_ALG_AAI_CTR_MOD128,
2434 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2435 OP_ALG_AAI_HMAC_PRECOMP,
2442 .cra_name = "seqiv(authenc("
2443 "hmac(sha1),rfc3686(ctr(aes))))",
2444 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2445 "rfc3686-ctr-aes-caam-qi2",
2448 .setkey = aead_setkey,
2449 .setauthsize = aead_setauthsize,
2450 .encrypt = aead_encrypt,
2451 .decrypt = aead_decrypt,
2452 .ivsize = CTR_RFC3686_IV_SIZE,
2453 .maxauthsize = SHA1_DIGEST_SIZE,
2456 .class1_alg_type = OP_ALG_ALGSEL_AES |
2457 OP_ALG_AAI_CTR_MOD128,
2458 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2459 OP_ALG_AAI_HMAC_PRECOMP,
2467 .cra_name = "authenc(hmac(sha224),"
2468 "rfc3686(ctr(aes)))",
2469 .cra_driver_name = "authenc-hmac-sha224-"
2470 "rfc3686-ctr-aes-caam-qi2",
2473 .setkey = aead_setkey,
2474 .setauthsize = aead_setauthsize,
2475 .encrypt = aead_encrypt,
2476 .decrypt = aead_decrypt,
2477 .ivsize = CTR_RFC3686_IV_SIZE,
2478 .maxauthsize = SHA224_DIGEST_SIZE,
2481 .class1_alg_type = OP_ALG_ALGSEL_AES |
2482 OP_ALG_AAI_CTR_MOD128,
2483 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2484 OP_ALG_AAI_HMAC_PRECOMP,
2491 .cra_name = "seqiv(authenc("
2492 "hmac(sha224),rfc3686(ctr(aes))))",
2493 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2494 "rfc3686-ctr-aes-caam-qi2",
2497 .setkey = aead_setkey,
2498 .setauthsize = aead_setauthsize,
2499 .encrypt = aead_encrypt,
2500 .decrypt = aead_decrypt,
2501 .ivsize = CTR_RFC3686_IV_SIZE,
2502 .maxauthsize = SHA224_DIGEST_SIZE,
2505 .class1_alg_type = OP_ALG_ALGSEL_AES |
2506 OP_ALG_AAI_CTR_MOD128,
2507 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2508 OP_ALG_AAI_HMAC_PRECOMP,
2516 .cra_name = "authenc(hmac(sha256),"
2517 "rfc3686(ctr(aes)))",
2518 .cra_driver_name = "authenc-hmac-sha256-"
2519 "rfc3686-ctr-aes-caam-qi2",
2522 .setkey = aead_setkey,
2523 .setauthsize = aead_setauthsize,
2524 .encrypt = aead_encrypt,
2525 .decrypt = aead_decrypt,
2526 .ivsize = CTR_RFC3686_IV_SIZE,
2527 .maxauthsize = SHA256_DIGEST_SIZE,
2530 .class1_alg_type = OP_ALG_ALGSEL_AES |
2531 OP_ALG_AAI_CTR_MOD128,
2532 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2533 OP_ALG_AAI_HMAC_PRECOMP,
2540 .cra_name = "seqiv(authenc(hmac(sha256),"
2541 "rfc3686(ctr(aes))))",
2542 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2543 "rfc3686-ctr-aes-caam-qi2",
2546 .setkey = aead_setkey,
2547 .setauthsize = aead_setauthsize,
2548 .encrypt = aead_encrypt,
2549 .decrypt = aead_decrypt,
2550 .ivsize = CTR_RFC3686_IV_SIZE,
2551 .maxauthsize = SHA256_DIGEST_SIZE,
2554 .class1_alg_type = OP_ALG_ALGSEL_AES |
2555 OP_ALG_AAI_CTR_MOD128,
2556 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2557 OP_ALG_AAI_HMAC_PRECOMP,
2565 .cra_name = "authenc(hmac(sha384),"
2566 "rfc3686(ctr(aes)))",
2567 .cra_driver_name = "authenc-hmac-sha384-"
2568 "rfc3686-ctr-aes-caam-qi2",
2571 .setkey = aead_setkey,
2572 .setauthsize = aead_setauthsize,
2573 .encrypt = aead_encrypt,
2574 .decrypt = aead_decrypt,
2575 .ivsize = CTR_RFC3686_IV_SIZE,
2576 .maxauthsize = SHA384_DIGEST_SIZE,
2579 .class1_alg_type = OP_ALG_ALGSEL_AES |
2580 OP_ALG_AAI_CTR_MOD128,
2581 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2582 OP_ALG_AAI_HMAC_PRECOMP,
2589 .cra_name = "seqiv(authenc(hmac(sha384),"
2590 "rfc3686(ctr(aes))))",
2591 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2592 "rfc3686-ctr-aes-caam-qi2",
2595 .setkey = aead_setkey,
2596 .setauthsize = aead_setauthsize,
2597 .encrypt = aead_encrypt,
2598 .decrypt = aead_decrypt,
2599 .ivsize = CTR_RFC3686_IV_SIZE,
2600 .maxauthsize = SHA384_DIGEST_SIZE,
2603 .class1_alg_type = OP_ALG_ALGSEL_AES |
2604 OP_ALG_AAI_CTR_MOD128,
2605 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2606 OP_ALG_AAI_HMAC_PRECOMP,
2614 .cra_name = "authenc(hmac(sha512),"
2615 "rfc3686(ctr(aes)))",
2616 .cra_driver_name = "authenc-hmac-sha512-"
2617 "rfc3686-ctr-aes-caam-qi2",
2620 .setkey = aead_setkey,
2621 .setauthsize = aead_setauthsize,
2622 .encrypt = aead_encrypt,
2623 .decrypt = aead_decrypt,
2624 .ivsize = CTR_RFC3686_IV_SIZE,
2625 .maxauthsize = SHA512_DIGEST_SIZE,
2628 .class1_alg_type = OP_ALG_ALGSEL_AES |
2629 OP_ALG_AAI_CTR_MOD128,
2630 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2631 OP_ALG_AAI_HMAC_PRECOMP,
2638 .cra_name = "seqiv(authenc(hmac(sha512),"
2639 "rfc3686(ctr(aes))))",
2640 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2641 "rfc3686-ctr-aes-caam-qi2",
2644 .setkey = aead_setkey,
2645 .setauthsize = aead_setauthsize,
2646 .encrypt = aead_encrypt,
2647 .decrypt = aead_decrypt,
2648 .ivsize = CTR_RFC3686_IV_SIZE,
2649 .maxauthsize = SHA512_DIGEST_SIZE,
2652 .class1_alg_type = OP_ALG_ALGSEL_AES |
2653 OP_ALG_AAI_CTR_MOD128,
2654 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2655 OP_ALG_AAI_HMAC_PRECOMP,
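/*
 * Illustrative sketch (not part of this driver): a kernel consumer reaches
 * one of the AEAD templates above through the generic crypto API, either by
 * algorithm name (CAAM wins on cra_priority) or by cra_driver_name. Key
 * format, buffer setup and error handling are elided; names here are
 * placeholders.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(des))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);	// authenc-encoded key blob
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, ctx);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	crypto_aead_encrypt(req);		// lands in aead_encrypt()
 */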
2662 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2664 struct skcipher_alg *alg = &t_alg->skcipher;
2666 alg->base.cra_module = THIS_MODULE;
2667 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2668 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2669 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2671 alg->init = caam_cra_init_skcipher;
2672 alg->exit = caam_cra_exit;
2675 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2677 struct aead_alg *alg = &t_alg->aead;
2679 alg->base.cra_module = THIS_MODULE;
2680 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2681 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2682 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2684 alg->init = caam_cra_init_aead;
2685 alg->exit = caam_cra_exit_aead;
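/*
 * A minimal sketch of how the init helpers above are used at probe time,
 * assuming the AEAD template array carries its usual driver_aeads name
 * (the actual loop also gates registration on hardware capabilities):
 *
 *	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 *		struct caam_aead_alg *t_alg = driver_aeads + i;
 *
 *		caam_aead_alg_init(t_alg);
 *		err = crypto_register_aead(&t_alg->aead);
 *		if (!err)
 *			t_alg->registered = true;
 *	}
 */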
2688 /* max hash key is max split key size */
2689 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
2691 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
2693 /* caam context sizes for hashes: running digest + 8 */
2694 #define HASH_MSG_LEN 8
2695 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
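/*
 * Worked example for the sizes above: the MDHA running context is the
 * running digest plus an 8-byte message length. hmac(sha256) therefore
 * needs SHA256_DIGEST_SIZE + HASH_MSG_LEN = 40 bytes, and the worst case,
 * (hmac-)sha512, needs 64 + 8 = 72 bytes, which is exactly MAX_CTX_LEN.
 */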
2706 * caam_hash_ctx - ahash per-session context
2707 * @flc: Flow Contexts array
2708 * @flc_dma: I/O virtual addresses of the Flow Contexts
2709 * @dev: dpseci device
2710 * @ctx_len: size of Context Register
2711 * @adata: hashing algorithm details
2713 struct caam_hash_ctx {
2714 struct caam_flc flc[HASH_NUM_OP];
2715 dma_addr_t flc_dma[HASH_NUM_OP];
2718 struct alginfo adata;
2722 struct caam_hash_state {
2723 struct caam_request caam_req;
2726 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2728 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2730 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2731 int (*update)(struct ahash_request *req);
2732 int (*final)(struct ahash_request *req);
2733 int (*finup)(struct ahash_request *req);
2737 struct caam_export_state {
2738 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2739 u8 caam_ctx[MAX_CTX_LEN];
2741 int (*update)(struct ahash_request *req);
2742 int (*final)(struct ahash_request *req);
2743 int (*finup)(struct ahash_request *req);
2746 static inline void switch_buf(struct caam_hash_state *state)
2748 state->current_buf ^= 1;
2751 static inline u8 *current_buf(struct caam_hash_state *state)
2753 return state->current_buf ? state->buf_1 : state->buf_0;
2756 static inline u8 *alt_buf(struct caam_hash_state *state)
2758 return state->current_buf ? state->buf_0 : state->buf_1;
2761 static inline int *current_buflen(struct caam_hash_state *state)
2763 return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2766 static inline int *alt_buflen(struct caam_hash_state *state)
2768 return state->current_buf ? &state->buflen_0 : &state->buflen_1;
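/*
 * The helpers above implement the ping-pong buffering used by the
 * ahash_update_*() paths: bytes that do not yet fill a block are staged
 * in one buffer while the other receives the tail of the next request.
 * Sketched sequence (assumed typical usage, details vary per path):
 *
 *	u8 *buf = current_buf(state);	// carry-over from the last update
 *	u8 *next_buf = alt_buf(state);	// staging area for the new tail
 *	// ...hash *buf plus the block-aligned part of req->src, copy the
 *	// unaligned tail into next_buf, then flip the roles:
 *	switch_buf(state);
 */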
2771 /* Map current buffer in state (if length > 0) and put it in link table */
2772 static inline int buf_map_to_qm_sg(struct device *dev,
2773 struct dpaa2_sg_entry *qm_sg,
2774 struct caam_hash_state *state)
2776 int buflen = *current_buflen(state);
2781 state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2783 if (dma_mapping_error(dev, state->buf_dma)) {
2784 dev_err(dev, "unable to map buf\n");
2789 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
2794 /* Map state->caam_ctx, and add it to link table */
2795 static inline int ctx_map_to_qm_sg(struct device *dev,
2796 struct caam_hash_state *state, int ctx_len,
2797 struct dpaa2_sg_entry *qm_sg, u32 flag)
2799 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2800 if (dma_mapping_error(dev, state->ctx_dma)) {
2801 dev_err(dev, "unable to map ctx\n");
2806 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
2811 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
2813 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2814 int digestsize = crypto_ahash_digestsize(ahash);
2815 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
2816 struct caam_flc *flc;
2819 /* ahash_update shared descriptor */
2820 flc = &ctx->flc[UPDATE];
2821 desc = flc->sh_desc;
2822 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
2823 ctx->ctx_len, true, priv->sec_attr.era);
2824 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2825 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
2826 desc_bytes(desc), DMA_BIDIRECTIONAL);
2827 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
2828 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2831 /* ahash_update_first shared descriptor */
2832 flc = &ctx->flc[UPDATE_FIRST];
2833 desc = flc->sh_desc;
2834 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
2835 ctx->ctx_len, false, priv->sec_attr.era);
2836 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2837 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
2838 desc_bytes(desc), DMA_BIDIRECTIONAL);
2839 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
2840 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2843 /* ahash_final shared descriptor */
2844 flc = &ctx->flc[FINALIZE];
2845 desc = flc->sh_desc;
2846 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
2847 ctx->ctx_len, true, priv->sec_attr.era);
2848 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2849 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
2850 desc_bytes(desc), DMA_BIDIRECTIONAL);
2851 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
2852 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2855 /* ahash_digest shared descriptor */
2856 flc = &ctx->flc[DIGEST];
2857 desc = flc->sh_desc;
2858 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
2859 ctx->ctx_len, false, priv->sec_attr.era);
2860 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2861 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
2862 desc_bytes(desc), DMA_BIDIRECTIONAL);
2863 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
2864 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
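/*
 * Summary of the four shared descriptors constructed above, as encoded in
 * the cnstr_shdsc_ahash() arguments (algorithm state type, import_ctx flag):
 *
 *	UPDATE		OP_ALG_AS_UPDATE,    imports and re-exports the ctx
 *	UPDATE_FIRST	OP_ALG_AS_INIT,      no import, exports the ctx
 *	FINALIZE	OP_ALG_AS_FINALIZE,  imports ctx, outputs the digest
 *	DIGEST		OP_ALG_AS_INITFINAL, one-shot, outputs the digest
 */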
2870 struct split_key_sh_result {
2871 struct completion completion;
2876 static void split_key_sh_done(void *cbk_ctx, u32 err)
2878 struct split_key_sh_result *res = cbk_ctx;
2880 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
2883 caam_qi2_strstatus(res->dev, err);
2886 complete(&res->completion);
2889 /* Hash the key down to digest size if it is longer than the block size */
2890 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
2891 u32 *keylen, u8 *key_out, u32 digestsize)
2893 struct caam_request *req_ctx;
2895 struct split_key_sh_result result;
2896 dma_addr_t src_dma, dst_dma;
2897 struct caam_flc *flc;
2900 struct dpaa2_fl_entry *in_fle, *out_fle;
2902 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
2906 in_fle = &req_ctx->fd_flt[1];
2907 out_fle = &req_ctx->fd_flt[0];
2909 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
2913 src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
2915 if (dma_mapping_error(ctx->dev, src_dma)) {
2916 dev_err(ctx->dev, "unable to map key input memory\n");
2919 dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
2921 if (dma_mapping_error(ctx->dev, dst_dma)) {
2922 dev_err(ctx->dev, "unable to map key output memory\n");
2926 desc = flc->sh_desc;
2928 init_sh_desc(desc, 0);
2930 /* descriptor to perform unkeyed hash on key_in */
2931 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
2932 OP_ALG_AS_INITFINAL);
2933 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
2934 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
2935 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
2936 LDST_SRCDST_BYTE_CONTEXT);
2938 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
2939 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
2940 desc_bytes(desc), DMA_TO_DEVICE);
2941 if (dma_mapping_error(ctx->dev, flc_dma)) {
2942 dev_err(ctx->dev, "unable to map shared descriptor\n");
2946 dpaa2_fl_set_final(in_fle, true);
2947 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
2948 dpaa2_fl_set_addr(in_fle, src_dma);
2949 dpaa2_fl_set_len(in_fle, *keylen);
2950 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
2951 dpaa2_fl_set_addr(out_fle, dst_dma);
2952 dpaa2_fl_set_len(out_fle, digestsize);
2954 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
2955 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
2956 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
2957 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2961 init_completion(&result.completion);
2962 result.dev = ctx->dev;
2965 req_ctx->flc_dma = flc_dma;
2966 req_ctx->cbk = split_key_sh_done;
2967 req_ctx->ctx = &result;
2969 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
2970 if (ret == -EINPROGRESS) {
2972 wait_for_completion(&result.completion);
2974 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
2975 DUMP_PREFIX_ADDRESS, 16, 4, key_in,
2979 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
2982 dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
2984 dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
2990 *keylen = digestsize;
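/*
 * Rationale (RFC 2104): an HMAC key longer than the underlying block size
 * must first be hashed down to digest size. For example, a 100-byte key
 * used with hmac(sha256) (64-byte blocks) is replaced by its 32-byte
 * SHA-256 digest here before the split key is derived from it.
 */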
2995 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2996 unsigned int keylen)
2998 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2999 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3000 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3002 u8 *hashed_key = NULL;
3004 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3006 if (keylen > blocksize) {
3007 hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
3008 GFP_KERNEL | GFP_DMA);
3011 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
3018 ctx->adata.keylen = keylen;
3019 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3020 OP_ALG_ALGSEL_MASK);
3021 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3024 ctx->adata.key_virt = key;
3025 ctx->adata.key_inline = true;
3027 ret = ahash_set_sh_desc(ahash);
3032 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
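/*
 * Caller-side sketch (illustrative, not part of the driver) of the path
 * into ahash_setkey() above; error handling elided:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	// keylen > 64 takes the hash_digest_key() branch above
 */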
3036 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3037 struct ahash_request *req, int dst_len)
3039 struct caam_hash_state *state = ahash_request_ctx(req);
3041 if (edesc->src_nents)
3042 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3044 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
3046 if (edesc->qm_sg_bytes)
3047 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3050 if (state->buf_dma) {
3051 dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3057 static inline void ahash_unmap_ctx(struct device *dev,
3058 struct ahash_edesc *edesc,
3059 struct ahash_request *req, int dst_len,
3062 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3063 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3064 struct caam_hash_state *state = ahash_request_ctx(req);
3066 if (state->ctx_dma) {
3067 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
3070 ahash_unmap(dev, edesc, req, dst_len);
3073 static void ahash_done(void *cbk_ctx, u32 status)
3075 struct crypto_async_request *areq = cbk_ctx;
3076 struct ahash_request *req = ahash_request_cast(areq);
3077 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3078 struct caam_hash_state *state = ahash_request_ctx(req);
3079 struct ahash_edesc *edesc = state->caam_req.edesc;
3080 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3081 int digestsize = crypto_ahash_digestsize(ahash);
3084 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3086 if (unlikely(status)) {
3087 caam_qi2_strstatus(ctx->dev, status);
3091 ahash_unmap(ctx->dev, edesc, req, digestsize);
3092 qi_cache_free(edesc);
3094 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3095 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3098 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3099 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3102 req->base.complete(&req->base, ecode);
3105 static void ahash_done_bi(void *cbk_ctx, u32 status)
3107 struct crypto_async_request *areq = cbk_ctx;
3108 struct ahash_request *req = ahash_request_cast(areq);
3109 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3110 struct caam_hash_state *state = ahash_request_ctx(req);
3111 struct ahash_edesc *edesc = state->caam_req.edesc;
3112 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3115 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3117 if (unlikely(status)) {
3118 caam_qi2_strstatus(ctx->dev, status);
3122 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3124 qi_cache_free(edesc);
3126 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3127 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3130 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3131 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3132 crypto_ahash_digestsize(ahash), 1);
3134 req->base.complete(&req->base, ecode);
3137 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3139 struct crypto_async_request *areq = cbk_ctx;
3140 struct ahash_request *req = ahash_request_cast(areq);
3141 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3142 struct caam_hash_state *state = ahash_request_ctx(req);
3143 struct ahash_edesc *edesc = state->caam_req.edesc;
3144 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3145 int digestsize = crypto_ahash_digestsize(ahash);
3148 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3150 if (unlikely(status)) {
3151 caam_qi2_strstatus(ctx->dev, status);
3155 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
3156 qi_cache_free(edesc);
3158 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3159 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3162 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3163 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3166 req->base.complete(&req->base, ecode);
3169 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3171 struct crypto_async_request *areq = cbk_ctx;
3172 struct ahash_request *req = ahash_request_cast(areq);
3173 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3174 struct caam_hash_state *state = ahash_request_ctx(req);
3175 struct ahash_edesc *edesc = state->caam_req.edesc;
3176 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3179 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3181 if (unlikely(status)) {
3182 caam_qi2_strstatus(ctx->dev, status);
3186 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
3188 qi_cache_free(edesc);
3190 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3191 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3194 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3195 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3196 crypto_ahash_digestsize(ahash), 1);
3198 req->base.complete(&req->base, ecode);
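/*
 * The four completion callbacks above differ only in how they undo the
 * submission-side mapping:
 *
 *	ahash_done		digest out only; ahash_unmap(digestsize)
 *	ahash_done_bi		ctx in+out; unmap_ctx(ctx_len, DMA_BIDIRECTIONAL)
 *	ahash_done_ctx_src	ctx in, digest out; unmap_ctx(digestsize, DMA_TO_DEVICE)
 *	ahash_done_ctx_dst	ctx out; unmap_ctx(ctx_len, DMA_FROM_DEVICE)
 */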
3201 static int ahash_update_ctx(struct ahash_request *req)
3203 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3204 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3205 struct caam_hash_state *state = ahash_request_ctx(req);
3206 struct caam_request *req_ctx = &state->caam_req;
3207 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3208 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3209 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3210 GFP_KERNEL : GFP_ATOMIC;
3211 u8 *buf = current_buf(state);
3212 int *buflen = current_buflen(state);
3213 u8 *next_buf = alt_buf(state);
3214 int *next_buflen = alt_buflen(state), last_buflen;
3215 int in_len = *buflen + req->nbytes, to_hash;
3216 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3217 struct ahash_edesc *edesc;
3220 last_buflen = *next_buflen;
3221 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3222 to_hash = in_len - *next_buflen;
3225 struct dpaa2_sg_entry *sg_table;
3227 src_nents = sg_nents_for_len(req->src,
3228 req->nbytes - (*next_buflen));
3229 if (src_nents < 0) {
3230 dev_err(ctx->dev, "Invalid number of src SG.\n");
3235 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3237 if (!mapped_nents) {
3238 dev_err(ctx->dev, "unable to DMA map source\n");
3245 /* allocate space for base edesc and link tables */
3246 edesc = qi_cache_zalloc(GFP_DMA | flags);
3248 dma_unmap_sg(ctx->dev, req->src, src_nents,
3253 edesc->src_nents = src_nents;
3254 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3255 qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
3257 sg_table = &edesc->sgt[0];
3259 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3264 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3269 sg_to_qm_sg_last(req->src, mapped_nents,
3270 sg_table + qm_sg_src_index, 0);
3272 scatterwalk_map_and_copy(next_buf, req->src,
3276 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3280 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3281 qm_sg_bytes, DMA_TO_DEVICE);
3282 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3283 dev_err(ctx->dev, "unable to map S/G table\n");
3287 edesc->qm_sg_bytes = qm_sg_bytes;
3289 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3290 dpaa2_fl_set_final(in_fle, true);
3291 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3292 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3293 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3294 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3295 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3296 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3298 req_ctx->flc = &ctx->flc[UPDATE];
3299 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3300 req_ctx->cbk = ahash_done_bi;
3301 req_ctx->ctx = &req->base;
3302 req_ctx->edesc = edesc;
3304 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3305 if (ret != -EINPROGRESS &&
3307 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3309 } else if (*next_buflen) {
3310 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3312 *buflen = *next_buflen;
3313 *next_buflen = last_buflen;
3316 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3317 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3318 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3319 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3324 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
3325 qi_cache_free(edesc);
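/*
 * Worked example of the buffering arithmetic above, for a 64-byte block
 * size: with 20 carried-over bytes (*buflen) and req->nbytes == 100,
 * in_len = 120, *next_buflen = 120 & 63 = 56 and to_hash = 64, i.e. one
 * full block is sent to the engine and the 56-byte tail is staged in
 * next_buf for the next update/finup/final call.
 */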
3329 static int ahash_final_ctx(struct ahash_request *req)
3331 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3332 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3333 struct caam_hash_state *state = ahash_request_ctx(req);
3334 struct caam_request *req_ctx = &state->caam_req;
3335 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3336 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3337 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3338 GFP_KERNEL : GFP_ATOMIC;
3339 int buflen = *current_buflen(state);
3340 int qm_sg_bytes, qm_sg_src_index;
3341 int digestsize = crypto_ahash_digestsize(ahash);
3342 struct ahash_edesc *edesc;
3343 struct dpaa2_sg_entry *sg_table;
3346 /* allocate space for base edesc and link tables */
3347 edesc = qi_cache_zalloc(GFP_DMA | flags);
3351 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3352 qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3353 sg_table = &edesc->sgt[0];
3355 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3360 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3364 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
3366 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3368 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3369 dev_err(ctx->dev, "unable to map S/G table\n");
3373 edesc->qm_sg_bytes = qm_sg_bytes;
3375 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3377 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3378 dev_err(ctx->dev, "unable to map dst\n");
3384 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3385 dpaa2_fl_set_final(in_fle, true);
3386 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3387 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3388 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3389 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3390 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3391 dpaa2_fl_set_len(out_fle, digestsize);
3393 req_ctx->flc = &ctx->flc[FINALIZE];
3394 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3395 req_ctx->cbk = ahash_done_ctx_src;
3396 req_ctx->ctx = &req->base;
3397 req_ctx->edesc = edesc;
3399 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3400 if (ret == -EINPROGRESS ||
3401 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3405 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3406 qi_cache_free(edesc);
3410 static int ahash_finup_ctx(struct ahash_request *req)
3412 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3413 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3414 struct caam_hash_state *state = ahash_request_ctx(req);
3415 struct caam_request *req_ctx = &state->caam_req;
3416 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3417 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3418 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3419 GFP_KERNEL : GFP_ATOMIC;
3420 int buflen = *current_buflen(state);
3421 int qm_sg_bytes, qm_sg_src_index;
3422 int src_nents, mapped_nents;
3423 int digestsize = crypto_ahash_digestsize(ahash);
3424 struct ahash_edesc *edesc;
3425 struct dpaa2_sg_entry *sg_table;
3428 src_nents = sg_nents_for_len(req->src, req->nbytes);
3429 if (src_nents < 0) {
3430 dev_err(ctx->dev, "Invalid number of src SG.\n");
3435 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3437 if (!mapped_nents) {
3438 dev_err(ctx->dev, "unable to DMA map source\n");
3445 /* allocate space for base edesc and link tables */
3446 edesc = qi_cache_zalloc(GFP_DMA | flags);
3448 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3452 edesc->src_nents = src_nents;
3453 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3454 qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
3455 sg_table = &edesc->sgt[0];
3457 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3462 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3466 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
3468 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3470 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3471 dev_err(ctx->dev, "unable to map S/G table\n");
3475 edesc->qm_sg_bytes = qm_sg_bytes;
3477 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3479 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3480 dev_err(ctx->dev, "unable to map dst\n");
3486 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3487 dpaa2_fl_set_final(in_fle, true);
3488 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3489 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3490 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3491 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3492 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3493 dpaa2_fl_set_len(out_fle, digestsize);
3495 req_ctx->flc = &ctx->flc[FINALIZE];
3496 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3497 req_ctx->cbk = ahash_done_ctx_src;
3498 req_ctx->ctx = &req->base;
3499 req_ctx->edesc = edesc;
3501 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3502 if (ret == -EINPROGRESS ||
3503 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3507 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
3508 qi_cache_free(edesc);
3512 static int ahash_digest(struct ahash_request *req)
3514 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3515 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3516 struct caam_hash_state *state = ahash_request_ctx(req);
3517 struct caam_request *req_ctx = &state->caam_req;
3518 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3519 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3520 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3521 GFP_KERNEL : GFP_ATOMIC;
3522 int digestsize = crypto_ahash_digestsize(ahash);
3523 int src_nents, mapped_nents;
3524 struct ahash_edesc *edesc;
3529 src_nents = sg_nents_for_len(req->src, req->nbytes);
3530 if (src_nents < 0) {
3531 dev_err(ctx->dev, "Invalid number of src SG.\n");
3536 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3538 if (!mapped_nents) {
3539 dev_err(ctx->dev, "unable to map source for DMA\n");
3546 /* allocate space for base edesc and link tables */
3547 edesc = qi_cache_zalloc(GFP_DMA | flags);
3549 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3553 edesc->src_nents = src_nents;
3554 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3556 if (mapped_nents > 1) {
3558 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3560 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3561 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3562 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3563 qm_sg_bytes, DMA_TO_DEVICE);
3564 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3565 dev_err(ctx->dev, "unable to map S/G table\n");
3568 edesc->qm_sg_bytes = qm_sg_bytes;
3569 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3570 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3572 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3573 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3576 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3578 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3579 dev_err(ctx->dev, "unable to map dst\n");
3584 dpaa2_fl_set_final(in_fle, true);
3585 dpaa2_fl_set_len(in_fle, req->nbytes);
3586 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3587 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3588 dpaa2_fl_set_len(out_fle, digestsize);
3590 req_ctx->flc = &ctx->flc[DIGEST];
3591 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3592 req_ctx->cbk = ahash_done;
3593 req_ctx->ctx = &req->base;
3594 req_ctx->edesc = edesc;
3595 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3596 if (ret == -EINPROGRESS ||
3597 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3601 ahash_unmap(ctx->dev, edesc, req, digestsize);
3602 qi_cache_free(edesc);
3606 static int ahash_final_no_ctx(struct ahash_request *req)
3608 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3609 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3610 struct caam_hash_state *state = ahash_request_ctx(req);
3611 struct caam_request *req_ctx = &state->caam_req;
3612 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3613 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3614 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3615 GFP_KERNEL : GFP_ATOMIC;
3616 u8 *buf = current_buf(state);
3617 int buflen = *current_buflen(state);
3618 int digestsize = crypto_ahash_digestsize(ahash);
3619 struct ahash_edesc *edesc;
3622 /* allocate space for base edesc and link tables */
3623 edesc = qi_cache_zalloc(GFP_DMA | flags);
3627 state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
3628 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3629 dev_err(ctx->dev, "unable to map src\n");
3633 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3635 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3636 dev_err(ctx->dev, "unable to map dst\n");
3641 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3642 dpaa2_fl_set_final(in_fle, true);
3643 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3644 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3645 dpaa2_fl_set_len(in_fle, buflen);
3646 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3647 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3648 dpaa2_fl_set_len(out_fle, digestsize);
3650 req_ctx->flc = &ctx->flc[DIGEST];
3651 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3652 req_ctx->cbk = ahash_done;
3653 req_ctx->ctx = &req->base;
3654 req_ctx->edesc = edesc;
3656 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3657 if (ret == -EINPROGRESS ||
3658 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3662 ahash_unmap(ctx->dev, edesc, req, digestsize);
3663 qi_cache_free(edesc);
3667 static int ahash_update_no_ctx(struct ahash_request *req)
3669 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3670 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3671 struct caam_hash_state *state = ahash_request_ctx(req);
3672 struct caam_request *req_ctx = &state->caam_req;
3673 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3674 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3675 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3676 GFP_KERNEL : GFP_ATOMIC;
3677 u8 *buf = current_buf(state);
3678 int *buflen = current_buflen(state);
3679 u8 *next_buf = alt_buf(state);
3680 int *next_buflen = alt_buflen(state);
3681 int in_len = *buflen + req->nbytes, to_hash;
3682 int qm_sg_bytes, src_nents, mapped_nents;
3683 struct ahash_edesc *edesc;
3686 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3687 to_hash = in_len - *next_buflen;
3690 struct dpaa2_sg_entry *sg_table;
3692 src_nents = sg_nents_for_len(req->src,
3693 req->nbytes - *next_buflen);
3694 if (src_nents < 0) {
3695 dev_err(ctx->dev, "Invalid number of src SG.\n");
3700 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3702 if (!mapped_nents) {
3703 dev_err(ctx->dev, "unable to DMA map source\n");
3710 /* allocate space for base edesc and link tables */
3711 edesc = qi_cache_zalloc(GFP_DMA | flags);
3713 dma_unmap_sg(ctx->dev, req->src, src_nents,
3718 edesc->src_nents = src_nents;
3719 qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
3720 sg_table = &edesc->sgt[0];
3722 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3726 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3729 scatterwalk_map_and_copy(next_buf, req->src,
3733 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3734 qm_sg_bytes, DMA_TO_DEVICE);
3735 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3736 dev_err(ctx->dev, "unable to map S/G table\n");
3740 edesc->qm_sg_bytes = qm_sg_bytes;
3742 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3743 ctx->ctx_len, DMA_FROM_DEVICE);
3744 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3745 dev_err(ctx->dev, "unable to map ctx\n");
3751 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3752 dpaa2_fl_set_final(in_fle, true);
3753 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3754 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3755 dpaa2_fl_set_len(in_fle, to_hash);
3756 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3757 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3758 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3760 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3761 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3762 req_ctx->cbk = ahash_done_ctx_dst;
3763 req_ctx->ctx = &req->base;
3764 req_ctx->edesc = edesc;
3766 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3767 if (ret != -EINPROGRESS &&
3769 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3772 state->update = ahash_update_ctx;
3773 state->finup = ahash_finup_ctx;
3774 state->final = ahash_final_ctx;
3775 } else if (*next_buflen) {
3776 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3778 *buflen = *next_buflen;
3782 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3783 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3784 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3785 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3790 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
3791 qi_cache_free(edesc);
3795 static int ahash_finup_no_ctx(struct ahash_request *req)
3797 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3798 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3799 struct caam_hash_state *state = ahash_request_ctx(req);
3800 struct caam_request *req_ctx = &state->caam_req;
3801 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3802 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3803 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3804 GFP_KERNEL : GFP_ATOMIC;
3805 int buflen = *current_buflen(state);
3806 int qm_sg_bytes, src_nents, mapped_nents;
3807 int digestsize = crypto_ahash_digestsize(ahash);
3808 struct ahash_edesc *edesc;
3809 struct dpaa2_sg_entry *sg_table;
3812 src_nents = sg_nents_for_len(req->src, req->nbytes);
3813 if (src_nents < 0) {
3814 dev_err(ctx->dev, "Invalid number of src SG.\n");
3819 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3821 if (!mapped_nents) {
3822 dev_err(ctx->dev, "unable to DMA map source\n");
3829 /* allocate space for base edesc and link tables */
3830 edesc = qi_cache_zalloc(GFP_DMA | flags);
3832 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3836 edesc->src_nents = src_nents;
3837 qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3838 sg_table = &edesc->sgt[0];
3840 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3844 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3846 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3848 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3849 dev_err(ctx->dev, "unable to map S/G table\n");
3853 edesc->qm_sg_bytes = qm_sg_bytes;
3855 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3857 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3858 dev_err(ctx->dev, "unable to map dst\n");
3864 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3865 dpaa2_fl_set_final(in_fle, true);
3866 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3867 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3868 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
3869 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3870 dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3871 dpaa2_fl_set_len(out_fle, digestsize);
3873 req_ctx->flc = &ctx->flc[DIGEST];
3874 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3875 req_ctx->cbk = ahash_done;
3876 req_ctx->ctx = &req->base;
3877 req_ctx->edesc = edesc;
3878 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3879 if (ret != -EINPROGRESS &&
3880 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3885 ahash_unmap(ctx->dev, edesc, req, digestsize);
3886 qi_cache_free(edesc);
3890 static int ahash_update_first(struct ahash_request *req)
3892 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3893 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3894 struct caam_hash_state *state = ahash_request_ctx(req);
3895 struct caam_request *req_ctx = &state->caam_req;
3896 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3897 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3898 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3899 GFP_KERNEL : GFP_ATOMIC;
3900 u8 *next_buf = alt_buf(state);
3901 int *next_buflen = alt_buflen(state);
3903 int src_nents, mapped_nents;
3904 struct ahash_edesc *edesc;
3907 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
3909 to_hash = req->nbytes - *next_buflen;
3912 struct dpaa2_sg_entry *sg_table;
3914 src_nents = sg_nents_for_len(req->src,
3915 req->nbytes - (*next_buflen));
3916 if (src_nents < 0) {
3917 dev_err(ctx->dev, "Invalid number of src SG.\n");
3922 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3924 if (!mapped_nents) {
3925 dev_err(ctx->dev, "unable to map source for DMA\n");
3932 /* allocate space for base edesc and link tables */
3933 edesc = qi_cache_zalloc(GFP_DMA | flags);
3935 dma_unmap_sg(ctx->dev, req->src, src_nents,
3940 edesc->src_nents = src_nents;
3941 sg_table = &edesc->sgt[0];
3943 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3944 dpaa2_fl_set_final(in_fle, true);
3945 dpaa2_fl_set_len(in_fle, to_hash);
3947 if (mapped_nents > 1) {
3950 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3951 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3952 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3955 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3956 dev_err(ctx->dev, "unable to map S/G table\n");
3960 edesc->qm_sg_bytes = qm_sg_bytes;
3961 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3962 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3964 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3965 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3969 scatterwalk_map_and_copy(next_buf, req->src, to_hash,
3972 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3973 ctx->ctx_len, DMA_FROM_DEVICE);
3974 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3975 dev_err(ctx->dev, "unable to map ctx\n");
3981 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3982 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3983 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3985 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3986 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3987 req_ctx->cbk = ahash_done_ctx_dst;
3988 req_ctx->ctx = &req->base;
3989 req_ctx->edesc = edesc;
3991 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3992 if (ret != -EINPROGRESS &&
3993 !(ret == -EBUSY && req->base.flags &
3994 CRYPTO_TFM_REQ_MAY_BACKLOG))
3997 state->update = ahash_update_ctx;
3998 state->finup = ahash_finup_ctx;
3999 state->final = ahash_final_ctx;
4000 } else if (*next_buflen) {
4001 state->update = ahash_update_no_ctx;
4002 state->finup = ahash_finup_no_ctx;
4003 state->final = ahash_final_no_ctx;
4004 scatterwalk_map_and_copy(next_buf, req->src, 0,
4009 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4010 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4015 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
4016 qi_cache_free(edesc);
4020 static int ahash_finup_first(struct ahash_request *req)
4022 return ahash_digest(req);
4025 static int ahash_init(struct ahash_request *req)
4027 struct caam_hash_state *state = ahash_request_ctx(req);
4029 state->update = ahash_update_first;
4030 state->finup = ahash_finup_first;
4031 state->final = ahash_final_no_ctx;
4034 state->current_buf = 0;
4036 state->buflen_0 = 0;
4037 state->buflen_1 = 0;
4042 static int ahash_update(struct ahash_request *req)
4044 struct caam_hash_state *state = ahash_request_ctx(req);
4046 return state->update(req);
4049 static int ahash_finup(struct ahash_request *req)
4051 struct caam_hash_state *state = ahash_request_ctx(req);
4053 return state->finup(req);
4056 static int ahash_final(struct ahash_request *req)
4058 struct caam_hash_state *state = ahash_request_ctx(req);
4060 return state->final(req);
4063 static int ahash_export(struct ahash_request *req, void *out)
4065 struct caam_hash_state *state = ahash_request_ctx(req);
4066 struct caam_export_state *export = out;
4070 if (state->current_buf) {
4072 len = state->buflen_1;
4075 len = state->buflen_0;
4078 memcpy(export->buf, buf, len);
4079 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4080 export->buflen = len;
4081 export->update = state->update;
4082 export->final = state->final;
4083 export->finup = state->finup;
4088 static int ahash_import(struct ahash_request *req, const void *in)
4090 struct caam_hash_state *state = ahash_request_ctx(req);
4091 const struct caam_export_state *export = in;
4093 memset(state, 0, sizeof(*state));
4094 memcpy(state->buf_0, export->buf, export->buflen);
4095 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4096 state->buflen_0 = export->buflen;
4097 state->update = export->update;
4098 state->final = export->final;
4099 state->finup = export->finup;
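/*
 * Sketch of the generic export/import round trip served by the two
 * functions above (.statesize is sizeof(struct caam_export_state)):
 *
 *	char blob[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, blob);		// ends up in ahash_export()
 *	// req may now be freed; later, on a same-type tfm:
 *	crypto_ahash_import(req2, blob);	// ends up in ahash_import()
 *	crypto_ahash_final(req2);		// resumes the saved hash state
 */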
4104 struct caam_hash_template {
4105 char name[CRYPTO_MAX_ALG_NAME];
4106 char driver_name[CRYPTO_MAX_ALG_NAME];
4107 char hmac_name[CRYPTO_MAX_ALG_NAME];
4108 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4109 unsigned int blocksize;
4110 struct ahash_alg template_ahash;
4114 /* ahash descriptors */
4115 static struct caam_hash_template driver_hash[] = {
4118 .driver_name = "sha1-caam-qi2",
4119 .hmac_name = "hmac(sha1)",
4120 .hmac_driver_name = "hmac-sha1-caam-qi2",
4121 .blocksize = SHA1_BLOCK_SIZE,
4124 .update = ahash_update,
4125 .final = ahash_final,
4126 .finup = ahash_finup,
4127 .digest = ahash_digest,
4128 .export = ahash_export,
4129 .import = ahash_import,
4130 .setkey = ahash_setkey,
4132 .digestsize = SHA1_DIGEST_SIZE,
4133 .statesize = sizeof(struct caam_export_state),
4136 .alg_type = OP_ALG_ALGSEL_SHA1,
4139 .driver_name = "sha224-caam-qi2",
4140 .hmac_name = "hmac(sha224)",
4141 .hmac_driver_name = "hmac-sha224-caam-qi2",
4142 .blocksize = SHA224_BLOCK_SIZE,
4145 .update = ahash_update,
4146 .final = ahash_final,
4147 .finup = ahash_finup,
4148 .digest = ahash_digest,
4149 .export = ahash_export,
4150 .import = ahash_import,
4151 .setkey = ahash_setkey,
4153 .digestsize = SHA224_DIGEST_SIZE,
4154 .statesize = sizeof(struct caam_export_state),
4157 .alg_type = OP_ALG_ALGSEL_SHA224,
4160 .driver_name = "sha256-caam-qi2",
4161 .hmac_name = "hmac(sha256)",
4162 .hmac_driver_name = "hmac-sha256-caam-qi2",
4163 .blocksize = SHA256_BLOCK_SIZE,
4166 .update = ahash_update,
4167 .final = ahash_final,
4168 .finup = ahash_finup,
4169 .digest = ahash_digest,
4170 .export = ahash_export,
4171 .import = ahash_import,
4172 .setkey = ahash_setkey,
4174 .digestsize = SHA256_DIGEST_SIZE,
4175 .statesize = sizeof(struct caam_export_state),
4178 .alg_type = OP_ALG_ALGSEL_SHA256,
4181 .driver_name = "sha384-caam-qi2",
4182 .hmac_name = "hmac(sha384)",
4183 .hmac_driver_name = "hmac-sha384-caam-qi2",
4184 .blocksize = SHA384_BLOCK_SIZE,
4187 .update = ahash_update,
4188 .final = ahash_final,
4189 .finup = ahash_finup,
4190 .digest = ahash_digest,
4191 .export = ahash_export,
4192 .import = ahash_import,
4193 .setkey = ahash_setkey,
4195 .digestsize = SHA384_DIGEST_SIZE,
4196 .statesize = sizeof(struct caam_export_state),
4199 .alg_type = OP_ALG_ALGSEL_SHA384,
4202 .driver_name = "sha512-caam-qi2",
4203 .hmac_name = "hmac(sha512)",
4204 .hmac_driver_name = "hmac-sha512-caam-qi2",
4205 .blocksize = SHA512_BLOCK_SIZE,
4208 .update = ahash_update,
4209 .final = ahash_final,
4210 .finup = ahash_finup,
4211 .digest = ahash_digest,
4212 .export = ahash_export,
4213 .import = ahash_import,
4214 .setkey = ahash_setkey,
4216 .digestsize = SHA512_DIGEST_SIZE,
4217 .statesize = sizeof(struct caam_export_state),
4220 .alg_type = OP_ALG_ALGSEL_SHA512,
4223 .driver_name = "md5-caam-qi2",
4224 .hmac_name = "hmac(md5)",
4225 .hmac_driver_name = "hmac-md5-caam-qi2",
4226 .blocksize = MD5_BLOCK_WORDS * 4,
4229 .update = ahash_update,
4230 .final = ahash_final,
4231 .finup = ahash_finup,
4232 .digest = ahash_digest,
4233 .export = ahash_export,
4234 .import = ahash_import,
4235 .setkey = ahash_setkey,
4237 .digestsize = MD5_DIGEST_SIZE,
4238 .statesize = sizeof(struct caam_export_state),
4241 .alg_type = OP_ALG_ALGSEL_MD5,
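/*
 * Each template above is instantiated twice by caam_hash_alloc() below:
 * once as the unkeyed hash (name/driver_name, with .setkey stripped) and
 * once as the keyed variant (hmac_name/hmac_driver_name).
 */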
4245 struct caam_hash_alg {
4246 struct list_head entry;
4249 struct ahash_alg ahash_alg;
4252 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4254 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4255 struct crypto_alg *base = tfm->__crt_alg;
4256 struct hash_alg_common *halg =
4257 container_of(base, struct hash_alg_common, base);
4258 struct ahash_alg *alg =
4259 container_of(halg, struct ahash_alg, halg);
4260 struct caam_hash_alg *caam_hash =
4261 container_of(alg, struct caam_hash_alg, ahash_alg);
4262 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4263 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4264 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4265 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4267 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4269 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4270 dma_addr_t dma_addr;
4273 ctx->dev = caam_hash->dev;
4275 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4277 DMA_ATTR_SKIP_CPU_SYNC);
4278 if (dma_mapping_error(ctx->dev, dma_addr)) {
4279 dev_err(ctx->dev, "unable to map shared descriptors\n");
4283 for (i = 0; i < HASH_NUM_OP; i++)
4284 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4286 /* copy descriptor header template value */
4287 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4289 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4290 OP_ALG_ALGSEL_SUBMASK) >>
4291 OP_ALG_ALGSEL_SHIFT];
4293 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4294 sizeof(struct caam_hash_state));
4296 return ahash_set_sh_desc(ahash);
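/*
 * Worked example for the runninglen[] lookup above: the MDHA ALGSEL codes
 * for MD5..SHA512 are consecutive, so (algtype & OP_ALG_ALGSEL_SUBMASK) >>
 * OP_ALG_ALGSEL_SHIFT yields indices 0..5; sha256, for instance, selects
 * runninglen[3] = HASH_MSG_LEN + SHA256_DIGEST_SIZE = 40 bytes.
 */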
4299 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4301 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4303 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4304 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4307 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4308 struct caam_hash_template *template, bool keyed)
4310 struct caam_hash_alg *t_alg;
4311 struct ahash_alg *halg;
4312 struct crypto_alg *alg;
4314 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4316 return ERR_PTR(-ENOMEM);
4318 t_alg->ahash_alg = template->template_ahash;
4319 halg = &t_alg->ahash_alg;
4320 alg = &halg->halg.base;
4323 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4324 template->hmac_name);
4325 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4326 template->hmac_driver_name);
4328 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4330 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4331 template->driver_name);
4332 t_alg->ahash_alg.setkey = NULL;
4334 alg->cra_module = THIS_MODULE;
4335 alg->cra_init = caam_hash_cra_init;
4336 alg->cra_exit = caam_hash_cra_exit;
4337 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4338 alg->cra_priority = CAAM_CRA_PRIORITY;
4339 alg->cra_blocksize = template->blocksize;
4340 alg->cra_alignmask = 0;
4341 alg->cra_flags = CRYPTO_ALG_ASYNC;
4343 t_alg->alg_type = template->alg_type;
4349 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4351 struct dpaa2_caam_priv_per_cpu *ppriv;
4353 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4354 napi_schedule_irqoff(&ppriv->napi);
4357 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4359 struct device *dev = priv->dev;
4360 struct dpaa2_io_notification_ctx *nctx;
4361 struct dpaa2_caam_priv_per_cpu *ppriv;
4362 int err, i = 0, cpu;
4364 for_each_online_cpu(cpu) {
4365 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4367 nctx = &ppriv->nctx;
4369 nctx->id = ppriv->rsp_fqid;
4370 nctx->desired_cpu = cpu;
4371 nctx->cb = dpaa2_caam_fqdan_cb;
4373 /* Register notification callbacks */
4374 err = dpaa2_io_service_register(NULL, nctx);
4375 if (unlikely(err)) {
4376 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4379 * If no affine DPIO for this core, there's probably
4380 * none available for next cores either. Signal we want
4381 * to retry later, in case the DPIO devices weren't probed yet.
4384 err = -EPROBE_DEFER;
4388 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4390 if (unlikely(!ppriv->store)) {
4391 dev_err(dev, "dpaa2_io_store_create() failed\n");
4396 if (++i == priv->num_pairs)
4403 for_each_online_cpu(cpu) {
4404 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4405 if (!ppriv->nctx.cb)
4407 dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4410 for_each_online_cpu(cpu) {
4411 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4414 dpaa2_io_store_destroy(ppriv->store);
4420 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4422 struct dpaa2_caam_priv_per_cpu *ppriv;
4425 for_each_online_cpu(cpu) {
4426 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4427 dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4428 dpaa2_io_store_destroy(ppriv->store);
4430 if (++i == priv->num_pairs)
4435 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4437 struct dpseci_rx_queue_cfg rx_queue_cfg;
4438 struct device *dev = priv->dev;
4439 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4440 struct dpaa2_caam_priv_per_cpu *ppriv;
4441 int err = 0, i = 0, cpu;
4443 /* Configure Rx queues */
4444 for_each_online_cpu(cpu) {
4445 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4447 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4448 DPSECI_QUEUE_OPT_USER_CTX;
4449 rx_queue_cfg.order_preservation_en = 0;
4450 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4451 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4453 * Rx priority (WQ) doesn't really matter, since we use
4454 * pull mode, i.e. volatile dequeues from specific FQs
4456 rx_queue_cfg.dest_cfg.priority = 0;
4457 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4459 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4462 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4467 if (++i == priv->num_pairs)
4474 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4476 struct device *dev = priv->dev;
4478 if (!priv->cscn_mem)
4481 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4482 kfree(priv->cscn_mem);
4485 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4487 struct device *dev = priv->dev;
4488 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4490 dpaa2_dpseci_congestion_free(priv);
4491 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}
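
/* Issue a volatile dequeue command for this portal's response FQ */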
static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}
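
/*
 * Drain the dequeue store, handing every valid frame to
 * dpaa2_caam_process_fd(). Returns the number of responses consumed.
 */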
static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}
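
/*
 * NAPI poll: consume responses in store-sized batches until either the FQ
 * runs empty or one more batch could exceed the budget; if the budget was
 * not exhausted, complete NAPI and re-arm notifications.
 */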
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}
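
/*
 * Ask the MC firmware to write Congestion State Change Notifications (CSCNs)
 * for this DPSECI into a DMA-mapped area, so the enqueue path can cheaply
 * detect congestion and drop requests early.
 */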
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
				 GFP_KERNEL | GFP_DMA);
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					   DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					   DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}
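
/*
 * One-time DPSECI object configuration: open the object, check the MC API
 * version, read object and SEC attributes, set up congestion notifications
 * and distribute the Rx/Tx frame queue pairs over the online CPUs.
 */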
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
			priv->rx_queue_attr[i].fqid,
			priv->tx_queue_attr[i].fqid);

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
			       DPAA2_CAAM_NAPI_WEIGHT);

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}
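
/* Enable NAPI on every instance, then enable the DPSECI object */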
static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}
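
/* Disable the DPSECI object, then quiesce and delete the NAPI instances */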
static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}
static struct list_head hash_list;
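
/*
 * Probe a DPSECI object: set up the MC portal, per-CPU state, DPIO portals
 * and queue bindings, then register with the crypto API each skcipher, AEAD
 * and ahash algorithm that the SEC accelerators present on this SoC support.
 */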
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if (!priv->sec_attr.md_acc_num && c2_alg_sel)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}
	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}
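
/* Mirror of dpaa2_caam_probe(): unregister algorithms, then free resources */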
static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}
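
/**
 * dpaa2_caam_enqueue - Enqueue a crypto request to the DPSECI Tx queues
 * @dev: device of the DPSECI object
 * @req: request previously filled in by the caller: frame list entries,
 *       Flow Context DMA address, completion callback and callback context
 *
 * Returns -EINPROGRESS if the frame was enqueued, -EBUSY if the engine is
 * congested, or -EIO on mapping/enqueue failure.
 *
 * A minimal caller sketch (field values are illustrative, not prescriptive;
 * my_done_cbk is a hypothetical completion handler):
 *
 *	req->fd_flt[0] = ...;			// output frame list entry
 *	req->fd_flt[1] = ...;			// input frame list entry
 *	req->flc_dma = ctx->flc_dma[ENCRYPT];	// Flow Context to use
 *	req->cbk = my_done_cbk;			// called from NAPI context
 *	req->ctx = areq;
 *	err = dpaa2_caam_enqueue(ctx->dev, req);
 *	if (err != -EINPROGRESS)
 *		// unmap buffers and propagate the error
 */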
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	int err = 0, i, id;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		goto err_out;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	/*
	 * There is no guarantee that preemption is disabled here,
	 * thus take action.
	 */
	preempt_disable();
	id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(NULL,
						  priv->tx_queue_attr[id].fqid,
						  &fd);
		if (err != -EBUSY)
			break;
	}
	preempt_enable();

	if (unlikely(err)) {
		dev_err(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_caam_probe,
	.remove = dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);