// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>

#define CAAM_CRA_PRIORITY       2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE       (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
                                 SHA512_DIGEST_SIZE * 2)

#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
bool caam_imx;
EXPORT_SYMBOL(caam_imx);
#endif
/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
        struct device *dev;
        int class1_alg_type;
        int class2_alg_type;
        bool rfc3686;
        bool geniv;
};

struct caam_aead_alg {
        struct aead_alg aead;
        struct caam_alg_entry caam;
        bool registered;
};

struct caam_skcipher_alg {
        struct skcipher_alg skcipher;
        struct caam_alg_entry caam;
        bool registered;
};

/**
 * caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
struct caam_ctx {
        struct caam_flc flc[NUM_OP];
        u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t flc_dma[NUM_OP];
        dma_addr_t key_dma;
        enum dma_data_direction dir;
        struct device *dev;
        struct alginfo adata;
        struct alginfo cdata;
        unsigned int authsize;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
                                     dma_addr_t iova_addr)
{
        phys_addr_t phys_addr;

        phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
                                   iova_addr;

        return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
        return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is done; the call is a direct passthrough to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
        kmem_cache_free(qi_cache, obj);
}
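
/*
 * A minimal usage sketch (disabled, for illustration only), mirroring what
 * the *_edesc_alloc() functions below do: pick GFP flags from the request,
 * allocate on the hotpath, and free from the completion callback. The
 * "my_edesc" type is a hypothetical stand-in for the real edesc structures.
 */
#if 0
static struct my_edesc *my_edesc_alloc(struct aead_request *req)
{
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct my_edesc *edesc;

        edesc = qi_cache_zalloc(GFP_DMA | flags);
        if (unlikely(!edesc))
                return ERR_PTR(-ENOMEM);

        /* ... build the request; qi_cache_free(edesc) on completion */
        return edesc;
}
#endif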

static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
        switch (crypto_tfm_alg_type(areq->tfm)) {
        case CRYPTO_ALG_TYPE_SKCIPHER:
                return skcipher_request_ctx(skcipher_request_cast(areq));
        case CRYPTO_ALG_TYPE_AEAD:
                return aead_request_ctx(container_of(areq, struct aead_request,
                                                     base));
        case CRYPTO_ALG_TYPE_AHASH:
                return ahash_request_ctx(ahash_request_cast(areq));
        default:
                return ERR_PTR(-EINVAL);
        }
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
                       struct scatterlist *dst, int src_nents,
                       int dst_nents, dma_addr_t iv_dma, int ivsize,
                       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
        if (dst != src) {
                if (src_nents)
                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
                dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else {
                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }

        if (iv_dma)
                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

        if (qm_sg_bytes)
                dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
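
/*
 * Note: caam_unmap() must mirror the mapping done at allocation time: same
 * scatterlists, nents counts and DMA directions. An iv_dma of 0 or a
 * qm_sg_bytes of 0 skips the corresponding unmap, so callers that mapped
 * neither an IV nor an S/G table can reuse the same helper.
 */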

static int aead_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
                                                 typeof(*alg), aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct device *dev = ctx->dev;
        struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
        struct caam_flc *flc;
        u32 *desc;
        u32 ctx1_iv_off = 0;
        u32 *nonce = NULL;
        unsigned int data_len[2];
        u32 inl_mask;
        const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = alg->caam.rfc3686;

        if (!ctx->cdata.keylen || !ctx->authsize)
                return 0;

        /*
         * AES-CTR needs to load the IV in the CONTEXT1 reg
         * at an offset of 128 bits (16 bytes)
         * CONTEXT1[255:128] = IV
         */
        if (ctr_mode)
                ctx1_iv_off = 16;

        /*
         * RFC3686 specific:
         *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
         */
        if (is_rfc3686) {
                ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
                nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
                                ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
        }
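
        /*
         * Resulting CONTEXT1 layout for RFC3686 (sizes in bytes), with the
         * nonce stored at the end of the key material, right after the
         * encryption key:
         *
         *      CONTEXT1[255:128] = | NONCE (4) | IV (8) | COUNTER (4) |
         */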

        data_len[0] = ctx->adata.keylen_pad;
        data_len[1] = ctx->cdata.keylen;

        /* aead_encrypt shared descriptor */
        if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
                                                 DESC_QI_AEAD_ENC_LEN) +
                              (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
                              DESC_JOB_IO_LEN, data_len, &inl_mask,
                              ARRAY_SIZE(data_len)) < 0)
                return -EINVAL;

        if (inl_mask & 1)
                ctx->adata.key_virt = ctx->key;
        else
                ctx->adata.key_dma = ctx->key_dma;

        if (inl_mask & 2)
                ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
        else
                ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

        ctx->adata.key_inline = !!(inl_mask & 1);
        ctx->cdata.key_inline = !!(inl_mask & 2);

        flc = &ctx->flc[ENCRYPT];
        desc = flc->sh_desc;

        if (alg->caam.geniv)
                cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
                                          ivsize, ctx->authsize, is_rfc3686,
                                          nonce, ctx1_iv_off, true,
                                          priv->sec_attr.era);
        else
                cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
                                       ivsize, ctx->authsize, is_rfc3686, nonce,
                                       ctx1_iv_off, true, priv->sec_attr.era);

        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        /* aead_decrypt shared descriptor */
        if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
                              (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
                              DESC_JOB_IO_LEN, data_len, &inl_mask,
                              ARRAY_SIZE(data_len)) < 0)
                return -EINVAL;

        if (inl_mask & 1)
                ctx->adata.key_virt = ctx->key;
        else
                ctx->adata.key_dma = ctx->key_dma;

        if (inl_mask & 2)
                ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
        else
                ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

        ctx->adata.key_inline = !!(inl_mask & 1);
        ctx->cdata.key_inline = !!(inl_mask & 2);

        flc = &ctx->flc[DECRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
                               ivsize, ctx->authsize, alg->caam.geniv,
                               is_rfc3686, nonce, ctx1_iv_off, true,
                               priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        aead_set_sh_desc(authenc);

        return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
                       unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
                keys.authkeylen + keys.enckeylen, keys.enckeylen,
                keys.authkeylen);
        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

        ctx->adata.keylen = keys.authkeylen;
        ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                              OP_ALG_ALGSEL_MASK);

        if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
                goto badkey;

        memcpy(ctx->key, keys.authkey, keys.authkeylen);
        memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
        dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
                                   keys.enckeylen, ctx->dir);
        print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                             ctx->adata.keylen_pad + keys.enckeylen, 1);

        ctx->cdata.keylen = keys.enckeylen;

        memzero_explicit(&keys, sizeof(keys));
        return aead_set_sh_desc(aead);
badkey:
        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
}
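
/*
 * Layout of ctx->key as assembled by aead_setkey() above; the
 * authentication key is padded to the split-key length for the selected
 * hash algorithm:
 *
 *      | split authentication key (keylen_pad) | encryption key (enckeylen) |
 */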

static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                           bool encrypt)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_request *req_ctx = aead_request_ctx(req);
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
                                                 typeof(*alg), aead);
        struct device *dev = ctx->dev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
        struct aead_edesc *edesc;
        dma_addr_t qm_sg_dma, iv_dma = 0;
        int ivsize = 0;
        unsigned int authsize = ctx->authsize;
        int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
        int in_len, out_len;
        struct dpaa2_sg_entry *sg_table;

        /* allocate space for base edesc, link tables and IV */
        edesc = qi_cache_zalloc(GFP_DMA | flags);
        if (unlikely(!edesc)) {
                dev_err(dev, "could not allocate extended descriptor\n");
                return ERR_PTR(-ENOMEM);
        }

        if (unlikely(req->dst != req->src)) {
                src_nents = sg_nents_for_len(req->src, req->assoclen +
                                             req->cryptlen);
                if (unlikely(src_nents < 0)) {
                        dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
                                req->assoclen + req->cryptlen);
                        qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }

                dst_nents = sg_nents_for_len(req->dst, req->assoclen +
                                             req->cryptlen +
                                             (encrypt ? authsize :
                                                        (-authsize)));
                if (unlikely(dst_nents < 0)) {
                        dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
                                req->assoclen + req->cryptlen +
                                (encrypt ? authsize : (-authsize)));
                        qi_cache_free(edesc);
                        return ERR_PTR(dst_nents);
                }

                if (src_nents) {
                        mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
                                                      DMA_TO_DEVICE);
                        if (unlikely(!mapped_src_nents)) {
                                dev_err(dev, "unable to map source\n");
                                qi_cache_free(edesc);
                                return ERR_PTR(-ENOMEM);
                        }
                } else {
                        mapped_src_nents = 0;
                }

                mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
                                              DMA_FROM_DEVICE);
                if (unlikely(!mapped_dst_nents)) {
                        dev_err(dev, "unable to map destination\n");
                        dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
                        qi_cache_free(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        } else {
                src_nents = sg_nents_for_len(req->src, req->assoclen +
                                             req->cryptlen +
                                                (encrypt ? authsize : 0));
                if (unlikely(src_nents < 0)) {
                        dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
                                req->assoclen + req->cryptlen +
                                (encrypt ? authsize : 0));
                        qi_cache_free(edesc);
                        return ERR_PTR(src_nents);
                }

                mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
                                              DMA_BIDIRECTIONAL);
                if (unlikely(!mapped_src_nents)) {
                        dev_err(dev, "unable to map source\n");
                        qi_cache_free(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        }

        if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
                ivsize = crypto_aead_ivsize(aead);

        /*
         * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
         * Input is not contiguous.
         */
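        /*
         * Resulting input table layout (destination entries are appended
         * only when req->dst != req->src and the destination spans more
         * than one entry):
         *
         *      [0]             assoclen (4-byte immediate, mapped below)
         *      [1]             IV, if any
         *      [1 + !!ivsize]  source S/G entries
         *      [qm_sg_index]   destination S/G entries, if any
         */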
        qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
                      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
        sg_table = &edesc->sgt[0];
        qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
        if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
                     CAAM_QI_MEMCACHE_SIZE)) {
                dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
                        qm_sg_nents, ivsize);
                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
                           0, 0, 0);
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }

        if (ivsize) {
                u8 *iv = (u8 *)(sg_table + qm_sg_nents);

                /* Make sure IV is located in a DMAable area */
                memcpy(iv, req->iv, ivsize);

                iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, iv_dma)) {
                        dev_err(dev, "unable to map IV\n");
                        caam_unmap(dev, req->src, req->dst, src_nents,
                                   dst_nents, 0, 0, 0, 0);
                        qi_cache_free(edesc);
                        return ERR_PTR(-ENOMEM);
                }
        }

        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;

        edesc->assoclen = cpu_to_caam32(req->assoclen);
        edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
                                             DMA_TO_DEVICE);
        if (dma_mapping_error(dev, edesc->assoclen_dma)) {
                dev_err(dev, "unable to map assoclen\n");
                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
                           iv_dma, ivsize, 0, 0);
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }

        dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
        qm_sg_index++;
        if (ivsize) {
                dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
                qm_sg_index++;
        }
        sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
        qm_sg_index += mapped_src_nents;

        if (mapped_dst_nents > 1)
                sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
                                 qm_sg_index, 0);

        qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, qm_sg_dma)) {
                dev_err(dev, "unable to map S/G table\n");
                dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
                           iv_dma, ivsize, 0, 0);
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }

        edesc->qm_sg_dma = qm_sg_dma;
        edesc->qm_sg_bytes = qm_sg_bytes;

        out_len = req->assoclen + req->cryptlen +
                  (encrypt ? ctx->authsize : (-ctx->authsize));
        in_len = 4 + ivsize + req->assoclen + req->cryptlen;

        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
        dpaa2_fl_set_addr(in_fle, qm_sg_dma);
        dpaa2_fl_set_len(in_fle, in_len);

        if (req->dst == req->src) {
                if (mapped_src_nents == 1) {
                        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
                        dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
                } else {
                        dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
                        dpaa2_fl_set_addr(out_fle, qm_sg_dma +
                                          (1 + !!ivsize) * sizeof(*sg_table));
                }
        } else if (mapped_dst_nents == 1) {
                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
                dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
        } else {
                dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
                dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
                                  sizeof(*sg_table));
        }

        dpaa2_fl_set_len(out_fle, out_len);

        return edesc;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_flc *flc;
        u32 *desc;
        int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
                        ctx->cdata.keylen;

        if (!ctx->cdata.keylen || !ctx->authsize)
                return 0;

        /*
         * AES GCM encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
                ctx->cdata.key_inline = true;
                ctx->cdata.key_virt = ctx->key;
        } else {
                ctx->cdata.key_inline = false;
                ctx->cdata.key_dma = ctx->key_dma;
        }

        flc = &ctx->flc[ENCRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
                ctx->cdata.key_inline = true;
                ctx->cdata.key_virt = ctx->key;
        } else {
                ctx->cdata.key_inline = false;
                ctx->cdata.key_dma = ctx->key_dma;
        }

        flc = &ctx->flc[DECRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        gcm_set_sh_desc(authenc);

        return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
                      const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;

        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

        memcpy(ctx->key, key, keylen);
        dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
        ctx->cdata.keylen = keylen;

        return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_flc *flc;
        u32 *desc;
        int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
                        ctx->cdata.keylen;

        if (!ctx->cdata.keylen || !ctx->authsize)
                return 0;

        ctx->cdata.key_virt = ctx->key;

        /*
         * RFC4106 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
                ctx->cdata.key_inline = true;
        } else {
                ctx->cdata.key_inline = false;
                ctx->cdata.key_dma = ctx->key_dma;
        }

        flc = &ctx->flc[ENCRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
                                  true);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
                ctx->cdata.key_inline = true;
        } else {
                ctx->cdata.key_inline = false;
                ctx->cdata.key_dma = ctx->key_dma;
        }

        flc = &ctx->flc[DECRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
                                  true);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4106_set_sh_desc(authenc);

        return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
                          const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;

        if (keylen < 4)
                return -EINVAL;

        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

        memcpy(ctx->key, key, keylen);
        /*
         * The last four bytes of the key material are used as the salt value
         * in the nonce. Update the AES key length.
         */
        ctx->cdata.keylen = keylen - 4;
        dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
                                   ctx->dir);

        return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_flc *flc;
        u32 *desc;
        int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
                        ctx->cdata.keylen;

        if (!ctx->cdata.keylen || !ctx->authsize)
                return 0;

        ctx->cdata.key_virt = ctx->key;

        /*
         * RFC4543 encrypt shared descriptor
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
        if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
                ctx->cdata.key_inline = true;
        } else {
                ctx->cdata.key_inline = false;
                ctx->cdata.key_dma = ctx->key_dma;
        }

        flc = &ctx->flc[ENCRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
                                  true);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
                ctx->cdata.key_inline = true;
        } else {
                ctx->cdata.key_inline = false;
                ctx->cdata.key_dma = ctx->key_dma;
        }

        flc = &ctx->flc[DECRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
                                  true);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        rfc4543_set_sh_desc(authenc);

        return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
                          const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;

        if (keylen < 4)
                return -EINVAL;

        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

        memcpy(ctx->key, key, keylen);
        /*
         * The last four bytes of the key material are used as the salt value
         * in the nonce. Update the AES key length.
         */
        ctx->cdata.keylen = keylen - 4;
        dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
                                   ctx->dir);

        return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
                           unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct caam_skcipher_alg *alg =
                container_of(crypto_skcipher_alg(skcipher),
                             struct caam_skcipher_alg, skcipher);
        struct device *dev = ctx->dev;
        struct caam_flc *flc;
        unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
        u32 *desc;
        u32 ctx1_iv_off = 0;
        const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = alg->caam.rfc3686;

        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

        /*
         * AES-CTR needs to load the IV in the CONTEXT1 reg
         * at an offset of 128 bits (16 bytes)
         * CONTEXT1[255:128] = IV
         */
        if (ctr_mode)
                ctx1_iv_off = 16;

        /*
         * RFC3686 specific:
         *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
         *      | *key = {KEY, NONCE}
         */
        if (is_rfc3686) {
                ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
                keylen -= CTR_RFC3686_NONCE_SIZE;
        }

        ctx->cdata.keylen = keylen;
        ctx->cdata.key_virt = key;
        ctx->cdata.key_inline = true;

        /* skcipher_encrypt shared descriptor */
        flc = &ctx->flc[ENCRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
                                   ctx1_iv_off);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        /* skcipher_decrypt shared descriptor */
        flc = &ctx->flc[DECRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
                                   ctx1_iv_off);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        return 0;
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
                               unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct device *dev = ctx->dev;
        struct caam_flc *flc;
        u32 *desc;

        if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
                dev_err(dev, "key size mismatch\n");
                crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        ctx->cdata.keylen = keylen;
        ctx->cdata.key_virt = key;
        ctx->cdata.key_inline = true;

        /* xts_skcipher_encrypt shared descriptor */
        flc = &ctx->flc[ENCRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        /* xts_skcipher_decrypt shared descriptor */
        flc = &ctx->flc[DECRYPT];
        desc = flc->sh_desc;
        cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
                                   sizeof(flc->flc) + desc_bytes(desc),
                                   ctx->dir);

        return 0;
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct caam_request *req_ctx = skcipher_request_ctx(req);
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct device *dev = ctx->dev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
        struct skcipher_edesc *edesc;
        dma_addr_t iv_dma;
        u8 *iv;
        int ivsize = crypto_skcipher_ivsize(skcipher);
        int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
        struct dpaa2_sg_entry *sg_table;

        src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (unlikely(src_nents < 0)) {
                dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
                        req->cryptlen);
                return ERR_PTR(src_nents);
        }

        if (unlikely(req->dst != req->src)) {
                dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
                if (unlikely(dst_nents < 0)) {
                        dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
                                req->cryptlen);
                        return ERR_PTR(dst_nents);
                }

                mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
                                              DMA_TO_DEVICE);
                if (unlikely(!mapped_src_nents)) {
                        dev_err(dev, "unable to map source\n");
                        return ERR_PTR(-ENOMEM);
                }

                mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
                                              DMA_FROM_DEVICE);
                if (unlikely(!mapped_dst_nents)) {
                        dev_err(dev, "unable to map destination\n");
                        dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
                        return ERR_PTR(-ENOMEM);
                }
        } else {
                mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
                                              DMA_BIDIRECTIONAL);
                if (unlikely(!mapped_src_nents)) {
                        dev_err(dev, "unable to map source\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        qm_sg_ents = 1 + mapped_src_nents;
        dst_sg_idx = qm_sg_ents;
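
        /*
         * Input S/G table layout: entry 0 carries the IV, entries
         * 1..mapped_src_nents carry req->src. When req->dst != req->src and
         * the destination needs more than one entry, its S/G entries start
         * at dst_sg_idx within the same allocation.
         */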

        qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
        qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
        if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
                     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
                dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
                        qm_sg_ents, ivsize);
                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
                           0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }

        /* allocate space for base edesc, link tables and IV */
        edesc = qi_cache_zalloc(GFP_DMA | flags);
        if (unlikely(!edesc)) {
                dev_err(dev, "could not allocate extended descriptor\n");
                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
                           0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }

        /* Make sure IV is located in a DMAable area */
        sg_table = &edesc->sgt[0];
        iv = (u8 *)(sg_table + qm_sg_ents);
        memcpy(iv, req->iv, ivsize);

        iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, iv_dma)) {
                dev_err(dev, "unable to map IV\n");
                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
                           0, 0, 0);
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }

        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
        edesc->qm_sg_bytes = qm_sg_bytes;

        dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
        sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

        if (mapped_dst_nents > 1)
                sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
                                 dst_sg_idx, 0);

        edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
                                          DMA_TO_DEVICE);
        if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
                dev_err(dev, "unable to map S/G table\n");
                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
                           iv_dma, ivsize, 0, 0);
                qi_cache_free(edesc);
                return ERR_PTR(-ENOMEM);
        }

        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
        dpaa2_fl_set_len(out_fle, req->cryptlen);

        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

        if (req->src == req->dst) {
                dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
                dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
                                  sizeof(*sg_table));
        } else if (mapped_dst_nents > 1) {
                dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
                dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
                                  sizeof(*sg_table));
        } else {
                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
                dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
        }

        return edesc;
}

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
                       struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        int ivsize = crypto_aead_ivsize(aead);

        caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
                   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
        dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
                           struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        int ivsize = crypto_skcipher_ivsize(skcipher);

        caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
                   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct aead_request *req = container_of(areq, struct aead_request,
                                                base);
        struct caam_request *req_ctx = to_caam_req(areq);
        struct aead_edesc *edesc = req_ctx->edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ecode = 0;

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                ecode = -EIO;
        }

        aead_unmap(ctx->dev, edesc, req);
        qi_cache_free(edesc);
        aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct aead_request *req = container_of(areq, struct aead_request,
                                                base);
        struct caam_request *req_ctx = to_caam_req(areq);
        struct aead_edesc *edesc = req_ctx->edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ecode = 0;

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                /*
                 * Verify that the hardware authentication (ICV) check
                 * passed; otherwise report -EBADMSG.
                 */
                if ((status & JRSTA_CCBERR_ERRID_MASK) ==
                     JRSTA_CCBERR_ERRID_ICVCHK)
                        ecode = -EBADMSG;
                else
                        ecode = -EIO;
        }

        aead_unmap(ctx->dev, edesc, req);
        qi_cache_free(edesc);
        aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
        struct aead_edesc *edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct caam_request *caam_req = aead_request_ctx(req);
        int ret;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, true);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        caam_req->flc = &ctx->flc[ENCRYPT];
        caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
        caam_req->cbk = aead_encrypt_done;
        caam_req->ctx = &req->base;
        caam_req->edesc = edesc;
        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
        if (ret != -EINPROGRESS &&
            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                aead_unmap(ctx->dev, edesc, req);
                qi_cache_free(edesc);
        }

        return ret;
}

static int aead_decrypt(struct aead_request *req)
{
        struct aead_edesc *edesc;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct caam_request *caam_req = aead_request_ctx(req);
        int ret;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        caam_req->flc = &ctx->flc[DECRYPT];
        caam_req->flc_dma = ctx->flc_dma[DECRYPT];
        caam_req->cbk = aead_decrypt_done;
        caam_req->ctx = &req->base;
        caam_req->edesc = edesc;
        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
        if (ret != -EINPROGRESS &&
            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                aead_unmap(ctx->dev, edesc, req);
                qi_cache_free(edesc);
        }

        return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
        if (req->assoclen < 8)
                return -EINVAL;

        return aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
        if (req->assoclen < 8)
                return -EINVAL;

        return aead_decrypt(req);
}
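
/*
 * The assoclen checks above match IPsec ESP usage (presumably the intended
 * rationale): the associated data starts with the 4-byte SPI plus a 4-byte
 * sequence number (8 bytes total; 12 with 64-bit extended sequence numbers),
 * so anything shorter is rejected.
 */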

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct skcipher_request *req = skcipher_request_cast(areq);
        struct caam_request *req_ctx = to_caam_req(areq);
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct skcipher_edesc *edesc = req_ctx->edesc;
        int ecode = 0;
        int ivsize = crypto_skcipher_ivsize(skcipher);

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                ecode = -EIO;
        }

        print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                             edesc->src_nents > 1 ? 100 : ivsize, 1);
        caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
                     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

        skcipher_unmap(ctx->dev, edesc, req);

        /*
         * The crypto API expects us to set the IV (req->iv) to the last
         * ciphertext block. This is used e.g. by the CTS mode.
         */
        scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
                                 ivsize, 0);

        qi_cache_free(edesc);
        skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct skcipher_request *req = skcipher_request_cast(areq);
        struct caam_request *req_ctx = to_caam_req(areq);
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct skcipher_edesc *edesc = req_ctx->edesc;
        int ecode = 0;
        int ivsize = crypto_skcipher_ivsize(skcipher);

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                ecode = -EIO;
        }

        print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                             edesc->src_nents > 1 ? 100 : ivsize, 1);
        caam_dump_sg(KERN_DEBUG, "dst    @" __stringify(__LINE__)": ",
                     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
                     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

        skcipher_unmap(ctx->dev, edesc, req);
        qi_cache_free(edesc);
        skcipher_request_complete(req, ecode);
}

static int skcipher_encrypt(struct skcipher_request *req)
{
        struct skcipher_edesc *edesc;
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct caam_request *caam_req = skcipher_request_ctx(req);
        int ret;

        /* allocate extended descriptor */
        edesc = skcipher_edesc_alloc(req);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        caam_req->flc = &ctx->flc[ENCRYPT];
        caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
        caam_req->cbk = skcipher_encrypt_done;
        caam_req->ctx = &req->base;
        caam_req->edesc = edesc;
        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
        if (ret != -EINPROGRESS &&
            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                skcipher_unmap(ctx->dev, edesc, req);
                qi_cache_free(edesc);
        }

        return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
        struct skcipher_edesc *edesc;
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct caam_request *caam_req = skcipher_request_ctx(req);
        int ivsize = crypto_skcipher_ivsize(skcipher);
        int ret;

        /* allocate extended descriptor */
        edesc = skcipher_edesc_alloc(req);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /*
         * The crypto API expects us to set the IV (req->iv) to the last
         * ciphertext block.
         */
        scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
                                 ivsize, 0);

        caam_req->flc = &ctx->flc[DECRYPT];
        caam_req->flc_dma = ctx->flc_dma[DECRYPT];
        caam_req->cbk = skcipher_decrypt_done;
        caam_req->ctx = &req->base;
        caam_req->edesc = edesc;
        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
        if (ret != -EINPROGRESS &&
            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                skcipher_unmap(ctx->dev, edesc, req);
                qi_cache_free(edesc);
        }

        return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
                         bool uses_dkp)
{
        dma_addr_t dma_addr;
        int i;

        /* copy descriptor header template value */
        ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

        ctx->dev = caam->dev;
        ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
                                        offsetof(struct caam_ctx, flc_dma),
                                        ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(ctx->dev, dma_addr)) {
                dev_err(ctx->dev, "unable to map key, shared descriptors\n");
                return -ENOMEM;
        }

        for (i = 0; i < NUM_OP; i++)
                ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
        ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

        return 0;
}
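
/*
 * caam_cra_init() covers flc[] and key[] with a single DMA mapping; the
 * region spans everything up to offsetof(struct caam_ctx, flc_dma):
 *
 *      dma_addr -> | flc[0] | ... | flc[NUM_OP - 1] | key |
 *
 * flc_dma[i] and key_dma are then derived as fixed offsets into it, which
 * is why caam_exit_common() below unmaps starting from flc_dma[0].
 */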
1353
1354 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1355 {
1356         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1357         struct caam_skcipher_alg *caam_alg =
1358                 container_of(alg, typeof(*caam_alg), skcipher);
1359
1360         crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1361         return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1362 }
1363
1364 static int caam_cra_init_aead(struct crypto_aead *tfm)
1365 {
1366         struct aead_alg *alg = crypto_aead_alg(tfm);
1367         struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1368                                                       aead);
1369
1370         crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
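        /*
         * Only the authenc algorithms use aead_setkey() and thus the DKP
         * (Derived Key Protocol), which has the CAAM write the split key
         * back into the context; this is why caam_cra_init() maps the
         * context DMA_BIDIRECTIONAL when uses_dkp is set.
         */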
1371         return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1372                              alg->setkey == aead_setkey);
1373 }
1374
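/* Undo the single key/flow-context mapping set up in caam_cra_init() */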
1375 static void caam_exit_common(struct caam_ctx *ctx)
1376 {
1377         dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1378                                offsetof(struct caam_ctx, flc_dma), ctx->dir,
1379                                DMA_ATTR_SKIP_CPU_SYNC);
1380 }
1381
1382 static void caam_cra_exit(struct crypto_skcipher *tfm)
1383 {
1384         caam_exit_common(crypto_skcipher_ctx(tfm));
1385 }
1386
1387 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1388 {
1389         caam_exit_common(crypto_aead_ctx(tfm));
1390 }
1391
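/*
 * skcipher algorithm templates. These are reached through the generic
 * crypto API name lookup, e.g. (illustrative sketch only):
 *
 *      tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * which resolves to cbc-aes-caam-qi2 whenever this driver wins the
 * priority-based algorithm selection (CAAM_CRA_PRIORITY).
 */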
1392 static struct caam_skcipher_alg driver_algs[] = {
1393         {
1394                 .skcipher = {
1395                         .base = {
1396                                 .cra_name = "cbc(aes)",
1397                                 .cra_driver_name = "cbc-aes-caam-qi2",
1398                                 .cra_blocksize = AES_BLOCK_SIZE,
1399                         },
1400                         .setkey = skcipher_setkey,
1401                         .encrypt = skcipher_encrypt,
1402                         .decrypt = skcipher_decrypt,
1403                         .min_keysize = AES_MIN_KEY_SIZE,
1404                         .max_keysize = AES_MAX_KEY_SIZE,
1405                         .ivsize = AES_BLOCK_SIZE,
1406                 },
1407                 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1408         },
1409         {
1410                 .skcipher = {
1411                         .base = {
1412                                 .cra_name = "cbc(des3_ede)",
1413                                 .cra_driver_name = "cbc-3des-caam-qi2",
1414                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1415                         },
1416                         .setkey = skcipher_setkey,
1417                         .encrypt = skcipher_encrypt,
1418                         .decrypt = skcipher_decrypt,
1419                         .min_keysize = DES3_EDE_KEY_SIZE,
1420                         .max_keysize = DES3_EDE_KEY_SIZE,
1421                         .ivsize = DES3_EDE_BLOCK_SIZE,
1422                 },
1423                 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1424         },
1425         {
1426                 .skcipher = {
1427                         .base = {
1428                                 .cra_name = "cbc(des)",
1429                                 .cra_driver_name = "cbc-des-caam-qi2",
1430                                 .cra_blocksize = DES_BLOCK_SIZE,
1431                         },
1432                         .setkey = skcipher_setkey,
1433                         .encrypt = skcipher_encrypt,
1434                         .decrypt = skcipher_decrypt,
1435                         .min_keysize = DES_KEY_SIZE,
1436                         .max_keysize = DES_KEY_SIZE,
1437                         .ivsize = DES_BLOCK_SIZE,
1438                 },
1439                 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1440         },
1441         {
1442                 .skcipher = {
1443                         .base = {
1444                                 .cra_name = "ctr(aes)",
1445                                 .cra_driver_name = "ctr-aes-caam-qi2",
1446                                 .cra_blocksize = 1,
1447                         },
1448                         .setkey = skcipher_setkey,
1449                         .encrypt = skcipher_encrypt,
1450                         .decrypt = skcipher_decrypt,
1451                         .min_keysize = AES_MIN_KEY_SIZE,
1452                         .max_keysize = AES_MAX_KEY_SIZE,
1453                         .ivsize = AES_BLOCK_SIZE,
1454                         .chunksize = AES_BLOCK_SIZE,
1455                 },
1456                 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1457                                         OP_ALG_AAI_CTR_MOD128,
1458         },
1459         {
1460                 .skcipher = {
1461                         .base = {
1462                                 .cra_name = "rfc3686(ctr(aes))",
1463                                 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1464                                 .cra_blocksize = 1,
1465                         },
1466                         .setkey = skcipher_setkey,
1467                         .encrypt = skcipher_encrypt,
1468                         .decrypt = skcipher_decrypt,
1469                         .min_keysize = AES_MIN_KEY_SIZE +
1470                                        CTR_RFC3686_NONCE_SIZE,
1471                         .max_keysize = AES_MAX_KEY_SIZE +
1472                                        CTR_RFC3686_NONCE_SIZE,
1473                         .ivsize = CTR_RFC3686_IV_SIZE,
1474                         .chunksize = AES_BLOCK_SIZE,
1475                 },
1476                 .caam = {
1477                         .class1_alg_type = OP_ALG_ALGSEL_AES |
1478                                            OP_ALG_AAI_CTR_MOD128,
1479                         .rfc3686 = true,
1480                 },
1481         },
1482         {
1483                 .skcipher = {
1484                         .base = {
1485                                 .cra_name = "xts(aes)",
1486                                 .cra_driver_name = "xts-aes-caam-qi2",
1487                                 .cra_blocksize = AES_BLOCK_SIZE,
1488                         },
1489                         .setkey = xts_skcipher_setkey,
1490                         .encrypt = skcipher_encrypt,
1491                         .decrypt = skcipher_decrypt,
1492                         .min_keysize = 2 * AES_MIN_KEY_SIZE,
1493                         .max_keysize = 2 * AES_MAX_KEY_SIZE,
1494                         .ivsize = AES_BLOCK_SIZE,
1495                 },
1496                 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1497         }
1498 };
1499
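/*
 * AEAD algorithm templates. class1_alg_type selects the cipher and
 * class2_alg_type the (HMAC) authentication algorithm; .geniv marks the
 * IV-generator (echainiv/seqiv) variants and .rfc3686 requests CTR
 * nonce handling in the shared descriptors.
 */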
1500 static struct caam_aead_alg driver_aeads[] = {
1501         {
1502                 .aead = {
1503                         .base = {
1504                                 .cra_name = "rfc4106(gcm(aes))",
1505                                 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1506                                 .cra_blocksize = 1,
1507                         },
1508                         .setkey = rfc4106_setkey,
1509                         .setauthsize = rfc4106_setauthsize,
1510                         .encrypt = ipsec_gcm_encrypt,
1511                         .decrypt = ipsec_gcm_decrypt,
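                        /* 8-byte explicit IV; the 4-byte salt comes from the key (RFC 4106) */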
1512                         .ivsize = 8,
1513                         .maxauthsize = AES_BLOCK_SIZE,
1514                 },
1515                 .caam = {
1516                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1517                 },
1518         },
1519         {
1520                 .aead = {
1521                         .base = {
1522                                 .cra_name = "rfc4543(gcm(aes))",
1523                                 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1524                                 .cra_blocksize = 1,
1525                         },
1526                         .setkey = rfc4543_setkey,
1527                         .setauthsize = rfc4543_setauthsize,
1528                         .encrypt = ipsec_gcm_encrypt,
1529                         .decrypt = ipsec_gcm_decrypt,
1530                         .ivsize = 8,
1531                         .maxauthsize = AES_BLOCK_SIZE,
1532                 },
1533                 .caam = {
1534                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1535                 },
1536         },
1537         /* Galois Counter Mode */
1538         {
1539                 .aead = {
1540                         .base = {
1541                                 .cra_name = "gcm(aes)",
1542                                 .cra_driver_name = "gcm-aes-caam-qi2",
1543                                 .cra_blocksize = 1,
1544                         },
1545                         .setkey = gcm_setkey,
1546                         .setauthsize = gcm_setauthsize,
1547                         .encrypt = aead_encrypt,
1548                         .decrypt = aead_decrypt,
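                        /* standard 96-bit GCM nonce */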
1549                         .ivsize = 12,
1550                         .maxauthsize = AES_BLOCK_SIZE,
1551                 },
1552                 .caam = {
1553                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1554                 }
1555         },
1556         /* single-pass ipsec_esp descriptor */
1557         {
1558                 .aead = {
1559                         .base = {
1560                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
1561                                 .cra_driver_name = "authenc-hmac-md5-"
1562                                                    "cbc-aes-caam-qi2",
1563                                 .cra_blocksize = AES_BLOCK_SIZE,
1564                         },
1565                         .setkey = aead_setkey,
1566                         .setauthsize = aead_setauthsize,
1567                         .encrypt = aead_encrypt,
1568                         .decrypt = aead_decrypt,
1569                         .ivsize = AES_BLOCK_SIZE,
1570                         .maxauthsize = MD5_DIGEST_SIZE,
1571                 },
1572                 .caam = {
1573                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1574                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1575                                            OP_ALG_AAI_HMAC_PRECOMP,
1576                 }
1577         },
1578         {
1579                 .aead = {
1580                         .base = {
1581                                 .cra_name = "echainiv(authenc(hmac(md5),"
1582                                             "cbc(aes)))",
1583                                 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1584                                                    "cbc-aes-caam-qi2",
1585                                 .cra_blocksize = AES_BLOCK_SIZE,
1586                         },
1587                         .setkey = aead_setkey,
1588                         .setauthsize = aead_setauthsize,
1589                         .encrypt = aead_encrypt,
1590                         .decrypt = aead_decrypt,
1591                         .ivsize = AES_BLOCK_SIZE,
1592                         .maxauthsize = MD5_DIGEST_SIZE,
1593                 },
1594                 .caam = {
1595                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1596                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1597                                            OP_ALG_AAI_HMAC_PRECOMP,
1598                         .geniv = true,
1599                 }
1600         },
1601         {
1602                 .aead = {
1603                         .base = {
1604                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1605                                 .cra_driver_name = "authenc-hmac-sha1-"
1606                                                    "cbc-aes-caam-qi2",
1607                                 .cra_blocksize = AES_BLOCK_SIZE,
1608                         },
1609                         .setkey = aead_setkey,
1610                         .setauthsize = aead_setauthsize,
1611                         .encrypt = aead_encrypt,
1612                         .decrypt = aead_decrypt,
1613                         .ivsize = AES_BLOCK_SIZE,
1614                         .maxauthsize = SHA1_DIGEST_SIZE,
1615                 },
1616                 .caam = {
1617                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1618                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1619                                            OP_ALG_AAI_HMAC_PRECOMP,
1620                 }
1621         },
1622         {
1623                 .aead = {
1624                         .base = {
1625                                 .cra_name = "echainiv(authenc(hmac(sha1),"
1626                                             "cbc(aes)))",
1627                                 .cra_driver_name = "echainiv-authenc-"
1628                                                    "hmac-sha1-cbc-aes-caam-qi2",
1629                                 .cra_blocksize = AES_BLOCK_SIZE,
1630                         },
1631                         .setkey = aead_setkey,
1632                         .setauthsize = aead_setauthsize,
1633                         .encrypt = aead_encrypt,
1634                         .decrypt = aead_decrypt,
1635                         .ivsize = AES_BLOCK_SIZE,
1636                         .maxauthsize = SHA1_DIGEST_SIZE,
1637                 },
1638                 .caam = {
1639                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1640                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1641                                            OP_ALG_AAI_HMAC_PRECOMP,
1642                         .geniv = true,
1643                 },
1644         },
1645         {
1646                 .aead = {
1647                         .base = {
1648                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1649                                 .cra_driver_name = "authenc-hmac-sha224-"
1650                                                    "cbc-aes-caam-qi2",
1651                                 .cra_blocksize = AES_BLOCK_SIZE,
1652                         },
1653                         .setkey = aead_setkey,
1654                         .setauthsize = aead_setauthsize,
1655                         .encrypt = aead_encrypt,
1656                         .decrypt = aead_decrypt,
1657                         .ivsize = AES_BLOCK_SIZE,
1658                         .maxauthsize = SHA224_DIGEST_SIZE,
1659                 },
1660                 .caam = {
1661                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1662                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1663                                            OP_ALG_AAI_HMAC_PRECOMP,
1664                 }
1665         },
1666         {
1667                 .aead = {
1668                         .base = {
1669                                 .cra_name = "echainiv(authenc(hmac(sha224),"
1670                                             "cbc(aes)))",
1671                                 .cra_driver_name = "echainiv-authenc-"
1672                                                    "hmac-sha224-cbc-aes-caam-qi2",
1673                                 .cra_blocksize = AES_BLOCK_SIZE,
1674                         },
1675                         .setkey = aead_setkey,
1676                         .setauthsize = aead_setauthsize,
1677                         .encrypt = aead_encrypt,
1678                         .decrypt = aead_decrypt,
1679                         .ivsize = AES_BLOCK_SIZE,
1680                         .maxauthsize = SHA224_DIGEST_SIZE,
1681                 },
1682                 .caam = {
1683                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1684                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1685                                            OP_ALG_AAI_HMAC_PRECOMP,
1686                         .geniv = true,
1687                 }
1688         },
1689         {
1690                 .aead = {
1691                         .base = {
1692                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1693                                 .cra_driver_name = "authenc-hmac-sha256-"
1694                                                    "cbc-aes-caam-qi2",
1695                                 .cra_blocksize = AES_BLOCK_SIZE,
1696                         },
1697                         .setkey = aead_setkey,
1698                         .setauthsize = aead_setauthsize,
1699                         .encrypt = aead_encrypt,
1700                         .decrypt = aead_decrypt,
1701                         .ivsize = AES_BLOCK_SIZE,
1702                         .maxauthsize = SHA256_DIGEST_SIZE,
1703                 },
1704                 .caam = {
1705                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1706                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1707                                            OP_ALG_AAI_HMAC_PRECOMP,
1708                 }
1709         },
1710         {
1711                 .aead = {
1712                         .base = {
1713                                 .cra_name = "echainiv(authenc(hmac(sha256),"
1714                                             "cbc(aes)))",
1715                                 .cra_driver_name = "echainiv-authenc-"
1716                                                    "hmac-sha256-cbc-aes-"
1717                                                    "caam-qi2",
1718                                 .cra_blocksize = AES_BLOCK_SIZE,
1719                         },
1720                         .setkey = aead_setkey,
1721                         .setauthsize = aead_setauthsize,
1722                         .encrypt = aead_encrypt,
1723                         .decrypt = aead_decrypt,
1724                         .ivsize = AES_BLOCK_SIZE,
1725                         .maxauthsize = SHA256_DIGEST_SIZE,
1726                 },
1727                 .caam = {
1728                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1729                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1730                                            OP_ALG_AAI_HMAC_PRECOMP,
1731                         .geniv = true,
1732                 }
1733         },
1734         {
1735                 .aead = {
1736                         .base = {
1737                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1738                                 .cra_driver_name = "authenc-hmac-sha384-"
1739                                                    "cbc-aes-caam-qi2",
1740                                 .cra_blocksize = AES_BLOCK_SIZE,
1741                         },
1742                         .setkey = aead_setkey,
1743                         .setauthsize = aead_setauthsize,
1744                         .encrypt = aead_encrypt,
1745                         .decrypt = aead_decrypt,
1746                         .ivsize = AES_BLOCK_SIZE,
1747                         .maxauthsize = SHA384_DIGEST_SIZE,
1748                 },
1749                 .caam = {
1750                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1751                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1752                                            OP_ALG_AAI_HMAC_PRECOMP,
1753                 }
1754         },
1755         {
1756                 .aead = {
1757                         .base = {
1758                                 .cra_name = "echainiv(authenc(hmac(sha384),"
1759                                             "cbc(aes)))",
1760                                 .cra_driver_name = "echainiv-authenc-"
1761                                                    "hmac-sha384-cbc-aes-"
1762                                                    "caam-qi2",
1763                                 .cra_blocksize = AES_BLOCK_SIZE,
1764                         },
1765                         .setkey = aead_setkey,
1766                         .setauthsize = aead_setauthsize,
1767                         .encrypt = aead_encrypt,
1768                         .decrypt = aead_decrypt,
1769                         .ivsize = AES_BLOCK_SIZE,
1770                         .maxauthsize = SHA384_DIGEST_SIZE,
1771                 },
1772                 .caam = {
1773                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1774                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1775                                            OP_ALG_AAI_HMAC_PRECOMP,
1776                         .geniv = true,
1777                 }
1778         },
1779         {
1780                 .aead = {
1781                         .base = {
1782                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1783                                 .cra_driver_name = "authenc-hmac-sha512-"
1784                                                    "cbc-aes-caam-qi2",
1785                                 .cra_blocksize = AES_BLOCK_SIZE,
1786                         },
1787                         .setkey = aead_setkey,
1788                         .setauthsize = aead_setauthsize,
1789                         .encrypt = aead_encrypt,
1790                         .decrypt = aead_decrypt,
1791                         .ivsize = AES_BLOCK_SIZE,
1792                         .maxauthsize = SHA512_DIGEST_SIZE,
1793                 },
1794                 .caam = {
1795                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1796                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1797                                            OP_ALG_AAI_HMAC_PRECOMP,
1798                 }
1799         },
1800         {
1801                 .aead = {
1802                         .base = {
1803                                 .cra_name = "echainiv(authenc(hmac(sha512),"
1804                                             "cbc(aes)))",
1805                                 .cra_driver_name = "echainiv-authenc-"
1806                                                    "hmac-sha512-cbc-aes-"
1807                                                    "caam-qi2",
1808                                 .cra_blocksize = AES_BLOCK_SIZE,
1809                         },
1810                         .setkey = aead_setkey,
1811                         .setauthsize = aead_setauthsize,
1812                         .encrypt = aead_encrypt,
1813                         .decrypt = aead_decrypt,
1814                         .ivsize = AES_BLOCK_SIZE,
1815                         .maxauthsize = SHA512_DIGEST_SIZE,
1816                 },
1817                 .caam = {
1818                         .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1819                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1820                                            OP_ALG_AAI_HMAC_PRECOMP,
1821                         .geniv = true,
1822                 }
1823         },
1824         {
1825                 .aead = {
1826                         .base = {
1827                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1828                                 .cra_driver_name = "authenc-hmac-md5-"
1829                                                    "cbc-des3_ede-caam-qi2",
1830                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1831                         },
1832                         .setkey = aead_setkey,
1833                         .setauthsize = aead_setauthsize,
1834                         .encrypt = aead_encrypt,
1835                         .decrypt = aead_decrypt,
1836                         .ivsize = DES3_EDE_BLOCK_SIZE,
1837                         .maxauthsize = MD5_DIGEST_SIZE,
1838                 },
1839                 .caam = {
1840                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1841                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1842                                            OP_ALG_AAI_HMAC_PRECOMP,
1843                 }
1844         },
1845         {
1846                 .aead = {
1847                         .base = {
1848                                 .cra_name = "echainiv(authenc(hmac(md5),"
1849                                             "cbc(des3_ede)))",
1850                                 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1851                                                    "cbc-des3_ede-caam-qi2",
1852                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1853                         },
1854                         .setkey = aead_setkey,
1855                         .setauthsize = aead_setauthsize,
1856                         .encrypt = aead_encrypt,
1857                         .decrypt = aead_decrypt,
1858                         .ivsize = DES3_EDE_BLOCK_SIZE,
1859                         .maxauthsize = MD5_DIGEST_SIZE,
1860                 },
1861                 .caam = {
1862                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1863                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1864                                            OP_ALG_AAI_HMAC_PRECOMP,
1865                         .geniv = true,
1866                 }
1867         },
1868         {
1869                 .aead = {
1870                         .base = {
1871                                 .cra_name = "authenc(hmac(sha1),"
1872                                             "cbc(des3_ede))",
1873                                 .cra_driver_name = "authenc-hmac-sha1-"
1874                                                    "cbc-des3_ede-caam-qi2",
1875                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1876                         },
1877                         .setkey = aead_setkey,
1878                         .setauthsize = aead_setauthsize,
1879                         .encrypt = aead_encrypt,
1880                         .decrypt = aead_decrypt,
1881                         .ivsize = DES3_EDE_BLOCK_SIZE,
1882                         .maxauthsize = SHA1_DIGEST_SIZE,
1883                 },
1884                 .caam = {
1885                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1886                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1887                                            OP_ALG_AAI_HMAC_PRECOMP,
1888                 },
1889         },
1890         {
1891                 .aead = {
1892                         .base = {
1893                                 .cra_name = "echainiv(authenc(hmac(sha1),"
1894                                             "cbc(des3_ede)))",
1895                                 .cra_driver_name = "echainiv-authenc-"
1896                                                    "hmac-sha1-"
1897                                                    "cbc-des3_ede-caam-qi2",
1898                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1899                         },
1900                         .setkey = aead_setkey,
1901                         .setauthsize = aead_setauthsize,
1902                         .encrypt = aead_encrypt,
1903                         .decrypt = aead_decrypt,
1904                         .ivsize = DES3_EDE_BLOCK_SIZE,
1905                         .maxauthsize = SHA1_DIGEST_SIZE,
1906                 },
1907                 .caam = {
1908                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1909                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1910                                            OP_ALG_AAI_HMAC_PRECOMP,
1911                         .geniv = true,
1912                 }
1913         },
1914         {
1915                 .aead = {
1916                         .base = {
1917                                 .cra_name = "authenc(hmac(sha224),"
1918                                             "cbc(des3_ede))",
1919                                 .cra_driver_name = "authenc-hmac-sha224-"
1920                                                    "cbc-des3_ede-caam-qi2",
1921                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1922                         },
1923                         .setkey = aead_setkey,
1924                         .setauthsize = aead_setauthsize,
1925                         .encrypt = aead_encrypt,
1926                         .decrypt = aead_decrypt,
1927                         .ivsize = DES3_EDE_BLOCK_SIZE,
1928                         .maxauthsize = SHA224_DIGEST_SIZE,
1929                 },
1930                 .caam = {
1931                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1932                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1933                                            OP_ALG_AAI_HMAC_PRECOMP,
1934                 },
1935         },
1936         {
1937                 .aead = {
1938                         .base = {
1939                                 .cra_name = "echainiv(authenc(hmac(sha224),"
1940                                             "cbc(des3_ede)))",
1941                                 .cra_driver_name = "echainiv-authenc-"
1942                                                    "hmac-sha224-"
1943                                                    "cbc-des3_ede-caam-qi2",
1944                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1945                         },
1946                         .setkey = aead_setkey,
1947                         .setauthsize = aead_setauthsize,
1948                         .encrypt = aead_encrypt,
1949                         .decrypt = aead_decrypt,
1950                         .ivsize = DES3_EDE_BLOCK_SIZE,
1951                         .maxauthsize = SHA224_DIGEST_SIZE,
1952                 },
1953                 .caam = {
1954                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1955                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1956                                            OP_ALG_AAI_HMAC_PRECOMP,
1957                         .geniv = true,
1958                 }
1959         },
1960         {
1961                 .aead = {
1962                         .base = {
1963                                 .cra_name = "authenc(hmac(sha256),"
1964                                             "cbc(des3_ede))",
1965                                 .cra_driver_name = "authenc-hmac-sha256-"
1966                                                    "cbc-des3_ede-caam-qi2",
1967                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1968                         },
1969                         .setkey = aead_setkey,
1970                         .setauthsize = aead_setauthsize,
1971                         .encrypt = aead_encrypt,
1972                         .decrypt = aead_decrypt,
1973                         .ivsize = DES3_EDE_BLOCK_SIZE,
1974                         .maxauthsize = SHA256_DIGEST_SIZE,
1975                 },
1976                 .caam = {
1977                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1978                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1979                                            OP_ALG_AAI_HMAC_PRECOMP,
1980                 },
1981         },
1982         {
1983                 .aead = {
1984                         .base = {
1985                                 .cra_name = "echainiv(authenc(hmac(sha256),"
1986                                             "cbc(des3_ede)))",
1987                                 .cra_driver_name = "echainiv-authenc-"
1988                                                    "hmac-sha256-"
1989                                                    "cbc-des3_ede-caam-qi2",
1990                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1991                         },
1992                         .setkey = aead_setkey,
1993                         .setauthsize = aead_setauthsize,
1994                         .encrypt = aead_encrypt,
1995                         .decrypt = aead_decrypt,
1996                         .ivsize = DES3_EDE_BLOCK_SIZE,
1997                         .maxauthsize = SHA256_DIGEST_SIZE,
1998                 },
1999                 .caam = {
2000                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2001                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2002                                            OP_ALG_AAI_HMAC_PRECOMP,
2003                         .geniv = true,
2004                 }
2005         },
2006         {
2007                 .aead = {
2008                         .base = {
2009                                 .cra_name = "authenc(hmac(sha384),"
2010                                             "cbc(des3_ede))",
2011                                 .cra_driver_name = "authenc-hmac-sha384-"
2012                                                    "cbc-des3_ede-caam-qi2",
2013                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2014                         },
2015                         .setkey = aead_setkey,
2016                         .setauthsize = aead_setauthsize,
2017                         .encrypt = aead_encrypt,
2018                         .decrypt = aead_decrypt,
2019                         .ivsize = DES3_EDE_BLOCK_SIZE,
2020                         .maxauthsize = SHA384_DIGEST_SIZE,
2021                 },
2022                 .caam = {
2023                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2024                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2025                                            OP_ALG_AAI_HMAC_PRECOMP,
2026                 },
2027         },
2028         {
2029                 .aead = {
2030                         .base = {
2031                                 .cra_name = "echainiv(authenc(hmac(sha384),"
2032                                             "cbc(des3_ede)))",
2033                                 .cra_driver_name = "echainiv-authenc-"
2034                                                    "hmac-sha384-"
2035                                                    "cbc-des3_ede-caam-qi2",
2036                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2037                         },
2038                         .setkey = aead_setkey,
2039                         .setauthsize = aead_setauthsize,
2040                         .encrypt = aead_encrypt,
2041                         .decrypt = aead_decrypt,
2042                         .ivsize = DES3_EDE_BLOCK_SIZE,
2043                         .maxauthsize = SHA384_DIGEST_SIZE,
2044                 },
2045                 .caam = {
2046                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2047                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2048                                            OP_ALG_AAI_HMAC_PRECOMP,
2049                         .geniv = true,
2050                 }
2051         },
2052         {
2053                 .aead = {
2054                         .base = {
2055                                 .cra_name = "authenc(hmac(sha512),"
2056                                             "cbc(des3_ede))",
2057                                 .cra_driver_name = "authenc-hmac-sha512-"
2058                                                    "cbc-des3_ede-caam-qi2",
2059                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2060                         },
2061                         .setkey = aead_setkey,
2062                         .setauthsize = aead_setauthsize,
2063                         .encrypt = aead_encrypt,
2064                         .decrypt = aead_decrypt,
2065                         .ivsize = DES3_EDE_BLOCK_SIZE,
2066                         .maxauthsize = SHA512_DIGEST_SIZE,
2067                 },
2068                 .caam = {
2069                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2070                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2071                                            OP_ALG_AAI_HMAC_PRECOMP,
2072                 },
2073         },
2074         {
2075                 .aead = {
2076                         .base = {
2077                                 .cra_name = "echainiv(authenc(hmac(sha512),"
2078                                             "cbc(des3_ede)))",
2079                                 .cra_driver_name = "echainiv-authenc-"
2080                                                    "hmac-sha512-"
2081                                                    "cbc-des3_ede-caam-qi2",
2082                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2083                         },
2084                         .setkey = aead_setkey,
2085                         .setauthsize = aead_setauthsize,
2086                         .encrypt = aead_encrypt,
2087                         .decrypt = aead_decrypt,
2088                         .ivsize = DES3_EDE_BLOCK_SIZE,
2089                         .maxauthsize = SHA512_DIGEST_SIZE,
2090                 },
2091                 .caam = {
2092                         .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2093                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2094                                            OP_ALG_AAI_HMAC_PRECOMP,
2095                         .geniv = true,
2096                 }
2097         },
2098         {
2099                 .aead = {
2100                         .base = {
2101                                 .cra_name = "authenc(hmac(md5),cbc(des))",
2102                                 .cra_driver_name = "authenc-hmac-md5-"
2103                                                    "cbc-des-caam-qi2",
2104                                 .cra_blocksize = DES_BLOCK_SIZE,
2105                         },
2106                         .setkey = aead_setkey,
2107                         .setauthsize = aead_setauthsize,
2108                         .encrypt = aead_encrypt,
2109                         .decrypt = aead_decrypt,
2110                         .ivsize = DES_BLOCK_SIZE,
2111                         .maxauthsize = MD5_DIGEST_SIZE,
2112                 },
2113                 .caam = {
2114                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2115                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2116                                            OP_ALG_AAI_HMAC_PRECOMP,
2117                 },
2118         },
2119         {
2120                 .aead = {
2121                         .base = {
2122                                 .cra_name = "echainiv(authenc(hmac(md5),"
2123                                             "cbc(des)))",
2124                                 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2125                                                    "cbc-des-caam-qi2",
2126                                 .cra_blocksize = DES_BLOCK_SIZE,
2127                         },
2128                         .setkey = aead_setkey,
2129                         .setauthsize = aead_setauthsize,
2130                         .encrypt = aead_encrypt,
2131                         .decrypt = aead_decrypt,
2132                         .ivsize = DES_BLOCK_SIZE,
2133                         .maxauthsize = MD5_DIGEST_SIZE,
2134                 },
2135                 .caam = {
2136                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2137                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2138                                            OP_ALG_AAI_HMAC_PRECOMP,
2139                         .geniv = true,
2140                 }
2141         },
2142         {
2143                 .aead = {
2144                         .base = {
2145                                 .cra_name = "authenc(hmac(sha1),cbc(des))",
2146                                 .cra_driver_name = "authenc-hmac-sha1-"
2147                                                    "cbc-des-caam-qi2",
2148                                 .cra_blocksize = DES_BLOCK_SIZE,
2149                         },
2150                         .setkey = aead_setkey,
2151                         .setauthsize = aead_setauthsize,
2152                         .encrypt = aead_encrypt,
2153                         .decrypt = aead_decrypt,
2154                         .ivsize = DES_BLOCK_SIZE,
2155                         .maxauthsize = SHA1_DIGEST_SIZE,
2156                 },
2157                 .caam = {
2158                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2159                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2160                                            OP_ALG_AAI_HMAC_PRECOMP,
2161                 },
2162         },
2163         {
2164                 .aead = {
2165                         .base = {
2166                                 .cra_name = "echainiv(authenc(hmac(sha1),"
2167                                             "cbc(des)))",
2168                                 .cra_driver_name = "echainiv-authenc-"
2169                                                    "hmac-sha1-cbc-des-caam-qi2",
2170                                 .cra_blocksize = DES_BLOCK_SIZE,
2171                         },
2172                         .setkey = aead_setkey,
2173                         .setauthsize = aead_setauthsize,
2174                         .encrypt = aead_encrypt,
2175                         .decrypt = aead_decrypt,
2176                         .ivsize = DES_BLOCK_SIZE,
2177                         .maxauthsize = SHA1_DIGEST_SIZE,
2178                 },
2179                 .caam = {
2180                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2181                         .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2182                                            OP_ALG_AAI_HMAC_PRECOMP,
2183                         .geniv = true,
2184                 }
2185         },
2186         {
2187                 .aead = {
2188                         .base = {
2189                                 .cra_name = "authenc(hmac(sha224),cbc(des))",
2190                                 .cra_driver_name = "authenc-hmac-sha224-"
2191                                                    "cbc-des-caam-qi2",
2192                                 .cra_blocksize = DES_BLOCK_SIZE,
2193                         },
2194                         .setkey = aead_setkey,
2195                         .setauthsize = aead_setauthsize,
2196                         .encrypt = aead_encrypt,
2197                         .decrypt = aead_decrypt,
2198                         .ivsize = DES_BLOCK_SIZE,
2199                         .maxauthsize = SHA224_DIGEST_SIZE,
2200                 },
2201                 .caam = {
2202                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2203                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2204                                            OP_ALG_AAI_HMAC_PRECOMP,
2205                 },
2206         },
2207         {
2208                 .aead = {
2209                         .base = {
2210                                 .cra_name = "echainiv(authenc(hmac(sha224),"
2211                                             "cbc(des)))",
2212                                 .cra_driver_name = "echainiv-authenc-"
2213                                                    "hmac-sha224-cbc-des-"
2214                                                    "caam-qi2",
2215                                 .cra_blocksize = DES_BLOCK_SIZE,
2216                         },
2217                         .setkey = aead_setkey,
2218                         .setauthsize = aead_setauthsize,
2219                         .encrypt = aead_encrypt,
2220                         .decrypt = aead_decrypt,
2221                         .ivsize = DES_BLOCK_SIZE,
2222                         .maxauthsize = SHA224_DIGEST_SIZE,
2223                 },
2224                 .caam = {
2225                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2226                         .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2227                                            OP_ALG_AAI_HMAC_PRECOMP,
2228                         .geniv = true,
2229                 }
2230         },
2231         {
2232                 .aead = {
2233                         .base = {
2234                                 .cra_name = "authenc(hmac(sha256),cbc(des))",
2235                                 .cra_driver_name = "authenc-hmac-sha256-"
2236                                                    "cbc-des-caam-qi2",
2237                                 .cra_blocksize = DES_BLOCK_SIZE,
2238                         },
2239                         .setkey = aead_setkey,
2240                         .setauthsize = aead_setauthsize,
2241                         .encrypt = aead_encrypt,
2242                         .decrypt = aead_decrypt,
2243                         .ivsize = DES_BLOCK_SIZE,
2244                         .maxauthsize = SHA256_DIGEST_SIZE,
2245                 },
2246                 .caam = {
2247                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2248                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2249                                            OP_ALG_AAI_HMAC_PRECOMP,
2250                 },
2251         },
2252         {
2253                 .aead = {
2254                         .base = {
2255                                 .cra_name = "echainiv(authenc(hmac(sha256),"
2256                                             "cbc(des)))",
2257                                 .cra_driver_name = "echainiv-authenc-"
2258                                                    "hmac-sha256-cbc-des-"
2259                                                    "caam-qi2",
2260                                 .cra_blocksize = DES_BLOCK_SIZE,
2261                         },
2262                         .setkey = aead_setkey,
2263                         .setauthsize = aead_setauthsize,
2264                         .encrypt = aead_encrypt,
2265                         .decrypt = aead_decrypt,
2266                         .ivsize = DES_BLOCK_SIZE,
2267                         .maxauthsize = SHA256_DIGEST_SIZE,
2268                 },
2269                 .caam = {
2270                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2271                         .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2272                                            OP_ALG_AAI_HMAC_PRECOMP,
2273                         .geniv = true,
2274                 },
2275         },
2276         {
2277                 .aead = {
2278                         .base = {
2279                                 .cra_name = "authenc(hmac(sha384),cbc(des))",
2280                                 .cra_driver_name = "authenc-hmac-sha384-"
2281                                                    "cbc-des-caam-qi2",
2282                                 .cra_blocksize = DES_BLOCK_SIZE,
2283                         },
2284                         .setkey = aead_setkey,
2285                         .setauthsize = aead_setauthsize,
2286                         .encrypt = aead_encrypt,
2287                         .decrypt = aead_decrypt,
2288                         .ivsize = DES_BLOCK_SIZE,
2289                         .maxauthsize = SHA384_DIGEST_SIZE,
2290                 },
2291                 .caam = {
2292                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2293                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2294                                            OP_ALG_AAI_HMAC_PRECOMP,
2295                 },
2296         },
2297         {
2298                 .aead = {
2299                         .base = {
2300                                 .cra_name = "echainiv(authenc(hmac(sha384),"
2301                                             "cbc(des)))",
2302                                 .cra_driver_name = "echainiv-authenc-"
2303                                                    "hmac-sha384-cbc-des-"
2304                                                    "caam-qi2",
2305                                 .cra_blocksize = DES_BLOCK_SIZE,
2306                         },
2307                         .setkey = aead_setkey,
2308                         .setauthsize = aead_setauthsize,
2309                         .encrypt = aead_encrypt,
2310                         .decrypt = aead_decrypt,
2311                         .ivsize = DES_BLOCK_SIZE,
2312                         .maxauthsize = SHA384_DIGEST_SIZE,
2313                 },
2314                 .caam = {
2315                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2316                         .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2317                                            OP_ALG_AAI_HMAC_PRECOMP,
2318                         .geniv = true,
2319                 }
2320         },
2321         {
2322                 .aead = {
2323                         .base = {
2324                                 .cra_name = "authenc(hmac(sha512),cbc(des))",
2325                                 .cra_driver_name = "authenc-hmac-sha512-"
2326                                                    "cbc-des-caam-qi2",
2327                                 .cra_blocksize = DES_BLOCK_SIZE,
2328                         },
2329                         .setkey = aead_setkey,
2330                         .setauthsize = aead_setauthsize,
2331                         .encrypt = aead_encrypt,
2332                         .decrypt = aead_decrypt,
2333                         .ivsize = DES_BLOCK_SIZE,
2334                         .maxauthsize = SHA512_DIGEST_SIZE,
2335                 },
2336                 .caam = {
2337                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2338                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2339                                            OP_ALG_AAI_HMAC_PRECOMP,
2340                 }
2341         },
2342         {
2343                 .aead = {
2344                         .base = {
2345                                 .cra_name = "echainiv(authenc(hmac(sha512),"
2346                                             "cbc(des)))",
2347                                 .cra_driver_name = "echainiv-authenc-"
2348                                                    "hmac-sha512-cbc-des-"
2349                                                    "caam-qi2",
2350                                 .cra_blocksize = DES_BLOCK_SIZE,
2351                         },
2352                         .setkey = aead_setkey,
2353                         .setauthsize = aead_setauthsize,
2354                         .encrypt = aead_encrypt,
2355                         .decrypt = aead_decrypt,
2356                         .ivsize = DES_BLOCK_SIZE,
2357                         .maxauthsize = SHA512_DIGEST_SIZE,
2358                 },
2359                 .caam = {
2360                         .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2361                         .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2362                                            OP_ALG_AAI_HMAC_PRECOMP,
2363                         .geniv = true,
2364                 }
2365         },
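        /* RFC3686 (AES-CTR) based single-pass authenc descriptors */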
2366         {
2367                 .aead = {
2368                         .base = {
2369                                 .cra_name = "authenc(hmac(md5),"
2370                                             "rfc3686(ctr(aes)))",
2371                                 .cra_driver_name = "authenc-hmac-md5-"
2372                                                    "rfc3686-ctr-aes-caam-qi2",
2373                                 .cra_blocksize = 1,
2374                         },
2375                         .setkey = aead_setkey,
2376                         .setauthsize = aead_setauthsize,
2377                         .encrypt = aead_encrypt,
2378                         .decrypt = aead_decrypt,
2379                         .ivsize = CTR_RFC3686_IV_SIZE,
2380                         .maxauthsize = MD5_DIGEST_SIZE,
2381                 },
2382                 .caam = {
2383                         .class1_alg_type = OP_ALG_ALGSEL_AES |
2384                                            OP_ALG_AAI_CTR_MOD128,
2385                         .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2386                                            OP_ALG_AAI_HMAC_PRECOMP,
2387                         .rfc3686 = true,
2388                 },
2389         },
2390         {
2391                 .aead = {
2392                         .base = {
2393                                 .cra_name = "seqiv(authenc("
2394                                             "hmac(md5),rfc3686(ctr(aes))))",
2395                                 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2396                                                    "rfc3686-ctr-aes-caam-qi2",
2397                                 .cra_blocksize = 1,
2398                         },
2399                         .setkey = aead_setkey,
2400                         .setauthsize = aead_setauthsize,
2401                         .encrypt = aead_encrypt,
2402                         .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                        .geniv = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),"
                                            "rfc3686(ctr(aes)))",
                                .cra_driver_name = "authenc-hmac-sha1-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "seqiv(authenc("
                                            "hmac(sha1),rfc3686(ctr(aes))))",
                                .cra_driver_name = "seqiv-authenc-hmac-sha1-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                        .geniv = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),"
                                            "rfc3686(ctr(aes)))",
                                .cra_driver_name = "authenc-hmac-sha224-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "seqiv(authenc("
                                            "hmac(sha224),rfc3686(ctr(aes))))",
                                .cra_driver_name = "seqiv-authenc-hmac-sha224-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                        .geniv = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),"
                                            "rfc3686(ctr(aes)))",
                                .cra_driver_name = "authenc-hmac-sha256-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "seqiv(authenc(hmac(sha256),"
                                            "rfc3686(ctr(aes))))",
                                .cra_driver_name = "seqiv-authenc-hmac-sha256-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                        .geniv = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),"
                                            "rfc3686(ctr(aes)))",
                                .cra_driver_name = "authenc-hmac-sha384-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "seqiv(authenc(hmac(sha384),"
                                            "rfc3686(ctr(aes))))",
                                .cra_driver_name = "seqiv-authenc-hmac-sha384-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                        .geniv = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),"
                                            "rfc3686(ctr(aes)))",
                                .cra_driver_name = "authenc-hmac-sha512-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                },
        },
        {
                .aead = {
                        .base = {
                                .cra_name = "seqiv(authenc(hmac(sha512),"
                                            "rfc3686(ctr(aes))))",
                                .cra_driver_name = "seqiv-authenc-hmac-sha512-"
                                                   "rfc3686-ctr-aes-caam-qi2",
                                .cra_blocksize = 1,
                        },
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
                        .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                        .rfc3686 = true,
                        .geniv = true,
                },
        },
};
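
/*
 * Note: each authenc template above comes in two flavours - a plain
 * "authenc(...)" variant and a "seqiv(authenc(...))" variant that differs
 * only in having .geniv set, i.e. the driver generates the IV itself.
 */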

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
        struct skcipher_alg *alg = &t_alg->skcipher;

        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

        alg->init = caam_cra_init_skcipher;
        alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
        struct aead_alg *alg = &t_alg->aead;

        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

        alg->init = caam_cra_init_aead;
        alg->exit = caam_cra_exit_aead;
}

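/*
 * ahash (message digest) support
 */
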
/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN                    8
#define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)

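/*
 * Per-session shared descriptors, one for each way a hash request can be
 * split up: continue a running hash (UPDATE), start one (UPDATE_FIRST),
 * finish one (FINALIZE), or do everything in a single pass (DIGEST).
 * See ahash_set_sh_desc() below.
 */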
enum hash_optype {
        UPDATE = 0,
        UPDATE_FIRST,
        FINALIZE,
        DIGEST,
        HASH_NUM_OP
};

/**
 * caam_hash_ctx - ahash per-session context
 * @flc: Flow Contexts array
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @dev: dpseci device
 * @ctx_len: size of Context Register
 * @adata: hashing algorithm details
 */
struct caam_hash_ctx {
        struct caam_flc flc[HASH_NUM_OP];
        dma_addr_t flc_dma[HASH_NUM_OP];
        struct device *dev;
        int ctx_len;
        struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
        struct caam_request caam_req;
        dma_addr_t buf_dma;
        dma_addr_t ctx_dma;
        u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_0;
        u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_1;
        u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
        int current_buf;
};

struct caam_export_state {
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
        u8 caam_ctx[MAX_CTX_LEN];
        int buflen;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};

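/*
 * The request state keeps two staging buffers (buf_0/buf_1) used in a
 * ping-pong fashion: while the "current" buffer may be DMA-mapped as part
 * of an in-flight job, leftover bytes for the next operation are staged in
 * the alternate buffer; switch_buf() flips the roles on completion.
 */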
static inline void switch_buf(struct caam_hash_state *state)
{
        state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
        return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
        return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
        return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
        return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
                                   struct dpaa2_sg_entry *qm_sg,
                                   struct caam_hash_state *state)
{
        int buflen = *current_buflen(state);

        if (!buflen)
                return 0;

        state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(dev, state->buf_dma)) {
                dev_err(dev, "unable to map buf\n");
                state->buf_dma = 0;
                return -ENOMEM;
        }

        dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);

        return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_qm_sg(struct device *dev,
                                   struct caam_hash_state *state, int ctx_len,
                                   struct dpaa2_sg_entry *qm_sg, u32 flag)
{
        state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
        if (dma_mapping_error(dev, state->ctx_dma)) {
                dev_err(dev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);

        return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
        struct caam_flc *flc;
        u32 *desc;

        /* ahash_update shared descriptor */
        flc = &ctx->flc[UPDATE];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
                          ctx->ctx_len, true, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_update_first shared descriptor */
        flc = &ctx->flc[UPDATE_FIRST];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                          ctx->ctx_len, false, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_final shared descriptor */
        flc = &ctx->flc[FINALIZE];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
                          ctx->ctx_len, true, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_digest shared descriptor */
        flc = &ctx->flc[DIGEST];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
                          ctx->ctx_len, false, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        return 0;
}

struct split_key_sh_result {
        struct completion completion;
        int err;
        struct device *dev;
};

static void split_key_sh_done(void *cbk_ctx, u32 err)
{
        struct split_key_sh_result *res = cbk_ctx;

        dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        if (err)
                caam_qi2_strstatus(res->dev, err);

        res->err = err;
        complete(&res->completion);
}

/* Pre-digest the key when it is longer than the hash block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                           u32 *keylen, u8 *key_out, u32 digestsize)
{
        struct caam_request *req_ctx;
        u32 *desc;
        struct split_key_sh_result result;
        dma_addr_t src_dma, dst_dma;
        struct caam_flc *flc;
        dma_addr_t flc_dma;
        int ret = -ENOMEM;
        struct dpaa2_fl_entry *in_fle, *out_fle;

        req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
        if (!req_ctx)
                return -ENOMEM;

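        /*
         * DPAA2 frame list convention: entry 0 describes the output,
         * entry 1 the input.
         */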
        in_fle = &req_ctx->fd_flt[1];
        out_fle = &req_ctx->fd_flt[0];

        flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
        if (!flc)
                goto err_flc;

        src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->dev, src_dma)) {
                dev_err(ctx->dev, "unable to map key input memory\n");
                goto err_src_dma;
        }
        dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(ctx->dev, dst_dma)) {
                dev_err(ctx->dev, "unable to map key output memory\n");
                goto err_dst_dma;
        }

        desc = flc->sh_desc;

        init_sh_desc(desc, 0);

        /* descriptor to perform unkeyed hash on key_in */
        append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
                         OP_ALG_AS_INITFINAL);
        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
                                 desc_bytes(desc), DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->dev, flc_dma)) {
                dev_err(ctx->dev, "unable to map shared descriptor\n");
                goto err_flc_dma;
        }

        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(in_fle, src_dma);
        dpaa2_fl_set_len(in_fle, *keylen);
        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(out_fle, dst_dma);
        dpaa2_fl_set_len(out_fle, digestsize);

        print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
        print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        result.err = 0;
        init_completion(&result.completion);
        result.dev = ctx->dev;

        req_ctx->flc = flc;
        req_ctx->flc_dma = flc_dma;
        req_ctx->cbk = split_key_sh_done;
        req_ctx->ctx = &result;

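        /*
         * The enqueue is asynchronous; setkey runs in process context, so
         * we can simply block here until split_key_sh_done() fires.
         */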
        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
        if (ret == -EINPROGRESS) {
                /* in progress */
                wait_for_completion(&result.completion);
                ret = result.err;
                print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, key_out,
                                     digestsize, 1);
        }

        dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
                         DMA_TO_DEVICE);
err_flc_dma:
        dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
err_dst_dma:
        dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
err_src_dma:
        kfree(flc);
err_flc:
        kfree(req_ctx);

        *keylen = digestsize;

        return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
                        unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        unsigned int digestsize = crypto_ahash_digestsize(ahash);
        int ret;
        u8 *hashed_key = NULL;

        dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

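        /*
         * HMAC uses at most one block of key material, so a key longer than
         * the block size is first reduced to its digest.
         */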
        if (keylen > blocksize) {
                hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
                                           GFP_KERNEL | GFP_DMA);
                if (!hashed_key)
                        return -ENOMEM;
                ret = hash_digest_key(ctx, key, &keylen, hashed_key,
                                      digestsize);
                if (ret)
                        goto bad_free_key;
                key = hashed_key;
        }

        ctx->adata.keylen = keylen;
        ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                              OP_ALG_ALGSEL_MASK);
        if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
                goto bad_free_key;

        ctx->adata.key_virt = key;
        ctx->adata.key_inline = true;

        ret = ahash_set_sh_desc(ahash);
        kfree(hashed_key);
        return ret;
bad_free_key:
        kfree(hashed_key);
        crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
                               struct ahash_request *req, int dst_len)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (edesc->src_nents)
                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
        if (edesc->dst_dma)
                dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

        if (edesc->qm_sg_bytes)
                dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
                                 DMA_TO_DEVICE);

        if (state->buf_dma) {
                dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
                                 DMA_TO_DEVICE);
                state->buf_dma = 0;
        }
}

static inline void ahash_unmap_ctx(struct device *dev,
                                   struct ahash_edesc *edesc,
                                   struct ahash_request *req, int dst_len,
                                   u32 flag)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (state->ctx_dma) {
                dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
                state->ctx_dma = 0;
        }
        ahash_unmap(dev, edesc, req, dst_len);
}

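/*
 * The completion callbacks below differ only in which DMA mappings they
 * tear down: ahash_done() handles jobs without a mapped running context,
 * ahash_done_ctx_src() a context that was used as input only, and
 * ahash_done_bi()/ahash_done_ctx_dst() a context that was (also) written
 * back, in which case the ping-pong buffer is flipped as well.
 */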
static void ahash_done(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct ahash_request *req = ahash_request_cast(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct ahash_edesc *edesc = state->caam_req.edesc;
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ecode = 0;

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                ecode = -EIO;
        }

        ahash_unmap(ctx->dev, edesc, req, digestsize);
        qi_cache_free(edesc);

        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                                     digestsize, 1);

        req->base.complete(&req->base, ecode);
}

static void ahash_done_bi(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct ahash_request *req = ahash_request_cast(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct ahash_edesc *edesc = state->caam_req.edesc;
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int ecode = 0;

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                ecode = -EIO;
        }

        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        switch_buf(state);
        qi_cache_free(edesc);

        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                                     crypto_ahash_digestsize(ahash), 1);

        req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct ahash_request *req = ahash_request_cast(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct ahash_edesc *edesc = state->caam_req.edesc;
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ecode = 0;

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                ecode = -EIO;
        }

        ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
        qi_cache_free(edesc);

        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                                     digestsize, 1);

        req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
        struct crypto_async_request *areq = cbk_ctx;
        struct ahash_request *req = ahash_request_cast(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct ahash_edesc *edesc = state->caam_req.edesc;
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int ecode = 0;

        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

        if (unlikely(status)) {
                caam_qi2_strstatus(ctx->dev, status);
                ecode = -EIO;
        }

        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
        switch_buf(state);
        qi_cache_free(edesc);

        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                                     crypto_ahash_digestsize(ahash), 1);

        req->base.complete(&req->base, ecode);
}

static int ahash_update_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_request *req_ctx = &state->caam_req;
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int *buflen = current_buflen(state);
        u8 *next_buf = alt_buf(state);
        int *next_buflen = alt_buflen(state), last_buflen;
        int in_len = *buflen + req->nbytes, to_hash;
        int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
        struct ahash_edesc *edesc;
        int ret = 0;

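        /*
         * Hash as much of the buffered plus new data as possible, keeping
         * the remainder (less than one block) buffered for the next call.
         */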
        last_buflen = *next_buflen;
        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                struct dpaa2_sg_entry *sg_table;

                src_nents = sg_nents_for_len(req->src,
                                             req->nbytes - (*next_buflen));
                if (src_nents < 0) {
                        dev_err(ctx->dev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(ctx->dev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                /* allocate space for base edesc and link tables */
                edesc = qi_cache_zalloc(GFP_DMA | flags);
                if (!edesc) {
                        dma_unmap_sg(ctx->dev, req->src, src_nents,
                                     DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                qm_sg_src_index = 1 + (*buflen ? 1 : 0);
                qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
                              sizeof(*sg_table);
                sg_table = &edesc->sgt[0];

                ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
                                       DMA_BIDIRECTIONAL);
                if (ret)
                        goto unmap_ctx;

                ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
                if (ret)
                        goto unmap_ctx;

                if (mapped_nents) {
                        sg_to_qm_sg_last(req->src, mapped_nents,
                                         sg_table + qm_sg_src_index, 0);
                        if (*next_buflen)
                                scatterwalk_map_and_copy(next_buf, req->src,
                                                         to_hash - *buflen,
                                                         *next_buflen, 0);
                } else {
                        dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
                                           true);
                }

                edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
                                                  qm_sg_bytes, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
                        dev_err(ctx->dev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }
                edesc->qm_sg_bytes = qm_sg_bytes;

                memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
                dpaa2_fl_set_final(in_fle, true);
                dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
                dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
                dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
                dpaa2_fl_set_addr(out_fle, state->ctx_dma);
                dpaa2_fl_set_len(out_fle, ctx->ctx_len);

                req_ctx->flc = &ctx->flc[UPDATE];
                req_ctx->flc_dma = ctx->flc_dma[UPDATE];
                req_ctx->cbk = ahash_done_bi;
                req_ctx->ctx = &req->base;
                req_ctx->edesc = edesc;

                ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
                if (ret != -EINPROGRESS &&
                    !(ret == -EBUSY &&
                      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        goto unmap_ctx;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;
                *next_buflen = last_buflen;
        }

        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
        print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
                             1);

        return ret;
unmap_ctx:
        ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        qi_cache_free(edesc);
        return ret;
}

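/*
 * ahash_final_ctx()/ahash_finup_ctx() feed the running context plus any
 * buffered bytes (and, for finup, the remaining request data) through the
 * FINALIZE shared descriptor to produce the digest.
 */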
static int ahash_final_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_request *req_ctx = &state->caam_req;
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        int qm_sg_bytes, qm_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        struct dpaa2_sg_entry *sg_table;
        int ret;

        /* allocate space for base edesc and link tables */
        edesc = qi_cache_zalloc(GFP_DMA | flags);
        if (!edesc)
                return -ENOMEM;

        qm_sg_src_index = 1 + (buflen ? 1 : 0);
        qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
        sg_table = &edesc->sgt[0];

        ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
                               DMA_TO_DEVICE);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
        if (ret)
                goto unmap_ctx;

        dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);

        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
                                          DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
                dev_err(ctx->dev, "unable to map S/G table\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }
        edesc->qm_sg_bytes = qm_sg_bytes;

        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
                dev_err(ctx->dev, "unable to map dst\n");
                edesc->dst_dma = 0;
                ret = -ENOMEM;
                goto unmap_ctx;
        }

        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
        dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
        dpaa2_fl_set_len(out_fle, digestsize);

        req_ctx->flc = &ctx->flc[FINALIZE];
        req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
        req_ctx->cbk = ahash_done_ctx_src;
        req_ctx->ctx = &req->base;
        req_ctx->edesc = edesc;

        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
        if (ret == -EINPROGRESS ||
            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                return ret;

unmap_ctx:
        ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
        qi_cache_free(edesc);
        return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_request *req_ctx = &state->caam_req;
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        int qm_sg_bytes, qm_sg_src_index;
        int src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        struct dpaa2_sg_entry *sg_table;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(ctx->dev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(ctx->dev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        /* allocate space for base edesc and link tables */
        edesc = qi_cache_zalloc(GFP_DMA | flags);
        if (!edesc) {
                dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        edesc->src_nents = src_nents;
        qm_sg_src_index = 1 + (buflen ? 1 : 0);
        qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
        sg_table = &edesc->sgt[0];

        ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
                               DMA_TO_DEVICE);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
        if (ret)
                goto unmap_ctx;

        sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);

        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
                                          DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
                dev_err(ctx->dev, "unable to map S/G table\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }
        edesc->qm_sg_bytes = qm_sg_bytes;

        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
                dev_err(ctx->dev, "unable to map dst\n");
                edesc->dst_dma = 0;
                ret = -ENOMEM;
                goto unmap_ctx;
        }

        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
        dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
        dpaa2_fl_set_len(out_fle, digestsize);

        req_ctx->flc = &ctx->flc[FINALIZE];
        req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
        req_ctx->cbk = ahash_done_ctx_src;
        req_ctx->ctx = &req->base;
        req_ctx->edesc = edesc;

        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
        if (ret == -EINPROGRESS ||
            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                return ret;

unmap_ctx:
        ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
        qi_cache_free(edesc);
        return ret;
}

static int ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_request *req_ctx = &state->caam_req;
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        int digestsize = crypto_ahash_digestsize(ahash);
        int src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        int ret = -ENOMEM;

        state->buf_dma = 0;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(ctx->dev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(ctx->dev, "unable to map source for DMA\n");
                        return ret;
                }
        } else {
                mapped_nents = 0;
        }

        /* allocate space for base edesc and link tables */
        edesc = qi_cache_zalloc(GFP_DMA | flags);
        if (!edesc) {
                dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
                return ret;
        }

        edesc->src_nents = src_nents;
        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

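        /*
         * A single mapped segment can be referenced directly from the input
         * frame list entry; multiple segments need a QM S/G table.
         */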
        if (mapped_nents > 1) {
                int qm_sg_bytes;
                struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

                qm_sg_bytes = mapped_nents * sizeof(*sg_table);
                sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
                edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
                                                  qm_sg_bytes, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
                        dev_err(ctx->dev, "unable to map S/G table\n");
                        goto unmap;
                }
                edesc->qm_sg_bytes = qm_sg_bytes;
                dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
                dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
        } else {
                dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
                dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
        }

        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
                dev_err(ctx->dev, "unable to map dst\n");
                edesc->dst_dma = 0;
                goto unmap;
        }

        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_len(in_fle, req->nbytes);
        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
        dpaa2_fl_set_len(out_fle, digestsize);

        req_ctx->flc = &ctx->flc[DIGEST];
        req_ctx->flc_dma = ctx->flc_dma[DIGEST];
        req_ctx->cbk = ahash_done;
        req_ctx->ctx = &req->base;
        req_ctx->edesc = edesc;
        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
        if (ret == -EINPROGRESS ||
            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                return ret;

unmap:
        ahash_unmap(ctx->dev, edesc, req, digestsize);
        qi_cache_free(edesc);
        return ret;
}

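/*
 * The "no ctx" variants run before any data has been hashed, i.e. while
 * the input so far still fits in the staging buffer and no running
 * context exists yet.
 */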
static int ahash_final_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_request *req_ctx = &state->caam_req;
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int buflen = *current_buflen(state);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret = -ENOMEM;

        /* allocate space for base edesc and link tables */
        edesc = qi_cache_zalloc(GFP_DMA | flags);
        if (!edesc)
                return ret;

        state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->dev, state->buf_dma)) {
                dev_err(ctx->dev, "unable to map src\n");
                goto unmap;
        }

        edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
                dev_err(ctx->dev, "unable to map dst\n");
                edesc->dst_dma = 0;
                goto unmap;
        }

        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(in_fle, state->buf_dma);
        dpaa2_fl_set_len(in_fle, buflen);
        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
        dpaa2_fl_set_len(out_fle, digestsize);

        req_ctx->flc = &ctx->flc[DIGEST];
        req_ctx->flc_dma = ctx->flc_dma[DIGEST];
        req_ctx->cbk = ahash_done;
        req_ctx->ctx = &req->base;
        req_ctx->edesc = edesc;

        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
        if (ret == -EINPROGRESS ||
            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                return ret;

unmap:
        ahash_unmap(ctx->dev, edesc, req, digestsize);
        qi_cache_free(edesc);
        return ret;
}

static int ahash_update_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_request *req_ctx = &state->caam_req;
        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int *buflen = current_buflen(state);
        u8 *next_buf = alt_buf(state);
        int *next_buflen = alt_buflen(state);
        int in_len = *buflen + req->nbytes, to_hash;
        int qm_sg_bytes, src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        int ret = 0;

        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                struct dpaa2_sg_entry *sg_table;

                src_nents = sg_nents_for_len(req->src,
                                             req->nbytes - *next_buflen);
                if (src_nents < 0) {
                        dev_err(ctx->dev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(ctx->dev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                /* allocate space for base edesc and link tables */
                edesc = qi_cache_zalloc(GFP_DMA | flags);
                if (!edesc) {
                        dma_unmap_sg(ctx->dev, req->src, src_nents,
                                     DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
                sg_table = &edesc->sgt[0];

                ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
                if (ret)
                        goto unmap_ctx;

                sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);

                if (*next_buflen)
                        scatterwalk_map_and_copy(next_buf, req->src,
                                                 to_hash - *buflen,
                                                 *next_buflen, 0);

                edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
                                                  qm_sg_bytes, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
                        dev_err(ctx->dev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }
                edesc->qm_sg_bytes = qm_sg_bytes;

                state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
                                                ctx->ctx_len, DMA_FROM_DEVICE);
                if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
                        dev_err(ctx->dev, "unable to map ctx\n");
                        state->ctx_dma = 0;
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
                dpaa2_fl_set_final(in_fle, true);
                dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
                dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
                dpaa2_fl_set_len(in_fle, to_hash);
                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
                dpaa2_fl_set_addr(out_fle, state->ctx_dma);
                dpaa2_fl_set_len(out_fle, ctx->ctx_len);

                req_ctx->flc = &ctx->flc[UPDATE_FIRST];
                req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
                req_ctx->cbk = ahash_done_ctx_dst;
                req_ctx->ctx = &req->base;
                req_ctx->edesc = edesc;

                ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
                if (ret != -EINPROGRESS &&
                    !(ret == -EBUSY &&
                      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        goto unmap_ctx;

3772                 state->update = ahash_update_ctx;
3773                 state->finup = ahash_finup_ctx;
3774                 state->final = ahash_final_ctx;
3775         } else if (*next_buflen) {
3776                 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3777                                          req->nbytes, 0);
3778                 *buflen = *next_buflen;
3779                 *next_buflen = 0;
3780         }
3781
3782         print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3783                              DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3784         print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3785                              DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3786                              1);
3787
3788         return ret;
3789 unmap_ctx:
3790         ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
3791         qi_cache_free(edesc);
3792         return ret;
3793 }
3794
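/*
 * ahash_finup_no_ctx - finish a hash for which no running context exists
 * yet: all data still sits in the pending buffer and req->src, so a single
 * DIGEST operation over "buffer || src" produces the final digest directly
 * into req->result.
 */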
3795 static int ahash_finup_no_ctx(struct ahash_request *req)
3796 {
3797         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3798         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3799         struct caam_hash_state *state = ahash_request_ctx(req);
3800         struct caam_request *req_ctx = &state->caam_req;
3801         struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3802         struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3803         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3804                       GFP_KERNEL : GFP_ATOMIC;
3805         int buflen = *current_buflen(state);
3806         int qm_sg_bytes, src_nents, mapped_nents;
3807         int digestsize = crypto_ahash_digestsize(ahash);
3808         struct ahash_edesc *edesc;
3809         struct dpaa2_sg_entry *sg_table;
3810         int ret;
3811
3812         src_nents = sg_nents_for_len(req->src, req->nbytes);
3813         if (src_nents < 0) {
3814                 dev_err(ctx->dev, "Invalid number of src SG.\n");
3815                 return src_nents;
3816         }
3817
3818         if (src_nents) {
3819                 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3820                                           DMA_TO_DEVICE);
3821                 if (!mapped_nents) {
3822                         dev_err(ctx->dev, "unable to DMA map source\n");
3823                         return -ENOMEM;
3824                 }
3825         } else {
3826                 mapped_nents = 0;
3827         }
3828
3829         /* allocate space for base edesc and link tables */
3830         edesc = qi_cache_zalloc(GFP_DMA | flags);
3831         if (!edesc) {
3832                 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3833                 return -ENOMEM;
3834         }
3835
3836         edesc->src_nents = src_nents;
3837         qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3838         sg_table = &edesc->sgt[0];
3839
3840         ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3841         if (ret)
3842                 goto unmap;
3843
3844         sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3845
3846         edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3847                                           DMA_TO_DEVICE);
3848         if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3849                 dev_err(ctx->dev, "unable to map S/G table\n");
3850                 ret = -ENOMEM;
3851                 goto unmap;
3852         }
3853         edesc->qm_sg_bytes = qm_sg_bytes;
3854
3855         edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3856                                         DMA_FROM_DEVICE);
3857         if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3858                 dev_err(ctx->dev, "unable to map dst\n");
3859                 edesc->dst_dma = 0;
3860                 ret = -ENOMEM;
3861                 goto unmap;
3862         }
3863
3864         memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3865         dpaa2_fl_set_final(in_fle, true);
3866         dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3867         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3868         dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
3869         dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3870         dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
3871         dpaa2_fl_set_len(out_fle, digestsize);
3872
3873         req_ctx->flc = &ctx->flc[DIGEST];
3874         req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3875         req_ctx->cbk = ahash_done;
3876         req_ctx->ctx = &req->base;
3877         req_ctx->edesc = edesc;
3878         ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3879         if (ret != -EINPROGRESS &&
3880             !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3881                 goto unmap;
3882
3883         return ret;
3884 unmap:
3885         ahash_unmap(ctx->dev, edesc, req, digestsize);
3886         qi_cache_free(edesc);
3887         return ret;
3888 }
3889
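/*
 * ahash_update_first - first data submission for a request: full blocks
 * are hashed with the UPDATE_FIRST flow context, seeding the running
 * context in caam_ctx, and any trailing partial block is stashed in the
 * alternate buffer. If everything fits in a partial block, no job is
 * issued and the state machine moves to the no-ctx handlers instead.
 */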
3890 static int ahash_update_first(struct ahash_request *req)
3891 {
3892         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3893         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3894         struct caam_hash_state *state = ahash_request_ctx(req);
3895         struct caam_request *req_ctx = &state->caam_req;
3896         struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3897         struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3898         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3899                       GFP_KERNEL : GFP_ATOMIC;
3900         u8 *next_buf = alt_buf(state);
3901         int *next_buflen = alt_buflen(state);
3902         int to_hash;
3903         int src_nents, mapped_nents;
3904         struct ahash_edesc *edesc;
3905         int ret = 0;
3906
3907         *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
3908                                       1);
3909         to_hash = req->nbytes - *next_buflen;
3910
3911         if (to_hash) {
3912                 struct dpaa2_sg_entry *sg_table;
3913
3914                 src_nents = sg_nents_for_len(req->src,
3915                                              req->nbytes - (*next_buflen));
3916                 if (src_nents < 0) {
3917                         dev_err(ctx->dev, "Invalid number of src SG.\n");
3918                         return src_nents;
3919                 }
3920
3921                 if (src_nents) {
3922                         mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3923                                                   DMA_TO_DEVICE);
3924                         if (!mapped_nents) {
3925                                 dev_err(ctx->dev, "unable to DMA map source\n");
3926                                 return -ENOMEM;
3927                         }
3928                 } else {
3929                         mapped_nents = 0;
3930                 }
3931
3932                 /* allocate space for base edesc and link tables */
3933                 edesc = qi_cache_zalloc(GFP_DMA | flags);
3934                 if (!edesc) {
3935                         dma_unmap_sg(ctx->dev, req->src, src_nents,
3936                                      DMA_TO_DEVICE);
3937                         return -ENOMEM;
3938                 }
3939
3940                 edesc->src_nents = src_nents;
3941                 sg_table = &edesc->sgt[0];
3942
3943                 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3944                 dpaa2_fl_set_final(in_fle, true);
3945                 dpaa2_fl_set_len(in_fle, to_hash);
3946
3947                 if (mapped_nents > 1) {
3948                         int qm_sg_bytes;
3949
3950                         sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3951                         qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3952                         edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3953                                                           qm_sg_bytes,
3954                                                           DMA_TO_DEVICE);
3955                         if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3956                                 dev_err(ctx->dev, "unable to map S/G table\n");
3957                                 ret = -ENOMEM;
3958                                 goto unmap_ctx;
3959                         }
3960                         edesc->qm_sg_bytes = qm_sg_bytes;
3961                         dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3962                         dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3963                 } else {
3964                         dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3965                         dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3966                 }
3967
3968                 if (*next_buflen)
3969                         scatterwalk_map_and_copy(next_buf, req->src, to_hash,
3970                                                  *next_buflen, 0);
3971
3972                 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3973                                                 ctx->ctx_len, DMA_FROM_DEVICE);
3974                 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3975                         dev_err(ctx->dev, "unable to map ctx\n");
3976                         state->ctx_dma = 0;
3977                         ret = -ENOMEM;
3978                         goto unmap_ctx;
3979                 }
3980
3981                 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3982                 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3983                 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3984
3985                 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3986                 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3987                 req_ctx->cbk = ahash_done_ctx_dst;
3988                 req_ctx->ctx = &req->base;
3989                 req_ctx->edesc = edesc;
3990
3991                 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3992                 if (ret != -EINPROGRESS &&
3993                     !(ret == -EBUSY && req->base.flags &
3994                       CRYPTO_TFM_REQ_MAY_BACKLOG))
3995                         goto unmap_ctx;
3996
3997                 state->update = ahash_update_ctx;
3998                 state->finup = ahash_finup_ctx;
3999                 state->final = ahash_final_ctx;
4000         } else if (*next_buflen) {
4001                 state->update = ahash_update_no_ctx;
4002                 state->finup = ahash_finup_no_ctx;
4003                 state->final = ahash_final_no_ctx;
4004                 scatterwalk_map_and_copy(next_buf, req->src, 0,
4005                                          req->nbytes, 0);
4006                 switch_buf(state);
4007         }
4008
4009         print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4010                              DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4011                              1);
4012
4013         return ret;
4014 unmap_ctx:
4015         ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
4016         qi_cache_free(edesc);
4017         return ret;
4018 }
4019
4020 static int ahash_finup_first(struct ahash_request *req)
4021 {
4022         return ahash_digest(req);
4023 }
4024
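/*
 * ahash_init - reset the software state machine to the "first update"
 * handlers and clear all buffer and DMA bookkeeping.
 */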
4025 static int ahash_init(struct ahash_request *req)
4026 {
4027         struct caam_hash_state *state = ahash_request_ctx(req);
4028
4029         state->update = ahash_update_first;
4030         state->finup = ahash_finup_first;
4031         state->final = ahash_final_no_ctx;
4032
4033         state->ctx_dma = 0;
4034         state->current_buf = 0;
4035         state->buf_dma = 0;
4036         state->buflen_0 = 0;
4037         state->buflen_1 = 0;
4038
4039         return 0;
4040 }
4041
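/*
 * The three entry points below are thin trampolines: the handler actually
 * invoked depends on how far the request has progressed (no data seen yet,
 * data pending only in the buffer, or a running context in the engine).
 */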
4042 static int ahash_update(struct ahash_request *req)
4043 {
4044         struct caam_hash_state *state = ahash_request_ctx(req);
4045
4046         return state->update(req);
4047 }
4048
4049 static int ahash_finup(struct ahash_request *req)
4050 {
4051         struct caam_hash_state *state = ahash_request_ctx(req);
4052
4053         return state->finup(req);
4054 }
4055
4056 static int ahash_final(struct ahash_request *req)
4057 {
4058         struct caam_hash_state *state = ahash_request_ctx(req);
4059
4060         return state->final(req);
4061 }
4062
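/*
 * ahash_export/ahash_import - (de)serialize the in-flight hash state: the
 * pending buffer, the running context and the current handler stage. The
 * handlers are exported as raw function pointers, so an exported state is
 * only meaningful within the same kernel image.
 */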
4063 static int ahash_export(struct ahash_request *req, void *out)
4064 {
4065         struct caam_hash_state *state = ahash_request_ctx(req);
4066         struct caam_export_state *export = out;
4067         int len = *current_buflen(state);
4068         u8 *buf = current_buf(state);
4077
4078         memcpy(export->buf, buf, len);
4079         memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4080         export->buflen = len;
4081         export->update = state->update;
4082         export->final = state->final;
4083         export->finup = state->finup;
4084
4085         return 0;
4086 }
4087
4088 static int ahash_import(struct ahash_request *req, const void *in)
4089 {
4090         struct caam_hash_state *state = ahash_request_ctx(req);
4091         const struct caam_export_state *export = in;
4092
4093         memset(state, 0, sizeof(*state));
4094         memcpy(state->buf_0, export->buf, export->buflen);
4095         memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4096         state->buflen_0 = export->buflen;
4097         state->update = export->update;
4098         state->final = export->final;
4099         state->finup = export->finup;
4100
4101         return 0;
4102 }
4103
4104 struct caam_hash_template {
4105         char name[CRYPTO_MAX_ALG_NAME];
4106         char driver_name[CRYPTO_MAX_ALG_NAME];
4107         char hmac_name[CRYPTO_MAX_ALG_NAME];
4108         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4109         unsigned int blocksize;
4110         struct ahash_alg template_ahash;
4111         u32 alg_type;
4112 };
4113
4114 /* ahash descriptors */
4115 static struct caam_hash_template driver_hash[] = {
4116         {
4117                 .name = "sha1",
4118                 .driver_name = "sha1-caam-qi2",
4119                 .hmac_name = "hmac(sha1)",
4120                 .hmac_driver_name = "hmac-sha1-caam-qi2",
4121                 .blocksize = SHA1_BLOCK_SIZE,
4122                 .template_ahash = {
4123                         .init = ahash_init,
4124                         .update = ahash_update,
4125                         .final = ahash_final,
4126                         .finup = ahash_finup,
4127                         .digest = ahash_digest,
4128                         .export = ahash_export,
4129                         .import = ahash_import,
4130                         .setkey = ahash_setkey,
4131                         .halg = {
4132                                 .digestsize = SHA1_DIGEST_SIZE,
4133                                 .statesize = sizeof(struct caam_export_state),
4134                         },
4135                 },
4136                 .alg_type = OP_ALG_ALGSEL_SHA1,
4137         }, {
4138                 .name = "sha224",
4139                 .driver_name = "sha224-caam-qi2",
4140                 .hmac_name = "hmac(sha224)",
4141                 .hmac_driver_name = "hmac-sha224-caam-qi2",
4142                 .blocksize = SHA224_BLOCK_SIZE,
4143                 .template_ahash = {
4144                         .init = ahash_init,
4145                         .update = ahash_update,
4146                         .final = ahash_final,
4147                         .finup = ahash_finup,
4148                         .digest = ahash_digest,
4149                         .export = ahash_export,
4150                         .import = ahash_import,
4151                         .setkey = ahash_setkey,
4152                         .halg = {
4153                                 .digestsize = SHA224_DIGEST_SIZE,
4154                                 .statesize = sizeof(struct caam_export_state),
4155                         },
4156                 },
4157                 .alg_type = OP_ALG_ALGSEL_SHA224,
4158         }, {
4159                 .name = "sha256",
4160                 .driver_name = "sha256-caam-qi2",
4161                 .hmac_name = "hmac(sha256)",
4162                 .hmac_driver_name = "hmac-sha256-caam-qi2",
4163                 .blocksize = SHA256_BLOCK_SIZE,
4164                 .template_ahash = {
4165                         .init = ahash_init,
4166                         .update = ahash_update,
4167                         .final = ahash_final,
4168                         .finup = ahash_finup,
4169                         .digest = ahash_digest,
4170                         .export = ahash_export,
4171                         .import = ahash_import,
4172                         .setkey = ahash_setkey,
4173                         .halg = {
4174                                 .digestsize = SHA256_DIGEST_SIZE,
4175                                 .statesize = sizeof(struct caam_export_state),
4176                         },
4177                 },
4178                 .alg_type = OP_ALG_ALGSEL_SHA256,
4179         }, {
4180                 .name = "sha384",
4181                 .driver_name = "sha384-caam-qi2",
4182                 .hmac_name = "hmac(sha384)",
4183                 .hmac_driver_name = "hmac-sha384-caam-qi2",
4184                 .blocksize = SHA384_BLOCK_SIZE,
4185                 .template_ahash = {
4186                         .init = ahash_init,
4187                         .update = ahash_update,
4188                         .final = ahash_final,
4189                         .finup = ahash_finup,
4190                         .digest = ahash_digest,
4191                         .export = ahash_export,
4192                         .import = ahash_import,
4193                         .setkey = ahash_setkey,
4194                         .halg = {
4195                                 .digestsize = SHA384_DIGEST_SIZE,
4196                                 .statesize = sizeof(struct caam_export_state),
4197                         },
4198                 },
4199                 .alg_type = OP_ALG_ALGSEL_SHA384,
4200         }, {
4201                 .name = "sha512",
4202                 .driver_name = "sha512-caam-qi2",
4203                 .hmac_name = "hmac(sha512)",
4204                 .hmac_driver_name = "hmac-sha512-caam-qi2",
4205                 .blocksize = SHA512_BLOCK_SIZE,
4206                 .template_ahash = {
4207                         .init = ahash_init,
4208                         .update = ahash_update,
4209                         .final = ahash_final,
4210                         .finup = ahash_finup,
4211                         .digest = ahash_digest,
4212                         .export = ahash_export,
4213                         .import = ahash_import,
4214                         .setkey = ahash_setkey,
4215                         .halg = {
4216                                 .digestsize = SHA512_DIGEST_SIZE,
4217                                 .statesize = sizeof(struct caam_export_state),
4218                         },
4219                 },
4220                 .alg_type = OP_ALG_ALGSEL_SHA512,
4221         }, {
4222                 .name = "md5",
4223                 .driver_name = "md5-caam-qi2",
4224                 .hmac_name = "hmac(md5)",
4225                 .hmac_driver_name = "hmac-md5-caam-qi2",
4226                 .blocksize = MD5_BLOCK_WORDS * 4,
4227                 .template_ahash = {
4228                         .init = ahash_init,
4229                         .update = ahash_update,
4230                         .final = ahash_final,
4231                         .finup = ahash_finup,
4232                         .digest = ahash_digest,
4233                         .export = ahash_export,
4234                         .import = ahash_import,
4235                         .setkey = ahash_setkey,
4236                         .halg = {
4237                                 .digestsize = MD5_DIGEST_SIZE,
4238                                 .statesize = sizeof(struct caam_export_state),
4239                         },
4240                 },
4241                 .alg_type = OP_ALG_ALGSEL_MD5,
4242         }
4243 };
4244
4245 struct caam_hash_alg {
4246         struct list_head entry;
4247         struct device *dev;
4248         int alg_type;
4249         struct ahash_alg ahash_alg;
4250 };
4251
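/*
 * caam_hash_cra_init - per-tfm setup: the whole flow-context array is
 * covered by a single DMA mapping, with per-operation handles derived by
 * offset. DMA_ATTR_SKIP_CPU_SYNC is used since the descriptors are synced
 * explicitly whenever they are (re)written.
 */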
4252 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4253 {
4254         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4255         struct crypto_alg *base = tfm->__crt_alg;
4256         struct hash_alg_common *halg =
4257                  container_of(base, struct hash_alg_common, base);
4258         struct ahash_alg *alg =
4259                  container_of(halg, struct ahash_alg, halg);
4260         struct caam_hash_alg *caam_hash =
4261                  container_of(alg, struct caam_hash_alg, ahash_alg);
4262         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4263         /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4264         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4265                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4266                                          HASH_MSG_LEN + 32,
4267                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4268                                          HASH_MSG_LEN + 64,
4269                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4270         dma_addr_t dma_addr;
4271         int i;
4272
4273         ctx->dev = caam_hash->dev;
4274
4275         dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4276                                         DMA_BIDIRECTIONAL,
4277                                         DMA_ATTR_SKIP_CPU_SYNC);
4278         if (dma_mapping_error(ctx->dev, dma_addr)) {
4279                 dev_err(ctx->dev, "unable to map shared descriptors\n");
4280                 return -ENOMEM;
4281         }
4282
4283         for (i = 0; i < HASH_NUM_OP; i++)
4284                 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4285
4286         /* copy descriptor header template value */
4287         ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4288
4289         ctx->ctx_len = runninglen[(ctx->adata.algtype &
4290                                    OP_ALG_ALGSEL_SUBMASK) >>
4291                                   OP_ALG_ALGSEL_SHIFT];
4292
4293         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4294                                  sizeof(struct caam_hash_state));
4295
4296         return ahash_set_sh_desc(ahash);
4297 }
4298
4299 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4300 {
4301         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4302
4303         dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4304                                DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4305 }
4306
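/*
 * caam_hash_alloc - build an ahash_alg from a template entry. The keyed
 * variant registers under the hmac(...) names; the unkeyed variant clears
 * .setkey so the crypto API treats it as a plain digest.
 */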
4307 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4308         struct caam_hash_template *template, bool keyed)
4309 {
4310         struct caam_hash_alg *t_alg;
4311         struct ahash_alg *halg;
4312         struct crypto_alg *alg;
4313
4314         t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4315         if (!t_alg)
4316                 return ERR_PTR(-ENOMEM);
4317
4318         t_alg->ahash_alg = template->template_ahash;
4319         halg = &t_alg->ahash_alg;
4320         alg = &halg->halg.base;
4321
4322         if (keyed) {
4323                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4324                          template->hmac_name);
4325                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4326                          template->hmac_driver_name);
4327         } else {
4328                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4329                          template->name);
4330                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4331                          template->driver_name);
4332                 t_alg->ahash_alg.setkey = NULL;
4333         }
4334         alg->cra_module = THIS_MODULE;
4335         alg->cra_init = caam_hash_cra_init;
4336         alg->cra_exit = caam_hash_cra_exit;
4337         alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4338         alg->cra_priority = CAAM_CRA_PRIORITY;
4339         alg->cra_blocksize = template->blocksize;
4340         alg->cra_alignmask = 0;
4341         alg->cra_flags = CRYPTO_ALG_ASYNC;
4342
4343         t_alg->alg_type = template->alg_type;
4344         t_alg->dev = dev;
4345
4346         return t_alg;
4347 }
4348
4349 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4350 {
4351         struct dpaa2_caam_priv_per_cpu *ppriv;
4352
4353         ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4354         napi_schedule_irqoff(&ppriv->napi);
4355 }
4356
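/*
 * dpaa2_dpseci_dpio_setup - register a FQDAN notification context and a
 * dequeue store for each online CPU, up to num_pairs. A missing affine
 * DPIO is reported as -EPROBE_DEFER so the probe can be retried once the
 * DPIO objects have been probed. The unwind loops below rely on nctx.cb
 * and ppriv->store being set only for the CPUs already initialized.
 */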
4357 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4358 {
4359         struct device *dev = priv->dev;
4360         struct dpaa2_io_notification_ctx *nctx;
4361         struct dpaa2_caam_priv_per_cpu *ppriv;
4362         int err, i = 0, cpu;
4363
4364         for_each_online_cpu(cpu) {
4365                 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4366                 ppriv->priv = priv;
4367                 nctx = &ppriv->nctx;
4368                 nctx->is_cdan = 0;
4369                 nctx->id = ppriv->rsp_fqid;
4370                 nctx->desired_cpu = cpu;
4371                 nctx->cb = dpaa2_caam_fqdan_cb;
4372
4373                 /* Register notification callbacks */
4374                 err = dpaa2_io_service_register(NULL, nctx);
4375                 if (unlikely(err)) {
4376                         dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4377                         nctx->cb = NULL;
4378                         /*
4379                          * If no affine DPIO for this core, there's probably
4380                          * none available for the next cores either. Signal we want
4381                          * to retry later, in case the DPIO devices weren't
4382                          * probed yet.
4383                          */
4384                         err = -EPROBE_DEFER;
4385                         goto err;
4386                 }
4387
4388                 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4389                                                      dev);
4390                 if (unlikely(!ppriv->store)) {
4391                         dev_err(dev, "dpaa2_io_store_create() failed\n");
4392                         err = -ENOMEM;
4393                         goto err;
4394                 }
4395
4396                 if (++i == priv->num_pairs)
4397                         break;
4398         }
4399
4400         return 0;
4401
4402 err:
4403         for_each_online_cpu(cpu) {
4404                 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4405                 if (!ppriv->nctx.cb)
4406                         break;
4407                 dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4408         }
4409
4410         for_each_online_cpu(cpu) {
4411                 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4412                 if (!ppriv->store)
4413                         break;
4414                 dpaa2_io_store_destroy(ppriv->store);
4415         }
4416
4417         return err;
4418 }
4419
4420 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4421 {
4422         struct dpaa2_caam_priv_per_cpu *ppriv;
4423         int i = 0, cpu;
4424
4425         for_each_online_cpu(cpu) {
4426                 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4427                 dpaa2_io_service_deregister(NULL, &ppriv->nctx);
4428                 dpaa2_io_store_destroy(ppriv->store);
4429
4430                 if (++i == priv->num_pairs)
4431                         return;
4432         }
4433 }
4434
4435 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4436 {
4437         struct dpseci_rx_queue_cfg rx_queue_cfg;
4438         struct device *dev = priv->dev;
4439         struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4440         struct dpaa2_caam_priv_per_cpu *ppriv;
4441         int err = 0, i = 0, cpu;
4442
4443         /* Configure Rx queues */
4444         for_each_online_cpu(cpu) {
4445                 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4446
4447                 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4448                                        DPSECI_QUEUE_OPT_USER_CTX;
4449                 rx_queue_cfg.order_preservation_en = 0;
4450                 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4451                 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4452                 /*
4453                  * Rx priority (WQ) doesn't really matter, since we use
4454                  * pull mode, i.e. volatile dequeues from specific FQs
4455                  */
4456                 rx_queue_cfg.dest_cfg.priority = 0;
4457                 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4458
4459                 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4460                                           &rx_queue_cfg);
4461                 if (err) {
4462                         dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4463                                 err);
4464                         return err;
4465                 }
4466
4467                 if (++i == priv->num_pairs)
4468                         break;
4469         }
4470
4471         return err;
4472 }
4473
4474 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4475 {
4476         struct device *dev = priv->dev;
4477
4478         if (!priv->cscn_mem)
4479                 return;
4480
4481         dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4482         kfree(priv->cscn_mem);
4483 }
4484
4485 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4486 {
4487         struct device *dev = priv->dev;
4488         struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4489
4490         dpaa2_dpseci_congestion_free(priv);
4491         dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4492 }
4493
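/*
 * dpaa2_caam_process_fd - response path: recover the caam_request from
 * FD[ADDR], unmap the frame-list pair and pass the FRC status word to the
 * request's completion callback.
 */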
4494 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4495                                   const struct dpaa2_fd *fd)
4496 {
4497         struct caam_request *req;
4498         u32 fd_err;
4499
4500         if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4501                 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4502                 return;
4503         }
4504
4505         fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4506         if (unlikely(fd_err))
4507                 dev_err(priv->dev, "FD error: %08x\n", fd_err);
4508
4509         /*
4510          * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4511          * in FD[ERR] or FD[FRC].
4512          */
4513         req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4514         dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4515                          DMA_BIDIRECTIONAL);
4516         req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4517 }
4518
4519 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4520 {
4521         int err;
4522
4523         /* Retry while portal is busy */
4524         do {
4525                 err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
4526                                                ppriv->store);
4527         } while (err == -EBUSY);
4528
4529         if (unlikely(err))
4530                 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4531
4532         return err;
4533 }
4534
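/*
 * dpaa2_caam_store_consume - drain the results of one volatile dequeue
 * from the store; returns the number of frames processed.
 */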
4535 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4536 {
4537         struct dpaa2_dq *dq;
4538         int cleaned = 0, is_last;
4539
4540         do {
4541                 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4542                 if (unlikely(!dq)) {
4543                         if (unlikely(!is_last)) {
4544                                 dev_dbg(ppriv->priv->dev,
4545                                         "FQ %d returned no valid frames\n",
4546                                         ppriv->rsp_fqid);
4547                                 /*
4548                                  * MUST retry until we get some sort of
4549                                  * valid response token (be it "empty dequeue"
4550                                  * or a valid frame).
4551                                  */
4552                                 continue;
4553                         }
4554                         break;
4555                 }
4556
4557                 /* Process FD */
4558                 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4559                 cleaned++;
4560         } while (!is_last);
4561
4562         return cleaned;
4563 }
4564
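/*
 * NAPI poll: keep issuing pull commands while full stores come back,
 * stopping once another full store (DPAA2_CAAM_STORE_SIZE frames) could
 * overshoot the budget; notifications are rearmed only when the budget
 * was not exhausted.
 */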
4565 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4566 {
4567         struct dpaa2_caam_priv_per_cpu *ppriv;
4568         struct dpaa2_caam_priv *priv;
4569         int err, cleaned = 0, store_cleaned;
4570
4571         ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4572         priv = ppriv->priv;
4573
4574         if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4575                 return 0;
4576
4577         do {
4578                 store_cleaned = dpaa2_caam_store_consume(ppriv);
4579                 cleaned += store_cleaned;
4580
4581                 if (store_cleaned == 0 ||
4582                     cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4583                         break;
4584
4585                 /* Try to dequeue some more */
4586                 err = dpaa2_caam_pull_fq(ppriv);
4587                 if (unlikely(err))
4588                         break;
4589         } while (1);
4590
4591         if (cleaned < budget) {
4592                 napi_complete_done(napi, cleaned);
4593                 err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
4594                 if (unlikely(err))
4595                         dev_err(priv->dev, "Notification rearm failed: %d\n",
4596                                 err);
4597         }
4598
4599         return cleaned;
4600 }
4601
4602 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4603                                          u16 token)
4604 {
4605         struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4606         struct device *dev = priv->dev;
4607         int err;
4608
4609         /*
4610          * Congestion group feature supported starting with DPSECI API v5.1
4611          * and only when object has been created with this capability.
4612          */
4613         if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4614             !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4615                 return 0;
4616
4617         priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4618                                  GFP_KERNEL | GFP_DMA);
4619         if (!priv->cscn_mem)
4620                 return -ENOMEM;
4621
4622         priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4623         priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4624                                         DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4625         if (dma_mapping_error(dev, priv->cscn_dma)) {
4626                 dev_err(dev, "Error mapping CSCN memory area\n");
4627                 err = -ENOMEM;
4628                 goto err_dma_map;
4629         }
4630
4631         cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4632         cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4633         cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4634         cong_notif_cfg.message_ctx = (uintptr_t)priv;
4635         cong_notif_cfg.message_iova = priv->cscn_dma;
4636         cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4637                                         DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4638                                         DPSECI_CGN_MODE_COHERENT_WRITE;
4639
4640         err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4641                                                  &cong_notif_cfg);
4642         if (err) {
4643                 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4644                 goto err_set_cong;
4645         }
4646
4647         return 0;
4648
4649 err_set_cong:
4650         dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4651 err_dma_map:
4652         kfree(priv->cscn_mem);
4653
4654         return err;
4655 }
4656
4657 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4658 {
4659         struct device *dev = &ls_dev->dev;
4660         struct dpaa2_caam_priv *priv;
4661         struct dpaa2_caam_priv_per_cpu *ppriv;
4662         int err, cpu;
4663         u8 i;
4664
4665         priv = dev_get_drvdata(dev);
4666
4667         priv->dev = dev;
4668         priv->dpsec_id = ls_dev->obj_desc.id;
4669
4670         /* Get a handle for the DPSECI this interface is associated with */
4671         err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4672         if (err) {
4673                 dev_err(dev, "dpseci_open() failed: %d\n", err);
4674                 goto err_open;
4675         }
4676
4677         err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4678                                      &priv->minor_ver);
4679         if (err) {
4680                 dev_err(dev, "dpseci_get_api_version() failed\n");
4681                 goto err_get_vers;
4682         }
4683
4684         dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4685
4686         err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4687                                     &priv->dpseci_attr);
4688         if (err) {
4689                 dev_err(dev, "dpseci_get_attributes() failed\n");
4690                 goto err_get_vers;
4691         }
4692
4693         err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4694                                   &priv->sec_attr);
4695         if (err) {
4696                 dev_err(dev, "dpseci_get_sec_attr() failed\n");
4697                 goto err_get_vers;
4698         }
4699
4700         err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4701         if (err) {
4702                 dev_err(dev, "setup_congestion() failed\n");
4703                 goto err_get_vers;
4704         }
4705
4706         priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4707                               priv->dpseci_attr.num_tx_queues);
4708         if (priv->num_pairs > num_online_cpus()) {
4709                 dev_warn(dev, "%d queues won't be used\n",
4710                          priv->num_pairs - num_online_cpus());
4711                 priv->num_pairs = num_online_cpus();
4712         }
4713
4714         for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4715                 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4716                                           &priv->rx_queue_attr[i]);
4717                 if (err) {
4718                         dev_err(dev, "dpseci_get_rx_queue() failed\n");
4719                         goto err_get_rx_queue;
4720                 }
4721         }
4722
4723         for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4724                 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4725                                           &priv->tx_queue_attr[i]);
4726                 if (err) {
4727                         dev_err(dev, "dpseci_get_tx_queue() failed\n");
4728                         goto err_get_rx_queue;
4729                 }
4730         }
4731
4732         i = 0;
4733         for_each_online_cpu(cpu) {
4734                 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
4735                         priv->rx_queue_attr[i].fqid,
4736                         priv->tx_queue_attr[i].fqid);
4737
4738                 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4739                 ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
4740                 ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
4741                 ppriv->prio = i;
4742
4743                 ppriv->net_dev.dev = *dev;
4744                 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4745                 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4746                                DPAA2_CAAM_NAPI_WEIGHT);
4747                 if (++i == priv->num_pairs)
4748                         break;
4749         }
4750
4751         return 0;
4752
4753 err_get_rx_queue:
4754         dpaa2_dpseci_congestion_free(priv);
4755 err_get_vers:
4756         dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4757 err_open:
4758         return err;
4759 }
4760
4761 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4762 {
4763         struct device *dev = priv->dev;
4764         struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4765         struct dpaa2_caam_priv_per_cpu *ppriv;
4766         int i;
4767
4768         for (i = 0; i < priv->num_pairs; i++) {
4769                 ppriv = per_cpu_ptr(priv->ppriv, i);
4770                 napi_enable(&ppriv->napi);
4771         }
4772
4773         return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4774 }
4775
4776 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4777 {
4778         struct device *dev = priv->dev;
4779         struct dpaa2_caam_priv_per_cpu *ppriv;
4780         struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4781         int i, err = 0, enabled;
4782
4783         err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4784         if (err) {
4785                 dev_err(dev, "dpseci_disable() failed\n");
4786                 return err;
4787         }
4788
4789         err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4790         if (err) {
4791                 dev_err(dev, "dpseci_is_enabled() failed\n");
4792                 return err;
4793         }
4794
4795         dev_dbg(dev, "disabled: %s\n", enabled ? "false" : "true");
4796
4797         for (i = 0; i < priv->num_pairs; i++) {
4798                 ppriv = per_cpu_ptr(priv->ppriv, i);
4799                 napi_disable(&ppriv->napi);
4800                 netif_napi_del(&ppriv->napi);
4801         }
4802
4803         return 0;
4804 }
4805
4806 static struct list_head hash_list;
4807
4808 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
4809 {
4810         struct device *dev;
4811         struct dpaa2_caam_priv *priv;
4812         int i, err = 0;
4813         bool registered = false;
4814
4815         /*
4816          * There is no way to get CAAM endianness - there is no direct register
4817          * space access and MC f/w does not provide this attribute.
4818          * All DPAA2-based SoCs have a little-endian CAAM, thus hard-code this
4819          * property.
4820          */
4821         caam_little_end = true;
4822
4823         caam_imx = false;
4824
4825         dev = &dpseci_dev->dev;
4826
4827         priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
4828         if (!priv)
4829                 return -ENOMEM;
4830
4831         dev_set_drvdata(dev, priv);
4832
4833         priv->domain = iommu_get_domain_for_dev(dev);
4834
4835         qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
4836                                      0, SLAB_CACHE_DMA, NULL);
4837         if (!qi_cache) {
4838                 dev_err(dev, "Can't allocate SEC cache\n");
4839                 return -ENOMEM;
4840         }
4841
4842         err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
4843         if (err) {
4844                 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
4845                 goto err_dma_mask;
4846         }
4847
4848         /* Obtain a MC portal */
4849         err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
4850         if (err) {
4851                 if (err == -ENXIO)
4852                         err = -EPROBE_DEFER;
4853                 else
4854                         dev_err(dev, "MC portal allocation failed\n");
4855
4856                 goto err_dma_mask;
4857         }
4858
4859         priv->ppriv = alloc_percpu(*priv->ppriv);
4860         if (!priv->ppriv) {
4861                 dev_err(dev, "alloc_percpu() failed\n");
4862                 err = -ENOMEM;
4863                 goto err_alloc_ppriv;
4864         }
4865
4866         /* DPSECI initialization */
4867         err = dpaa2_dpseci_setup(dpseci_dev);
4868         if (err) {
4869                 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
4870                 goto err_dpseci_setup;
4871         }
4872
4873         /* DPIO */
4874         err = dpaa2_dpseci_dpio_setup(priv);
4875         if (err) {
4876                 if (err != -EPROBE_DEFER)
4877                         dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
4878                 goto err_dpio_setup;
4879         }
4880
4881         /* DPSECI binding to DPIO */
4882         err = dpaa2_dpseci_bind(priv);
4883         if (err) {
4884                 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
4885                 goto err_bind;
4886         }
4887
4888         /* DPSECI enable */
4889         err = dpaa2_dpseci_enable(priv);
4890         if (err) {
4891                 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
4892                 goto err_bind;
4893         }
4894
4895         /* register crypto algorithms the device supports */
4896         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4897                 struct caam_skcipher_alg *t_alg = driver_algs + i;
4898                 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
4899
4900                 /* Skip DES algorithms if not supported by device */
4901                 if (!priv->sec_attr.des_acc_num &&
4902                     (alg_sel == OP_ALG_ALGSEL_3DES ||
4903                      alg_sel == OP_ALG_ALGSEL_DES))
4904                         continue;
4905
4906                 /* Skip AES algorithms if not supported by device */
4907                 if (!priv->sec_attr.aes_acc_num &&
4908                     alg_sel == OP_ALG_ALGSEL_AES)
4909                         continue;
4910
4911                 t_alg->caam.dev = dev;
4912                 caam_skcipher_alg_init(t_alg);
4913
4914                 err = crypto_register_skcipher(&t_alg->skcipher);
4915                 if (err) {
4916                         dev_warn(dev, "%s alg registration failed: %d\n",
4917                                  t_alg->skcipher.base.cra_driver_name, err);
4918                         continue;
4919                 }
4920
4921                 t_alg->registered = true;
4922                 registered = true;
4923         }
4924
4925         for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
4926                 struct caam_aead_alg *t_alg = driver_aeads + i;
4927                 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
4928                                  OP_ALG_ALGSEL_MASK;
4929                 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
4930                                  OP_ALG_ALGSEL_MASK;
4931
4932                 /* Skip DES algorithms if not supported by device */
4933                 if (!priv->sec_attr.des_acc_num &&
4934                     (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
4935                      c1_alg_sel == OP_ALG_ALGSEL_DES))
4936                         continue;
4937
4938                 /* Skip AES algorithms if not supported by device */
4939                 if (!priv->sec_attr.aes_acc_num &&
4940                     c1_alg_sel == OP_ALG_ALGSEL_AES)
4941                         continue;
4942
4943                 /*
4944                  * Skip algorithms requiring message digests
4945                  * if MD not supported by device.
4946                  */
4947                 if (!priv->sec_attr.md_acc_num && c2_alg_sel)
4948                         continue;
4949
4950                 t_alg->caam.dev = dev;
4951                 caam_aead_alg_init(t_alg);
4952
4953                 err = crypto_register_aead(&t_alg->aead);
4954                 if (err) {
4955                         dev_warn(dev, "%s alg registration failed: %d\n",
4956                                  t_alg->aead.base.cra_driver_name, err);
4957                         continue;
4958                 }
4959
4960                 t_alg->registered = true;
4961                 registered = true;
4962         }
4963         if (registered)
4964                 dev_info(dev, "algorithms registered in /proc/crypto\n");
4965
4966         /* register hash algorithms the device supports */
4967         INIT_LIST_HEAD(&hash_list);
4968
4969         /*
4970          * Skip registration of any hashing algorithms if MD block
4971          * is not present.
4972          */
4973         if (!priv->sec_attr.md_acc_num)
4974                 return 0;
4975
4976         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
4977                 struct caam_hash_alg *t_alg;
4978                 struct caam_hash_template *alg = driver_hash + i;
4979
4980                 /* register hmac version */
4981                 t_alg = caam_hash_alloc(dev, alg, true);
4982                 if (IS_ERR(t_alg)) {
4983                         err = PTR_ERR(t_alg);
4984                         dev_warn(dev, "%s hash alg allocation failed: %d\n",
4985                                  alg->driver_name, err);
4986                         continue;
4987                 }
4988
4989                 err = crypto_register_ahash(&t_alg->ahash_alg);
4990                 if (err) {
4991                         dev_warn(dev, "%s alg registration failed: %d\n",
4992                                  t_alg->ahash_alg.halg.base.cra_driver_name,
4993                                  err);
4994                         kfree(t_alg);
4995                 } else {
4996                         list_add_tail(&t_alg->entry, &hash_list);
4997                 }
4998
4999                 /* register unkeyed version */
5000                 t_alg = caam_hash_alloc(dev, alg, false);
5001                 if (IS_ERR(t_alg)) {
5002                         err = PTR_ERR(t_alg);
5003                         dev_warn(dev, "%s hash alg allocation failed: %d\n",
5004                                  alg->driver_name, err);
5005                         continue;
5006                 }
5007
5008                 err = crypto_register_ahash(&t_alg->ahash_alg);
5009                 if (err) {
5010                         dev_warn(dev, "%s alg registration failed: %d\n",
5011                                  t_alg->ahash_alg.halg.base.cra_driver_name,
5012                                  err);
5013                         kfree(t_alg);
5014                 } else {
5015                         list_add_tail(&t_alg->entry, &hash_list);
5016                 }
5017         }
5018         if (!list_empty(&hash_list))
5019                 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5020
5021         return err;
5022
5023 err_bind:
5024         dpaa2_dpseci_dpio_free(priv);
5025 err_dpio_setup:
5026         dpaa2_dpseci_free(priv);
5027 err_dpseci_setup:
5028         free_percpu(priv->ppriv);
5029 err_alloc_ppriv:
5030         fsl_mc_portal_free(priv->mc_io);
5031 err_dma_mask:
5032         kmem_cache_destroy(qi_cache);
5033
5034         return err;
5035 }
5036
5037 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5038 {
5039         struct device *dev;
5040         struct dpaa2_caam_priv *priv;
5041         int i;
5042
5043         dev = &ls_dev->dev;
5044         priv = dev_get_drvdata(dev);
5045
5046         for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5047                 struct caam_aead_alg *t_alg = driver_aeads + i;
5048
5049                 if (t_alg->registered)
5050                         crypto_unregister_aead(&t_alg->aead);
5051         }
5052
5053         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5054                 struct caam_skcipher_alg *t_alg = driver_algs + i;
5055
5056                 if (t_alg->registered)
5057                         crypto_unregister_skcipher(&t_alg->skcipher);
5058         }
5059
5060         if (hash_list.next) {
5061                 struct caam_hash_alg *t_hash_alg, *p;
5062
5063                 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5064                         crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5065                         list_del(&t_hash_alg->entry);
5066                         kfree(t_hash_alg);
5067                 }
5068         }
5069
5070         dpaa2_dpseci_disable(priv);
5071         dpaa2_dpseci_dpio_free(priv);
5072         dpaa2_dpseci_free(priv);
5073         free_percpu(priv->ppriv);
5074         fsl_mc_portal_free(priv->mc_io);
5075         kmem_cache_destroy(qi_cache);
5076
5077         return 0;
5078 }
5079
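/*
 * dpaa2_caam_enqueue - submit a crypto job on the Tx queue selected from
 * the current CPU. Returns -EINPROGRESS on success (the response arrives
 * later via req->cbk, called from NAPI context), -EBUSY while the
 * congestion group is above threshold, -EIO otherwise.
 *
 * A minimal caller sketch - my_done(), areq and unwind() below are
 * illustrative only, not helpers provided by this driver:
 *
 *	req->fd_flt[1]: input frame-list entry, populated with
 *	                dpaa2_fl_set_{format,addr,len}()
 *	req->fd_flt[0]: output frame-list entry, populated likewise
 *	req->flc = flc;             // flow context (shared descriptor)
 *	req->flc_dma = flc_dma;
 *	req->cbk = my_done;         // void my_done(void *ctx, u32 status)
 *	req->ctx = &areq->base;
 *
 *	ret = dpaa2_caam_enqueue(dev, req);
 *	if (ret != -EINPROGRESS)
 *		unwind();           // caller still owns all DMA mappings
 */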
5080 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5081 {
5082         struct dpaa2_fd fd;
5083         struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5084         int err = 0, i, id;
5085
5086         if (IS_ERR(req))
5087                 return PTR_ERR(req);
5088
5089         if (priv->cscn_mem) {
5090                 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5091                                         DPAA2_CSCN_SIZE,
5092                                         DMA_FROM_DEVICE);
5093                 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5094                         dev_dbg_ratelimited(dev, "Dropping request\n");
5095                         return -EBUSY;
5096                 }
5097         }
5098
5099         dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5100
5101         req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5102                                          DMA_BIDIRECTIONAL);
5103         if (dma_mapping_error(dev, req->fd_flt_dma)) {
5104                 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5105                 return -EIO;
5106         }
5107
5108         memset(&fd, 0, sizeof(fd));
5109         dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5110         dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5111         dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5112         dpaa2_fd_set_flc(&fd, req->flc_dma);
5113
5114         /*
5115          * Preemption is not guaranteed to be disabled here, so disable
5116          * it explicitly to keep the CPU-based Tx queue selection stable.
5117          */
5118         preempt_disable();
5119         id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
5120         for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5121                 err = dpaa2_io_service_enqueue_fq(NULL,
5122                                                   priv->tx_queue_attr[id].fqid,
5123                                                   &fd);
5124                 if (err != -EBUSY)
5125                         break;
5126         }
5127         preempt_enable();
5128
5129         if (unlikely(err)) {
5130                 dev_err(dev, "Error enqueuing frame: %d\n", err);
5131                 goto err_out;
5132         }
5133
5134         return -EINPROGRESS;
5135
5136 err_out:
5137         dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5138                          DMA_BIDIRECTIONAL);
5139         return -EIO;
5140 }
5141 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5142
5143 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5144         {
5145                 .vendor = FSL_MC_VENDOR_FREESCALE,
5146                 .obj_type = "dpseci",
5147         },
5148         { .vendor = 0x0 }
5149 };
5150
5151 static struct fsl_mc_driver dpaa2_caam_driver = {
5152         .driver = {
5153                 .name           = KBUILD_MODNAME,
5154                 .owner          = THIS_MODULE,
5155         },
5156         .probe          = dpaa2_caam_probe,
5157         .remove         = dpaa2_caam_remove,
5158         .match_id_table = dpaa2_caam_match_id_table
5159 };
5160
5161 MODULE_LICENSE("Dual BSD/GPL");
5162 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5163 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5164
5165 module_fsl_mc_driver(dpaa2_caam_driver);