OSDN Git Service

ARM: dts: at91: sama5d3: define clock rate range for tcb1
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / crypto / talitos.c
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59                            bool is_sec1)
60 {
61         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62         if (!is_sec1)
63                 ptr->eptr = upper_32_bits(dma_addr);
64 }
65
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67                              struct talitos_ptr *src_ptr, bool is_sec1)
68 {
69         dst_ptr->ptr = src_ptr->ptr;
70         if (!is_sec1)
71                 dst_ptr->eptr = src_ptr->eptr;
72 }
73
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
75                                bool is_sec1)
76 {
77         if (is_sec1) {
78                 ptr->res = 0;
79                 ptr->len1 = cpu_to_be16(len);
80         } else {
81                 ptr->len = cpu_to_be16(len);
82         }
83 }
84
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86                                            bool is_sec1)
87 {
88         if (is_sec1)
89                 return be16_to_cpu(ptr->len1);
90         else
91                 return be16_to_cpu(ptr->len);
92 }
93
94 static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
95 {
96         if (!is_sec1)
97                 ptr->j_extent = 0;
98 }
99
100 /*
101  * map virtual single (contiguous) pointer to h/w descriptor pointer
102  */
103 static void map_single_talitos_ptr(struct device *dev,
104                                    struct talitos_ptr *ptr,
105                                    unsigned int len, void *data,
106                                    enum dma_data_direction dir)
107 {
108         dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
109         struct talitos_private *priv = dev_get_drvdata(dev);
110         bool is_sec1 = has_ftr_sec1(priv);
111
112         to_talitos_ptr_len(ptr, len, is_sec1);
113         to_talitos_ptr(ptr, dma_addr, is_sec1);
114         to_talitos_ptr_extent_clear(ptr, is_sec1);
115 }
116
/*
 * unmap bus single (contiguous) h/w descriptor pointer:
 * reverses map_single_talitos_ptr() using the address and length
 * stored in the descriptor pointer itself.
 *
 * NOTE(review): only the low 32 address bits (ptr->ptr) are passed to
 * dma_unmap_single(); the SEC2/3 eptr high bits are dropped. Confirm no
 * supported platform hands out streaming DMA handles above 4GB here.
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
130
/*
 * reset_channel() - soft-reset one SEC channel and reprogram its config.
 * @dev: the SEC device
 * @ch: index of the channel to reset
 *
 * Sets the channel reset bit (in CCCR_LO on SEC1, CCCR on SEC2/3) and
 * busy-waits up to TALITOS_TIMEOUT iterations for the h/w to clear it.
 * On success the channel is configured for 36-bit addressing, done
 * writeback and done-IRQ, plus ICCR writeback when the h/w auth-check
 * feature is present.
 *
 * Return: 0 on success, -EIO if the reset bit never cleared.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		/* SEC1 keeps the reset bit in the low half of CCCR */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
169
/*
 * reset_device() - issue a software reset of the whole SEC block.
 * @dev: the SEC device
 *
 * Writes the generation-specific SWR bit in MCR and busy-waits up to
 * TALITOS_TIMEOUT iterations for the h/w to clear it. When a second IRQ
 * line is wired up, channels 1 and 3 are re-assigned to it afterwards
 * (RCA1/RCA3), since the reset reverts that routing.
 *
 * Return: 0 on success, -EIO if the SWR bit never cleared.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	/* route channels 1/3 to the secondary IRQ, if present */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
195
/*
 * init_device() - reset and initialize the SEC device.
 * @dev: the SEC device
 *
 * Performs a double master reset (per the errata noted below), resets
 * every channel, then enables the channel done/error interrupts and the
 * integrity-check writeback mode where supported.
 *
 * Return: 0 on success, negative errno from reset_device()/reset_channel().
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/*
	 * enable channel done and error interrupts;
	 * note the SEC1 IMR sense is inverted vs SEC2/3, so SEC1 clears
	 * bits to unmask where SEC2/3 sets them.
	 */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
244
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:        the SEC device to be used
 * @ch:         the SEC device channel to be used
 * @desc:       the descriptor to be processed by the device
 * @callback:   whom to call when processing is complete
 * @context:    a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Return: -EINPROGRESS when the descriptor was handed to the h/w,
 * -EAGAIN when the channel fifo is full.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/*
		 * SEC1 consumes the hdr1 copy of the header, so the mapped
		 * region starts at &desc->hdr1 rather than at desc itself.
		 */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/*
	 * publish the fifo slot contents before setting desc non-NULL;
	 * flush_channel() keys off request->desc and pairs with rmb()
	 */
	smp_wmb();
	request->desc = desc;

	/* GO! — order all prior stores before the doorbell MMIO writes */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
313
/*
 * flush_channel() - process completed requests on one channel's fifo,
 * notifying each request's callback; on @error, undone descriptors are
 * completed with that error instead.
 * @dev: the SEC device
 * @ch: channel index to flush
 * @error: 0 on the done path, or the error to report to pending requests
 * @reset_ch: non-zero when the caller will reset the channel afterwards
 *
 * The tail lock is dropped around each callback invocation and
 * reacquired before examining the next fifo entry.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
376
/*
 * process completed requests for channels that have done status
 *
 * DEF_TALITOS1_DONE - tasklet body for SEC1 channel-done handling.
 * The literal masks below are the SEC1 ISR done bits for channels 0-3
 * (0x10000000, 0x40000000, 0x00010000, 0x00040000). After flushing, the
 * done interrupts are unmasked again; SEC1's IMR sense is inverted vs
 * SEC2/3, so clrbits32() is what unmasks here.
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
static void talitos1_done_##name(unsigned long data)                    \
{                                                                       \
        struct device *dev = (struct device *)data;                     \
        struct talitos_private *priv = dev_get_drvdata(dev);            \
        unsigned long flags;                                            \
                                                                        \
        if (ch_done_mask & 0x10000000)                                  \
                flush_channel(dev, 0, 0, 0);                    \
        if (priv->num_channels == 1)                                    \
                goto out;                                               \
        if (ch_done_mask & 0x40000000)                                  \
                flush_channel(dev, 1, 0, 0);                    \
        if (ch_done_mask & 0x00010000)                                  \
                flush_channel(dev, 2, 0, 0);                    \
        if (ch_done_mask & 0x00040000)                                  \
                flush_channel(dev, 3, 0, 0);                    \
                                                                        \
out:                                                                    \
        /* At this point, all completed channels have been processed */ \
        /* Unmask done interrupts for channels completed later on. */   \
        spin_lock_irqsave(&priv->reg_lock, flags);                      \
        clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
        clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
        spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
408
/*
 * DEF_TALITOS2_DONE - tasklet body for SEC2/3 channel-done handling.
 * SEC2/3 done bits sit at ISR bit positions 0/2/4/6 for channels 0-3.
 * After flushing, setbits32() on IMR re-enables (unmasks) the done
 * interrupts that the hardirq handler masked before scheduling us.
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
static void talitos2_done_##name(unsigned long data)                    \
{                                                                       \
        struct device *dev = (struct device *)data;                     \
        struct talitos_private *priv = dev_get_drvdata(dev);            \
        unsigned long flags;                                            \
                                                                        \
        if (ch_done_mask & 1)                                           \
                flush_channel(dev, 0, 0, 0);                            \
        if (priv->num_channels == 1)                                    \
                goto out;                                               \
        if (ch_done_mask & (1 << 2))                                    \
                flush_channel(dev, 1, 0, 0);                            \
        if (ch_done_mask & (1 << 4))                                    \
                flush_channel(dev, 2, 0, 0);                            \
        if (ch_done_mask & (1 << 6))                                    \
                flush_channel(dev, 3, 0, 0);                            \
                                                                        \
out:                                                                    \
        /* At this point, all completed channels have been processed */ \
        /* Unmask done interrupts for channels completed later on. */   \
        spin_lock_irqsave(&priv->reg_lock, flags);                      \
        setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
        setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
        spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
439
/*
 * locate current (offending) descriptor:
 * reads the channel's current-descriptor pointer (CDPR hi/lo), then
 * scans the s/w fifo from tail looking for the request whose mapped
 * descriptor address matches.
 *
 * Return: the matching descriptor's header word, or 0 if the CDPR is
 * NULL or no fifo entry matches.
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	/* reassemble the 36-bit descriptor address from the two registers */
	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	/* walk the ring at most once; wrapping back to tail means no match */
	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
470
/*
 * user diagnostics; report root cause of error based on execution unit status:
 * decodes the primary (SEL0) and secondary (SEL1) execution-unit selects
 * from @desc_hdr and dumps the matching EU's interrupt status registers,
 * followed by the channel's descriptor buffer contents.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	/* no header located? fall back to the channel's descriptor buffer */
	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/*
		 * NOTE(review): KEU status is read through reg_pkeu here —
		 * verify the KEU shares the PKEU register page on this h/w.
		 */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump the 8 dwords of the channel descriptor buffer */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
545
/*
 * recover from error interrupts:
 * for each channel flagged in @isr, log the per-channel error status,
 * flush its fifo with an error, and either reset the channel or (SEC2/3
 * only) attempt a continuation. Unrecoverable conditions (restart
 * failure, non-channel error bits, anything in @isr_lo) trigger a full
 * device reinit.
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			/* SEC2/3 error bits are the odd ISR bits per channel */
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointeur not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2/3: try to resume the channel where it stopped */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	/* anything beyond per-channel errors forces a full device reinit */
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
644
/*
 * DEF_TALITOS1_INTERRUPT - SEC1 hardirq handler: snapshot and acknowledge
 * ISR/ISR_LO under reg_lock, dispatch errors synchronously via
 * talitos_error(), and for done bits mask further done interrupts
 * (setbits32 masks on SEC1's inverted IMR) before scheduling the done
 * tasklet, which unmasks them again on exit.
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
{                                                                              \
        struct device *dev = data;                                             \
        struct talitos_private *priv = dev_get_drvdata(dev);                   \
        u32 isr, isr_lo;                                                       \
        unsigned long flags;                                                   \
                                                                               \
        spin_lock_irqsave(&priv->reg_lock, flags);                             \
        isr = in_be32(priv->reg + TALITOS_ISR);                                \
        isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
        /* Acknowledge interrupt */                                            \
        out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
        out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
                                                                               \
        if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
                talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
        }                                                                      \
        else {                                                                 \
                if (likely(isr & ch_done_mask)) {                              \
                        /* mask further done interrupts. */                    \
                        setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
                        /* done_task will unmask done interrupts at exit */    \
                        tasklet_schedule(&priv->done_task[tlet]);              \
                }                                                              \
                spin_unlock_irqrestore(&priv->reg_lock, flags);                \
        }                                                                      \
                                                                               \
        return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
                                                                IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
679
/*
 * Generate a SEC2+ interrupt handler.
 * Reads/acknowledges ISR/ISR_LO under reg_lock, dispatches errors to
 * talitos_error(), and on done-interrupts masks further done IRQs before
 * scheduling done_task[tlet] (which re-unmasks them on exit).
 * Returns IRQ_HANDLED only if one of this handler's channels signalled.
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
{                                                                              \
	struct device *dev = data;                                             \
	struct talitos_private *priv = dev_get_drvdata(dev);                   \
	u32 isr, isr_lo;                                                       \
	unsigned long flags;                                                   \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);                             \
	isr = in_be32(priv->reg + TALITOS_ISR);                                \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
	/* Acknowledge interrupt */                                            \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
		spin_unlock_irqrestore(&priv->reg_lock, flags);                \
		talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
	}                                                                      \
	else {                                                                 \
		if (likely(isr & ch_done_mask)) {                              \
			/* mask further done interrupts. */                    \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);              \
		}                                                              \
		spin_unlock_irqrestore(&priv->reg_lock, flags);                \
	}                                                                      \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}
712
/*
 * SEC2+ IRQ wiring: either one combined IRQ for all 4 channels, or a pair
 * of IRQs splitting channels {0,2} (tasklet 0) and {1,3} (tasklet 1).
 */
DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
718
719 /*
720  * hwrng
721  */
722 static int talitos_rng_data_present(struct hwrng *rng, int wait)
723 {
724         struct device *dev = (struct device *)rng->priv;
725         struct talitos_private *priv = dev_get_drvdata(dev);
726         u32 ofl;
727         int i;
728
729         for (i = 0; i < 20; i++) {
730                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
731                       TALITOS_RNGUSR_LO_OFL;
732                 if (ofl || !wait)
733                         break;
734                 udelay(10);
735         }
736
737         return !!ofl;
738 }
739
/*
 * hwrng .data_read callback: fetch one 32-bit word of entropy.
 * The RNGU FIFO must be read as a full 64-bit access (both halves),
 * so the high word is read and discarded and only the low word is
 * returned to the caller.
 */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
751
/*
 * hwrng .init callback: software-reset the RNGU execution unit and start
 * random-number generation.  Returns 0 on success, -ENODEV if the unit
 * never signals reset-done within TALITOS_TIMEOUT polls.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	/* request a software reset and poll for reset-done */
	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	/* NOTE(review): OR-ing 0 into EUDSR_LO looks like a bare register
	 * write used as a "go" trigger — confirm against the RNGU spec. */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
773
774 static int talitos_register_rng(struct device *dev)
775 {
776         struct talitos_private *priv = dev_get_drvdata(dev);
777         int err;
778
779         priv->rng.name          = dev_driver_string(dev),
780         priv->rng.init          = talitos_rng_init,
781         priv->rng.data_present  = talitos_rng_data_present,
782         priv->rng.data_read     = talitos_rng_data_read,
783         priv->rng.priv          = (unsigned long)dev;
784
785         err = hwrng_register(&priv->rng);
786         if (!err)
787                 priv->rng_registered = true;
788
789         return err;
790 }
791
792 static void talitos_unregister_rng(struct device *dev)
793 {
794         struct talitos_private *priv = dev_get_drvdata(dev);
795
796         if (!priv->rng_registered)
797                 return;
798
799         hwrng_unregister(&priv->rng);
800         priv->rng_registered = false;
801 }
802
803 /*
804  * crypto alg
805  */
#define TALITOS_CRA_PRIORITY            3000
/* room for an HMAC key followed by a cipher key in one buffer */
#define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

/* per-transform (tfm) context */
struct talitos_ctx {
	struct device *dev;		/* owning talitos device */
	int ch;				/* channel requests are submitted on */
	__be32 desc_hdr_template;	/* pre-built h/w descriptor header */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key || enc key (aead_setkey) */
	u8 iv[TALITOS_MAX_IV_LENGTH];	/* h/w writes the output IV here */
	unsigned int keylen;		/* total valid bytes in key[] */
	unsigned int enckeylen;		/* cipher-key portion length */
	unsigned int authkeylen;	/* auth-key portion length */
};

#define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

/* per-request ahash state */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];	/* MDEU ctx */
	unsigned int hw_context_size;	/* valid bytes in hw_context */
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* partial-block accumulation */
	u8 bufnext[HASH_MAX_BLOCK_SIZE];	/* staging for next partial block */
	unsigned int swinit;		/* context initialized by software */
	unsigned int first;		/* first descriptor of the hash op */
	unsigned int last;		/* final descriptor of the hash op */
	unsigned int to_hash_later;	/* bytes deferred to the next pass */
	unsigned int nbuf;		/* bytes currently buffered in buf[] */
	struct scatterlist bufsl[2];	/* sg wrapping buf + request data */
	struct scatterlist *psrc;	/* effective source scatterlist */
};

/* serialized hash state for export()/import() */
struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
847
848 static int aead_setkey(struct crypto_aead *authenc,
849                        const u8 *key, unsigned int keylen)
850 {
851         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
852         struct crypto_authenc_keys keys;
853
854         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
855                 goto badkey;
856
857         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
858                 goto badkey;
859
860         memcpy(ctx->key, keys.authkey, keys.authkeylen);
861         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
862
863         ctx->keylen = keys.authkeylen + keys.enckeylen;
864         ctx->enckeylen = keys.enckeylen;
865         ctx->authkeylen = keys.authkeylen;
866
867         return 0;
868
869 badkey:
870         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
871         return -EINVAL;
872 }
873
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line (stored after the link tables)
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	/* trailing flexible storage, interpreted per SEC generation */
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
903
904 static void talitos_sg_unmap(struct device *dev,
905                              struct talitos_edesc *edesc,
906                              struct scatterlist *src,
907                              struct scatterlist *dst)
908 {
909         unsigned int src_nents = edesc->src_nents ? : 1;
910         unsigned int dst_nents = edesc->dst_nents ? : 1;
911
912         if (src != dst) {
913                 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
914
915                 if (dst) {
916                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
917                 }
918         } else
919                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
920 }
921
/*
 * Release all DMA mappings made by ipsec_esp() for an AEAD request:
 * the four single-mapped descriptor pointers (iv out, cipher key,
 * cipher iv, hmac key), the src/dst scatterlists, and the link-table
 * area if one was allocated.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
937
/*
 * ipsec_esp descriptor callbacks
 */
/*
 * Completion callback for AEAD encryption.  Unmaps the request; if the
 * h/w wrote the ICV out-of-line (after the link tables), copies it into
 * the tail of the destination scatterlist before completing the request.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* desc is embedded in the edesc; recover the container */
	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		/* ICV lives just past the src+dst link tables (+2 slots) */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
969
/*
 * Completion callback for AEAD decryption when the ICV must be verified
 * in software: compare the ICV stashed by aead_decrypt() against the one
 * the h/w generated, using constant-time crypto_memneq(), and report
 * -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		/* received ICV sits at the very end of dst */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			/* stashed ICV follows the src+dst link tables */
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		/* constant-time compare to avoid a timing side channel */
		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
1005
1006 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1007                                           struct talitos_desc *desc,
1008                                           void *context, int err)
1009 {
1010         struct aead_request *req = context;
1011         struct talitos_edesc *edesc;
1012
1013         edesc = container_of(desc, struct talitos_edesc, desc);
1014
1015         ipsec_esp_unmap(dev, edesc, req);
1016
1017         /* check ICV auth status */
1018         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1019                      DESC_HDR_LO_ICCR1_PASS))
1020                 err = -EBADMSG;
1021
1022         kfree(edesc);
1023
1024         aead_request_complete(req, err);
1025 }
1026
1027 /*
1028  * convert scatterlist to SEC h/w link table format
1029  * stop at cryptlen bytes
1030  */
1031 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1032                                  unsigned int offset, int cryptlen,
1033                                  struct talitos_ptr *link_tbl_ptr)
1034 {
1035         int n_sg = sg_count;
1036         int count = 0;
1037
1038         while (cryptlen && sg && n_sg--) {
1039                 unsigned int len = sg_dma_len(sg);
1040
1041                 if (offset >= len) {
1042                         offset -= len;
1043                         goto next;
1044                 }
1045
1046                 len -= offset;
1047
1048                 if (len > cryptlen)
1049                         len = cryptlen;
1050
1051                 to_talitos_ptr(link_tbl_ptr + count,
1052                                sg_dma_address(sg) + offset, 0);
1053                 link_tbl_ptr[count].len = cpu_to_be16(len);
1054                 link_tbl_ptr[count].j_extent = 0;
1055                 count++;
1056                 cryptlen -= len;
1057                 offset = 0;
1058
1059 next:
1060                 sg = sg_next(sg);
1061         }
1062
1063         /* tag end of link table */
1064         if (count > 0)
1065                 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
1066
1067         return count;
1068 }
1069
/* Build a link table covering @cryptlen bytes from the start of @sg. */
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen, link_tbl_ptr);
}
1077
/*
 * fill in and submit ipsec_esp descriptor
 *
 * Populates the seven h/w descriptor pointers for an AEAD (authenc)
 * request — [0] hmac key, [1] associated data, [2] cipher iv,
 * [3] cipher key, [4] cipher in, [5] cipher out, [6] iv out — building
 * SEC link tables for any multi-segment scatterlists, then submits on
 * the tfm's channel.  Returns -EINPROGRESS on successful submission;
 * on any other return the edesc has been unmapped and freed.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;	/* next free slot in edesc->link_tbl */
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		/* multi-segment AD: point ptr[1] at a link table */
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		/* AD fits in one segment: direct pointer */
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key (stored right after the auth key in ctx->key) */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	/* when h/w checks the ICV, the input must include it too */
	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		/* degenerate one-entry table: use its entry directly */
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
			sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;	/* clear RETURN on the old last entry */
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
					(edesc->src_nents + edesc->dst_nents +
					 2) * sizeof(struct talitos_ptr) +
					authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		/* completion must copy the ICV back into dst */
		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* submission failed: callback will never run, clean up here */
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1219
1220 /*
1221  * allocate and map the extended descriptor
1222  */
1223 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1224                                                  struct scatterlist *src,
1225                                                  struct scatterlist *dst,
1226                                                  u8 *iv,
1227                                                  unsigned int assoclen,
1228                                                  unsigned int cryptlen,
1229                                                  unsigned int authsize,
1230                                                  unsigned int ivsize,
1231                                                  int icv_stashing,
1232                                                  u32 cryptoflags,
1233                                                  bool encrypt)
1234 {
1235         struct talitos_edesc *edesc;
1236         int src_nents, dst_nents, alloc_len, dma_len;
1237         dma_addr_t iv_dma = 0;
1238         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1239                       GFP_ATOMIC;
1240         struct talitos_private *priv = dev_get_drvdata(dev);
1241         bool is_sec1 = has_ftr_sec1(priv);
1242         int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1243
1244         if (cryptlen + authsize > max_len) {
1245                 dev_err(dev, "length exceeds h/w max limit\n");
1246                 return ERR_PTR(-EINVAL);
1247         }
1248
1249         if (ivsize)
1250                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1251
1252         if (!dst || dst == src) {
1253                 src_nents = sg_nents_for_len(src,
1254                                              assoclen + cryptlen + authsize);
1255                 src_nents = (src_nents == 1) ? 0 : src_nents;
1256                 dst_nents = dst ? src_nents : 0;
1257         } else { /* dst && dst != src*/
1258                 src_nents = sg_nents_for_len(src, assoclen + cryptlen +
1259                                                  (encrypt ? 0 : authsize));
1260                 src_nents = (src_nents == 1) ? 0 : src_nents;
1261                 dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
1262                                                  (encrypt ? authsize : 0));
1263                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1264         }
1265
1266         /*
1267          * allocate space for base edesc plus the link tables,
1268          * allowing for two separate entries for AD and generated ICV (+ 2),
1269          * and space for two sets of ICVs (stashed and generated)
1270          */
1271         alloc_len = sizeof(struct talitos_edesc);
1272         if (src_nents || dst_nents) {
1273                 if (is_sec1)
1274                         dma_len = (src_nents ? cryptlen : 0) +
1275                                   (dst_nents ? cryptlen : 0);
1276                 else
1277                         dma_len = (src_nents + dst_nents + 2) *
1278                                   sizeof(struct talitos_ptr) + authsize * 2;
1279                 alloc_len += dma_len;
1280         } else {
1281                 dma_len = 0;
1282                 alloc_len += icv_stashing ? authsize : 0;
1283         }
1284
1285         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1286         if (!edesc) {
1287                 if (iv_dma)
1288                         dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1289
1290                 dev_err(dev, "could not allocate edescriptor\n");
1291                 return ERR_PTR(-ENOMEM);
1292         }
1293
1294         edesc->src_nents = src_nents;
1295         edesc->dst_nents = dst_nents;
1296         edesc->iv_dma = iv_dma;
1297         edesc->dma_len = dma_len;
1298         if (dma_len)
1299                 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1300                                                      edesc->dma_len,
1301                                                      DMA_BIDIRECTIONAL);
1302
1303         return edesc;
1304 }
1305
1306 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1307                                               int icv_stashing, bool encrypt)
1308 {
1309         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1310         unsigned int authsize = crypto_aead_authsize(authenc);
1311         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1312         unsigned int ivsize = crypto_aead_ivsize(authenc);
1313
1314         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1315                                    iv, areq->assoclen, areq->cryptlen,
1316                                    authsize, ivsize, icv_stashing,
1317                                    areq->base.flags, encrypt);
1318 }
1319
1320 static int aead_encrypt(struct aead_request *req)
1321 {
1322         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1323         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1324         struct talitos_edesc *edesc;
1325
1326         /* allocate extended descriptor */
1327         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1328         if (IS_ERR(edesc))
1329                 return PTR_ERR(edesc);
1330
1331         /* set encrypt */
1332         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1333
1334         return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1335 }
1336
/*
 * AEAD decrypt entry point.  Uses the SEC's hardware ICV check when the
 * device supports it for this request shape; otherwise stashes the
 * received ICV in the edesc so the completion callback can compare it
 * in software against the h/w-generated one.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* cryptlen includes the ICV on input; only decrypt the payload */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* h/w ICV check needs single-segment sg unless the device can
	 * include the extent in link-table lengths */
	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
1385
1386 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1387                              const u8 *key, unsigned int keylen)
1388 {
1389         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1390
1391         if (keylen > TALITOS_MAX_KEY_SIZE) {
1392                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1393                 return -EINVAL;
1394         }
1395
1396         memcpy(&ctx->key, key, keylen);
1397         ctx->keylen = keylen;
1398
1399         return 0;
1400 }
1401
/*
 * Undo the src/dst mapping made by map_sg_in/out_talitos_ptr().
 * SEC1 has no link tables, so multi-segment requests were bounced
 * through edesc->buf: sync the output half of the bounce buffer back
 * and copy it into dst.  SEC2+ simply unmaps the scatterlists.
 */
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		/* single-segment src was DMA-mapped directly */
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			/* output bounce data lives at offset len in buf */
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}
1428
1429 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1430                                   const u8 *key, unsigned int keylen)
1431 {
1432         if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1433             keylen == AES_KEYSIZE_256)
1434                 return ablkcipher_setkey(cipher, key, keylen);
1435
1436         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1437
1438         return -EINVAL;
1439 }
1440
/*
 * Release all DMA mappings made for an ablkcipher request: the iv-out
 * pointer [5], the src/dst data, the cipher key [2], the iv-in [1],
 * and the link-table area if one was allocated.
 */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1455
/*
 * Completion callback for ablkcipher requests: unmap everything, copy
 * the output IV (written by the h/w into ctx->iv) back to the request's
 * info field for chaining, and complete the request.
 */
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	/* NOTE(review): ctx->iv is per-tfm, not per-request — concurrent
	 * requests on one tfm would race on it; confirm against callers */
	memcpy(areq->info, ctx->iv, ivsize);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
1475
/*
 * Map the source scatterlist @src (@len bytes) into descriptor pointer
 * @ptr for DMA towards the SEC.
 *
 * SEC1 path: a multi-entry scatterlist is linearized by copying into the
 * bounce buffer (edesc->buf, backed by edesc->dma_link_tbl).
 * SEC2+ path: a multi-entry list is described by a link table built in
 * edesc->link_tbl, and @ptr gets the DESC_PTR_LNKTBL_JUMP flag.
 *
 * Returns the number of DMA segments; the caller reuses this for the
 * output pointer when src == dst.
 */
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			/* single segment: point the SEC straight at it */
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			/* SEC1 has no link tables: linearize via bounce buf */
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			/* sg_to_link_tbl() may coalesce adjacent segments */
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed*/
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}
1524
/*
 * Map the destination scatterlist @dst (@len bytes) into descriptor
 * pointer @ptr for DMA from the SEC.
 *
 * @dir == DMA_NONE signals an in-place operation: dst was already mapped
 * together with src and @sg_count from map_sg_in_talitos_ptr() is reused.
 * SEC1 places output in the second half of the bounce buffer (offset
 * @len into dma_link_tbl); SEC2+ builds its output link table after the
 * source entries, at link_tbl[src_nents + 1].
 */
void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			/* output lands in bounce buffer at offset @len;
			 * copied back to dst in the unmap path */
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			/* output link table lives just past the src entries */
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					     sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
1569
/*
 * Build and submit a SEC descriptor for an ablkcipher request.
 * Pointer layout: [0] empty, [1] IV in, [2] key, [3] data in,
 * [4] data out, [5] IV out, [6] empty.
 * Returns -EINPROGRESS on successful submission; on any other return
 * value all mappings are undone and @edesc is freed here, so @callback
 * will never fire.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in — BIDIRECTIONAL when operating in place so the same
	 * mapping serves both directions
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out — DMA_NONE tells the helper dst is already mapped */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out — read back in ablkcipher_done() */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1626
1627 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1628                                                     areq, bool encrypt)
1629 {
1630         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1631         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1632         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1633
1634         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1635                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1636                                    areq->base.flags, encrypt);
1637 }
1638
1639 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1640 {
1641         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1642         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1643         struct talitos_edesc *edesc;
1644         unsigned int blocksize =
1645                         crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1646
1647         if (!areq->nbytes)
1648                 return 0;
1649
1650         if (areq->nbytes % blocksize)
1651                 return -EINVAL;
1652
1653         /* allocate extended descriptor */
1654         edesc = ablkcipher_edesc_alloc(areq, true);
1655         if (IS_ERR(edesc))
1656                 return PTR_ERR(edesc);
1657
1658         /* set encrypt */
1659         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1660
1661         return common_nonsnoop(edesc, areq, ablkcipher_done);
1662 }
1663
1664 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1665 {
1666         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1667         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1668         struct talitos_edesc *edesc;
1669         unsigned int blocksize =
1670                         crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1671
1672         if (!areq->nbytes)
1673                 return 0;
1674
1675         if (areq->nbytes % blocksize)
1676                 return -EINVAL;
1677
1678         /* allocate extended descriptor */
1679         edesc = ablkcipher_edesc_alloc(areq, false);
1680         if (IS_ERR(edesc))
1681                 return PTR_ERR(edesc);
1682
1683         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1684
1685         return common_nonsnoop(edesc, areq, ablkcipher_done);
1686 }
1687
/*
 * Undo the DMA mappings set up by common_nonsnoop_hash(): hash/context
 * out (ptr[5]), data in (ptr[3] via psrc), and — only when they were
 * actually mapped, indicated by a non-zero length field — hash context
 * in (ptr[1]) and HMAC key (ptr[2]), plus the link table if present.
 */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	/* HMAC key was only mapped when ctx->keylen != 0 */
	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1714
/*
 * Completion callback for hash requests: carry any deferred partial
 * block into the request context for the next update/final/finup,
 * then unmap, free the descriptor and complete the request.
 */
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
1735
1736 /*
1737  * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1738  * ourself and submit a padded block
1739  */
1740 void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1741                                struct talitos_edesc *edesc,
1742                                struct talitos_ptr *ptr)
1743 {
1744         static u8 padded_hash[64] = {
1745                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1746                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1747                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1748                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1749         };
1750
1751         pr_err_once("Bug in SEC1, padding ourself\n");
1752         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1753         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1754                                (char *)padded_hash, DMA_TO_DEVICE);
1755 }
1756
/*
 * Build and submit a SEC hash descriptor.
 * Pointer layout: [0] empty, [1] hash context in (unless first op with
 * h/w init), [2] HMAC key (if any), [3] data in, [4] empty,
 * [5] digest out (last) or context out (intermediate), [6] empty.
 * Returns -EINPROGRESS on successful submission; otherwise mappings are
 * undone and @edesc freed here.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* SEC1 can't hash a zero-length message: substitute a padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1827
1828 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1829                                                unsigned int nbytes)
1830 {
1831         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1832         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1833         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1834
1835         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1836                                    nbytes, 0, 0, 0, areq->base.flags, false);
1837 }
1838
1839 static int ahash_init(struct ahash_request *areq)
1840 {
1841         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1842         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1843
1844         /* Initialize the context */
1845         req_ctx->nbuf = 0;
1846         req_ctx->first = 1; /* first indicates h/w must init its context */
1847         req_ctx->swinit = 0; /* assume h/w init of context */
1848         req_ctx->hw_context_size =
1849                 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1850                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1851                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1852
1853         return 0;
1854 }
1855
1856 /*
1857  * on h/w without explicit sha224 support, we initialize h/w context
1858  * manually with sha224 constants, and tell it to run sha256.
1859  */
1860 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1861 {
1862         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1863
1864         ahash_init(areq);
1865         req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1866
1867         req_ctx->hw_context[0] = SHA224_H0;
1868         req_ctx->hw_context[1] = SHA224_H1;
1869         req_ctx->hw_context[2] = SHA224_H2;
1870         req_ctx->hw_context[3] = SHA224_H3;
1871         req_ctx->hw_context[4] = SHA224_H4;
1872         req_ctx->hw_context[5] = SHA224_H5;
1873         req_ctx->hw_context[6] = SHA224_H6;
1874         req_ctx->hw_context[7] = SHA224_H7;
1875
1876         /* init 64-bit count */
1877         req_ctx->hw_context[8] = 0;
1878         req_ctx->hw_context[9] = 0;
1879
1880         return 0;
1881 }
1882
/*
 * Core of update/final/finup/digest: buffer sub-block data, carve off
 * the whole blocks to hash now, stash the remainder for the next call,
 * then build and submit the descriptor.
 * @nbytes: bytes available in areq->src for this call (0 for final).
 * Assumes blocksize is a power of two (true for all supported hashes).
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_nents_for_len(areq->src, nbytes),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	/* copy the tail into bufnext; ahash_done() moves it to buf */
	if (to_hash_later) {
		int nents = sg_nents_for_len(areq->src, nbytes);
		sg_pcopy_to_buffer(areq->src, nents,
				      req_ctx->bufnext,
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
1965
1966 static int ahash_update(struct ahash_request *areq)
1967 {
1968         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1969
1970         req_ctx->last = 0;
1971
1972         return ahash_process_req(areq, areq->nbytes);
1973 }
1974
1975 static int ahash_final(struct ahash_request *areq)
1976 {
1977         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1978
1979         req_ctx->last = 1;
1980
1981         return ahash_process_req(areq, 0);
1982 }
1983
1984 static int ahash_finup(struct ahash_request *areq)
1985 {
1986         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1987
1988         req_ctx->last = 1;
1989
1990         return ahash_process_req(areq, areq->nbytes);
1991 }
1992
1993 static int ahash_digest(struct ahash_request *areq)
1994 {
1995         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1996         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1997
1998         ahash->init(areq);
1999         req_ctx->last = 1;
2000
2001         return ahash_process_req(areq, areq->nbytes);
2002 }
2003
2004 static int ahash_export(struct ahash_request *areq, void *out)
2005 {
2006         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2007         struct talitos_export_state *export = out;
2008
2009         memcpy(export->hw_context, req_ctx->hw_context,
2010                req_ctx->hw_context_size);
2011         memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2012         export->swinit = req_ctx->swinit;
2013         export->first = req_ctx->first;
2014         export->last = req_ctx->last;
2015         export->to_hash_later = req_ctx->to_hash_later;
2016         export->nbuf = req_ctx->nbuf;
2017
2018         return 0;
2019 }
2020
2021 static int ahash_import(struct ahash_request *areq, const void *in)
2022 {
2023         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2024         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2025         const struct talitos_export_state *export = in;
2026
2027         memset(req_ctx, 0, sizeof(*req_ctx));
2028         req_ctx->hw_context_size =
2029                 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2030                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2031                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2032         memcpy(req_ctx->hw_context, export->hw_context,
2033                req_ctx->hw_context_size);
2034         memcpy(req_ctx->buf, export->buf, export->nbuf);
2035         req_ctx->swinit = export->swinit;
2036         req_ctx->first = export->first;
2037         req_ctx->last = export->last;
2038         req_ctx->to_hash_later = export->to_hash_later;
2039         req_ctx->nbuf = export->nbuf;
2040
2041         return 0;
2042 }
2043
/* Completion tracking for the synchronous keyhash() helper. */
struct keyhash_result {
	struct completion completion;	/* signalled by keyhash_complete() */
	int err;			/* final status of the hash request */
};
2048
2049 static void keyhash_complete(struct crypto_async_request *req, int err)
2050 {
2051         struct keyhash_result *res = req->data;
2052
2053         if (err == -EINPROGRESS)
2054                 return;
2055
2056         res->err = err;
2057         complete(&res->completion);
2058 }
2059
/*
 * Synchronously digest @key into @hash (SHA512_DIGEST_SIZE bytes max).
 * Used when an HMAC key exceeds the block size.  The tfm's keylen is
 * forced to 0 so the driver runs a plain hash rather than an HMAC.
 * NOTE(review): the wait is interruptible — a signal can abandon the
 * request while the engine may still own the buffers; confirm this is
 * acceptable or switch to an uninterruptible wait.
 */
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		/* completed synchronously */
		break;
	case -EINPROGRESS:
	case -EBUSY:
		/* wait for keyhash_complete() to report the real status */
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
2102
2103 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2104                         unsigned int keylen)
2105 {
2106         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2107         unsigned int blocksize =
2108                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2109         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2110         unsigned int keysize = keylen;
2111         u8 hash[SHA512_DIGEST_SIZE];
2112         int ret;
2113
2114         if (keylen <= blocksize)
2115                 memcpy(ctx->key, key, keysize);
2116         else {
2117                 /* Must get the hash of the long key */
2118                 ret = keyhash(tfm, key, keylen, hash);
2119
2120                 if (ret) {
2121                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2122                         return -EINVAL;
2123                 }
2124
2125                 keysize = digestsize;
2126                 memcpy(ctx->key, hash, digestsize);
2127         }
2128
2129         ctx->keylen = keysize;
2130
2131         return 0;
2132 }
2133
2134
/*
 * Template describing one algorithm the driver registers: @type selects
 * which union member is valid, and @desc_hdr_template seeds the SEC
 * descriptor header for requests on that algorithm.
 */
struct talitos_alg_template {
	u32 type;			/* CRYPTO_ALG_TYPE_* selector */
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;	/* base SEC descriptor header bits */
};
2144
2145 static struct talitos_alg_template driver_algs[] = {
2146         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2147         {       .type = CRYPTO_ALG_TYPE_AEAD,
2148                 .alg.aead = {
2149                         .base = {
2150                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2151                                 .cra_driver_name = "authenc-hmac-sha1-"
2152                                                    "cbc-aes-talitos",
2153                                 .cra_blocksize = AES_BLOCK_SIZE,
2154                                 .cra_flags = CRYPTO_ALG_ASYNC,
2155                         },
2156                         .ivsize = AES_BLOCK_SIZE,
2157                         .maxauthsize = SHA1_DIGEST_SIZE,
2158                 },
2159                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2160                                      DESC_HDR_SEL0_AESU |
2161                                      DESC_HDR_MODE0_AESU_CBC |
2162                                      DESC_HDR_SEL1_MDEUA |
2163                                      DESC_HDR_MODE1_MDEU_INIT |
2164                                      DESC_HDR_MODE1_MDEU_PAD |
2165                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2166         },
2167         {       .type = CRYPTO_ALG_TYPE_AEAD,
2168                 .alg.aead = {
2169                         .base = {
2170                                 .cra_name = "authenc(hmac(sha1),"
2171                                             "cbc(des3_ede))",
2172                                 .cra_driver_name = "authenc-hmac-sha1-"
2173                                                    "cbc-3des-talitos",
2174                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2175                                 .cra_flags = CRYPTO_ALG_ASYNC,
2176                         },
2177                         .ivsize = DES3_EDE_BLOCK_SIZE,
2178                         .maxauthsize = SHA1_DIGEST_SIZE,
2179                 },
2180                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2181                                      DESC_HDR_SEL0_DEU |
2182                                      DESC_HDR_MODE0_DEU_CBC |
2183                                      DESC_HDR_MODE0_DEU_3DES |
2184                                      DESC_HDR_SEL1_MDEUA |
2185                                      DESC_HDR_MODE1_MDEU_INIT |
2186                                      DESC_HDR_MODE1_MDEU_PAD |
2187                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2188         },
2189         {       .type = CRYPTO_ALG_TYPE_AEAD,
2190                 .alg.aead = {
2191                         .base = {
2192                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2193                                 .cra_driver_name = "authenc-hmac-sha224-"
2194                                                    "cbc-aes-talitos",
2195                                 .cra_blocksize = AES_BLOCK_SIZE,
2196                                 .cra_flags = CRYPTO_ALG_ASYNC,
2197                         },
2198                         .ivsize = AES_BLOCK_SIZE,
2199                         .maxauthsize = SHA224_DIGEST_SIZE,
2200                 },
2201                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2202                                      DESC_HDR_SEL0_AESU |
2203                                      DESC_HDR_MODE0_AESU_CBC |
2204                                      DESC_HDR_SEL1_MDEUA |
2205                                      DESC_HDR_MODE1_MDEU_INIT |
2206                                      DESC_HDR_MODE1_MDEU_PAD |
2207                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2208         },
2209         {       .type = CRYPTO_ALG_TYPE_AEAD,
2210                 .alg.aead = {
2211                         .base = {
2212                                 .cra_name = "authenc(hmac(sha224),"
2213                                             "cbc(des3_ede))",
2214                                 .cra_driver_name = "authenc-hmac-sha224-"
2215                                                    "cbc-3des-talitos",
2216                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2217                                 .cra_flags = CRYPTO_ALG_ASYNC,
2218                         },
2219                         .ivsize = DES3_EDE_BLOCK_SIZE,
2220                         .maxauthsize = SHA224_DIGEST_SIZE,
2221                 },
2222                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2223                                      DESC_HDR_SEL0_DEU |
2224                                      DESC_HDR_MODE0_DEU_CBC |
2225                                      DESC_HDR_MODE0_DEU_3DES |
2226                                      DESC_HDR_SEL1_MDEUA |
2227                                      DESC_HDR_MODE1_MDEU_INIT |
2228                                      DESC_HDR_MODE1_MDEU_PAD |
2229                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2230         },
2231         {       .type = CRYPTO_ALG_TYPE_AEAD,
2232                 .alg.aead = {
2233                         .base = {
2234                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2235                                 .cra_driver_name = "authenc-hmac-sha256-"
2236                                                    "cbc-aes-talitos",
2237                                 .cra_blocksize = AES_BLOCK_SIZE,
2238                                 .cra_flags = CRYPTO_ALG_ASYNC,
2239                         },
2240                         .ivsize = AES_BLOCK_SIZE,
2241                         .maxauthsize = SHA256_DIGEST_SIZE,
2242                 },
2243                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2244                                      DESC_HDR_SEL0_AESU |
2245                                      DESC_HDR_MODE0_AESU_CBC |
2246                                      DESC_HDR_SEL1_MDEUA |
2247                                      DESC_HDR_MODE1_MDEU_INIT |
2248                                      DESC_HDR_MODE1_MDEU_PAD |
2249                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2250         },
2251         {       .type = CRYPTO_ALG_TYPE_AEAD,
2252                 .alg.aead = {
2253                         .base = {
2254                                 .cra_name = "authenc(hmac(sha256),"
2255                                             "cbc(des3_ede))",
2256                                 .cra_driver_name = "authenc-hmac-sha256-"
2257                                                    "cbc-3des-talitos",
2258                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2259                                 .cra_flags = CRYPTO_ALG_ASYNC,
2260                         },
2261                         .ivsize = DES3_EDE_BLOCK_SIZE,
2262                         .maxauthsize = SHA256_DIGEST_SIZE,
2263                 },
2264                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2265                                      DESC_HDR_SEL0_DEU |
2266                                      DESC_HDR_MODE0_DEU_CBC |
2267                                      DESC_HDR_MODE0_DEU_3DES |
2268                                      DESC_HDR_SEL1_MDEUA |
2269                                      DESC_HDR_MODE1_MDEU_INIT |
2270                                      DESC_HDR_MODE1_MDEU_PAD |
2271                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2272         },
2273         {       .type = CRYPTO_ALG_TYPE_AEAD,
2274                 .alg.aead = {
2275                         .base = {
2276                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2277                                 .cra_driver_name = "authenc-hmac-sha384-"
2278                                                    "cbc-aes-talitos",
2279                                 .cra_blocksize = AES_BLOCK_SIZE,
2280                                 .cra_flags = CRYPTO_ALG_ASYNC,
2281                         },
2282                         .ivsize = AES_BLOCK_SIZE,
2283                         .maxauthsize = SHA384_DIGEST_SIZE,
2284                 },
2285                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2286                                      DESC_HDR_SEL0_AESU |
2287                                      DESC_HDR_MODE0_AESU_CBC |
2288                                      DESC_HDR_SEL1_MDEUB |
2289                                      DESC_HDR_MODE1_MDEU_INIT |
2290                                      DESC_HDR_MODE1_MDEU_PAD |
2291                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2292         },
2293         {       .type = CRYPTO_ALG_TYPE_AEAD,
2294                 .alg.aead = {
2295                         .base = {
2296                                 .cra_name = "authenc(hmac(sha384),"
2297                                             "cbc(des3_ede))",
2298                                 .cra_driver_name = "authenc-hmac-sha384-"
2299                                                    "cbc-3des-talitos",
2300                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2301                                 .cra_flags = CRYPTO_ALG_ASYNC,
2302                         },
2303                         .ivsize = DES3_EDE_BLOCK_SIZE,
2304                         .maxauthsize = SHA384_DIGEST_SIZE,
2305                 },
2306                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2307                                      DESC_HDR_SEL0_DEU |
2308                                      DESC_HDR_MODE0_DEU_CBC |
2309                                      DESC_HDR_MODE0_DEU_3DES |
2310                                      DESC_HDR_SEL1_MDEUB |
2311                                      DESC_HDR_MODE1_MDEU_INIT |
2312                                      DESC_HDR_MODE1_MDEU_PAD |
2313                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2314         },
2315         {       .type = CRYPTO_ALG_TYPE_AEAD,
2316                 .alg.aead = {
2317                         .base = {
2318                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2319                                 .cra_driver_name = "authenc-hmac-sha512-"
2320                                                    "cbc-aes-talitos",
2321                                 .cra_blocksize = AES_BLOCK_SIZE,
2322                                 .cra_flags = CRYPTO_ALG_ASYNC,
2323                         },
2324                         .ivsize = AES_BLOCK_SIZE,
2325                         .maxauthsize = SHA512_DIGEST_SIZE,
2326                 },
2327                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2328                                      DESC_HDR_SEL0_AESU |
2329                                      DESC_HDR_MODE0_AESU_CBC |
2330                                      DESC_HDR_SEL1_MDEUB |
2331                                      DESC_HDR_MODE1_MDEU_INIT |
2332                                      DESC_HDR_MODE1_MDEU_PAD |
2333                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2334         },
2335         {       .type = CRYPTO_ALG_TYPE_AEAD,
2336                 .alg.aead = {
2337                         .base = {
2338                                 .cra_name = "authenc(hmac(sha512),"
2339                                             "cbc(des3_ede))",
2340                                 .cra_driver_name = "authenc-hmac-sha512-"
2341                                                    "cbc-3des-talitos",
2342                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2343                                 .cra_flags = CRYPTO_ALG_ASYNC,
2344                         },
2345                         .ivsize = DES3_EDE_BLOCK_SIZE,
2346                         .maxauthsize = SHA512_DIGEST_SIZE,
2347                 },
2348                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2349                                      DESC_HDR_SEL0_DEU |
2350                                      DESC_HDR_MODE0_DEU_CBC |
2351                                      DESC_HDR_MODE0_DEU_3DES |
2352                                      DESC_HDR_SEL1_MDEUB |
2353                                      DESC_HDR_MODE1_MDEU_INIT |
2354                                      DESC_HDR_MODE1_MDEU_PAD |
2355                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2356         },
2357         {       .type = CRYPTO_ALG_TYPE_AEAD,
2358                 .alg.aead = {
2359                         .base = {
2360                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2361                                 .cra_driver_name = "authenc-hmac-md5-"
2362                                                    "cbc-aes-talitos",
2363                                 .cra_blocksize = AES_BLOCK_SIZE,
2364                                 .cra_flags = CRYPTO_ALG_ASYNC,
2365                         },
2366                         .ivsize = AES_BLOCK_SIZE,
2367                         .maxauthsize = MD5_DIGEST_SIZE,
2368                 },
2369                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2370                                      DESC_HDR_SEL0_AESU |
2371                                      DESC_HDR_MODE0_AESU_CBC |
2372                                      DESC_HDR_SEL1_MDEUA |
2373                                      DESC_HDR_MODE1_MDEU_INIT |
2374                                      DESC_HDR_MODE1_MDEU_PAD |
2375                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2376         },
2377         {       .type = CRYPTO_ALG_TYPE_AEAD,
2378                 .alg.aead = {
2379                         .base = {
2380                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2381                                 .cra_driver_name = "authenc-hmac-md5-"
2382                                                    "cbc-3des-talitos",
2383                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2384                                 .cra_flags = CRYPTO_ALG_ASYNC,
2385                         },
2386                         .ivsize = DES3_EDE_BLOCK_SIZE,
2387                         .maxauthsize = MD5_DIGEST_SIZE,
2388                 },
2389                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2390                                      DESC_HDR_SEL0_DEU |
2391                                      DESC_HDR_MODE0_DEU_CBC |
2392                                      DESC_HDR_MODE0_DEU_3DES |
2393                                      DESC_HDR_SEL1_MDEUA |
2394                                      DESC_HDR_MODE1_MDEU_INIT |
2395                                      DESC_HDR_MODE1_MDEU_PAD |
2396                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2397         },
2398         /* ABLKCIPHER algorithms. */
2399         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2400                 .alg.crypto = {
2401                         .cra_name = "cbc(aes)",
2402                         .cra_driver_name = "cbc-aes-talitos",
2403                         .cra_blocksize = AES_BLOCK_SIZE,
2404                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2405                                      CRYPTO_ALG_ASYNC,
2406                         .cra_ablkcipher = {
2407                                 .min_keysize = AES_MIN_KEY_SIZE,
2408                                 .max_keysize = AES_MAX_KEY_SIZE,
2409                                 .ivsize = AES_BLOCK_SIZE,
2410                                 .setkey = ablkcipher_aes_setkey,
2411                         }
2412                 },
2413                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2414                                      DESC_HDR_SEL0_AESU |
2415                                      DESC_HDR_MODE0_AESU_CBC,
2416         },
2417         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2418                 .alg.crypto = {
2419                         .cra_name = "cbc(des3_ede)",
2420                         .cra_driver_name = "cbc-3des-talitos",
2421                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2422                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2423                                      CRYPTO_ALG_ASYNC,
2424                         .cra_ablkcipher = {
2425                                 .min_keysize = DES3_EDE_KEY_SIZE,
2426                                 .max_keysize = DES3_EDE_KEY_SIZE,
2427                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2428                         }
2429                 },
2430                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2431                                      DESC_HDR_SEL0_DEU |
2432                                      DESC_HDR_MODE0_DEU_CBC |
2433                                      DESC_HDR_MODE0_DEU_3DES,
2434         },
2435         /* AHASH algorithms. */
2436         {       .type = CRYPTO_ALG_TYPE_AHASH,
2437                 .alg.hash = {
2438                         .halg.digestsize = MD5_DIGEST_SIZE,
2439                         .halg.statesize = sizeof(struct talitos_export_state),
2440                         .halg.base = {
2441                                 .cra_name = "md5",
2442                                 .cra_driver_name = "md5-talitos",
2443                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2444                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2445                                              CRYPTO_ALG_ASYNC,
2446                         }
2447                 },
2448                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2449                                      DESC_HDR_SEL0_MDEUA |
2450                                      DESC_HDR_MODE0_MDEU_MD5,
2451         },
2452         {       .type = CRYPTO_ALG_TYPE_AHASH,
2453                 .alg.hash = {
2454                         .halg.digestsize = SHA1_DIGEST_SIZE,
2455                         .halg.statesize = sizeof(struct talitos_export_state),
2456                         .halg.base = {
2457                                 .cra_name = "sha1",
2458                                 .cra_driver_name = "sha1-talitos",
2459                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2460                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2461                                              CRYPTO_ALG_ASYNC,
2462                         }
2463                 },
2464                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2465                                      DESC_HDR_SEL0_MDEUA |
2466                                      DESC_HDR_MODE0_MDEU_SHA1,
2467         },
2468         {       .type = CRYPTO_ALG_TYPE_AHASH,
2469                 .alg.hash = {
2470                         .halg.digestsize = SHA224_DIGEST_SIZE,
2471                         .halg.statesize = sizeof(struct talitos_export_state),
2472                         .halg.base = {
2473                                 .cra_name = "sha224",
2474                                 .cra_driver_name = "sha224-talitos",
2475                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2476                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2477                                              CRYPTO_ALG_ASYNC,
2478                         }
2479                 },
2480                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2481                                      DESC_HDR_SEL0_MDEUA |
2482                                      DESC_HDR_MODE0_MDEU_SHA224,
2483         },
2484         {       .type = CRYPTO_ALG_TYPE_AHASH,
2485                 .alg.hash = {
2486                         .halg.digestsize = SHA256_DIGEST_SIZE,
2487                         .halg.statesize = sizeof(struct talitos_export_state),
2488                         .halg.base = {
2489                                 .cra_name = "sha256",
2490                                 .cra_driver_name = "sha256-talitos",
2491                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2492                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2493                                              CRYPTO_ALG_ASYNC,
2494                         }
2495                 },
2496                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2497                                      DESC_HDR_SEL0_MDEUA |
2498                                      DESC_HDR_MODE0_MDEU_SHA256,
2499         },
2500         {       .type = CRYPTO_ALG_TYPE_AHASH,
2501                 .alg.hash = {
2502                         .halg.digestsize = SHA384_DIGEST_SIZE,
2503                         .halg.statesize = sizeof(struct talitos_export_state),
2504                         .halg.base = {
2505                                 .cra_name = "sha384",
2506                                 .cra_driver_name = "sha384-talitos",
2507                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2508                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2509                                              CRYPTO_ALG_ASYNC,
2510                         }
2511                 },
2512                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2513                                      DESC_HDR_SEL0_MDEUB |
2514                                      DESC_HDR_MODE0_MDEUB_SHA384,
2515         },
2516         {       .type = CRYPTO_ALG_TYPE_AHASH,
2517                 .alg.hash = {
2518                         .halg.digestsize = SHA512_DIGEST_SIZE,
2519                         .halg.statesize = sizeof(struct talitos_export_state),
2520                         .halg.base = {
2521                                 .cra_name = "sha512",
2522                                 .cra_driver_name = "sha512-talitos",
2523                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2524                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2525                                              CRYPTO_ALG_ASYNC,
2526                         }
2527                 },
2528                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2529                                      DESC_HDR_SEL0_MDEUB |
2530                                      DESC_HDR_MODE0_MDEUB_SHA512,
2531         },
2532         {       .type = CRYPTO_ALG_TYPE_AHASH,
2533                 .alg.hash = {
2534                         .halg.digestsize = MD5_DIGEST_SIZE,
2535                         .halg.statesize = sizeof(struct talitos_export_state),
2536                         .halg.base = {
2537                                 .cra_name = "hmac(md5)",
2538                                 .cra_driver_name = "hmac-md5-talitos",
2539                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2540                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2541                                              CRYPTO_ALG_ASYNC,
2542                         }
2543                 },
2544                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2545                                      DESC_HDR_SEL0_MDEUA |
2546                                      DESC_HDR_MODE0_MDEU_MD5,
2547         },
2548         {       .type = CRYPTO_ALG_TYPE_AHASH,
2549                 .alg.hash = {
2550                         .halg.digestsize = SHA1_DIGEST_SIZE,
2551                         .halg.statesize = sizeof(struct talitos_export_state),
2552                         .halg.base = {
2553                                 .cra_name = "hmac(sha1)",
2554                                 .cra_driver_name = "hmac-sha1-talitos",
2555                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2556                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2557                                              CRYPTO_ALG_ASYNC,
2558                         }
2559                 },
2560                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2561                                      DESC_HDR_SEL0_MDEUA |
2562                                      DESC_HDR_MODE0_MDEU_SHA1,
2563         },
2564         {       .type = CRYPTO_ALG_TYPE_AHASH,
2565                 .alg.hash = {
2566                         .halg.digestsize = SHA224_DIGEST_SIZE,
2567                         .halg.statesize = sizeof(struct talitos_export_state),
2568                         .halg.base = {
2569                                 .cra_name = "hmac(sha224)",
2570                                 .cra_driver_name = "hmac-sha224-talitos",
2571                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2572                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2573                                              CRYPTO_ALG_ASYNC,
2574                         }
2575                 },
2576                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2577                                      DESC_HDR_SEL0_MDEUA |
2578                                      DESC_HDR_MODE0_MDEU_SHA224,
2579         },
2580         {       .type = CRYPTO_ALG_TYPE_AHASH,
2581                 .alg.hash = {
2582                         .halg.digestsize = SHA256_DIGEST_SIZE,
2583                         .halg.statesize = sizeof(struct talitos_export_state),
2584                         .halg.base = {
2585                                 .cra_name = "hmac(sha256)",
2586                                 .cra_driver_name = "hmac-sha256-talitos",
2587                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2588                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2589                                              CRYPTO_ALG_ASYNC,
2590                         }
2591                 },
2592                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2593                                      DESC_HDR_SEL0_MDEUA |
2594                                      DESC_HDR_MODE0_MDEU_SHA256,
2595         },
2596         {       .type = CRYPTO_ALG_TYPE_AHASH,
2597                 .alg.hash = {
2598                         .halg.digestsize = SHA384_DIGEST_SIZE,
2599                         .halg.statesize = sizeof(struct talitos_export_state),
2600                         .halg.base = {
2601                                 .cra_name = "hmac(sha384)",
2602                                 .cra_driver_name = "hmac-sha384-talitos",
2603                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2604                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2605                                              CRYPTO_ALG_ASYNC,
2606                         }
2607                 },
2608                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2609                                      DESC_HDR_SEL0_MDEUB |
2610                                      DESC_HDR_MODE0_MDEUB_SHA384,
2611         },
2612         {       .type = CRYPTO_ALG_TYPE_AHASH,
2613                 .alg.hash = {
2614                         .halg.digestsize = SHA512_DIGEST_SIZE,
2615                         .halg.statesize = sizeof(struct talitos_export_state),
2616                         .halg.base = {
2617                                 .cra_name = "hmac(sha512)",
2618                                 .cra_driver_name = "hmac-sha512-talitos",
2619                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2620                                 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2621                                              CRYPTO_ALG_ASYNC,
2622                         }
2623                 },
2624                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2625                                      DESC_HDR_SEL0_MDEUB |
2626                                      DESC_HDR_MODE0_MDEUB_SHA512,
2627         }
2628 };
2629
/* one registered algorithm instance, bound to the SEC device servicing it */
struct talitos_crypto_alg {
	struct list_head entry;	/* link in talitos_private alg_list (see talitos_remove) */
	struct device *dev;	/* SEC device this alg runs on; copied to ctx->dev at init */
	struct talitos_alg_template algt;	/* template copy: alg union + desc header */
};
2635
2636 static int talitos_init_common(struct talitos_ctx *ctx,
2637                                struct talitos_crypto_alg *talitos_alg)
2638 {
2639         struct talitos_private *priv;
2640
2641         /* update context with ptr to dev */
2642         ctx->dev = talitos_alg->dev;
2643
2644         /* assign SEC channel to tfm in round-robin fashion */
2645         priv = dev_get_drvdata(ctx->dev);
2646         ctx->ch = atomic_inc_return(&priv->last_chan) &
2647                   (priv->num_channels - 1);
2648
2649         /* copy descriptor header template value */
2650         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2651
2652         /* select done notification */
2653         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2654
2655         return 0;
2656 }
2657
2658 static int talitos_cra_init(struct crypto_tfm *tfm)
2659 {
2660         struct crypto_alg *alg = tfm->__crt_alg;
2661         struct talitos_crypto_alg *talitos_alg;
2662         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2663
2664         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2665                 talitos_alg = container_of(__crypto_ahash_alg(alg),
2666                                            struct talitos_crypto_alg,
2667                                            algt.alg.hash);
2668         else
2669                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2670                                            algt.alg.crypto);
2671
2672         return talitos_init_common(ctx, talitos_alg);
2673 }
2674
2675 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2676 {
2677         struct aead_alg *alg = crypto_aead_alg(tfm);
2678         struct talitos_crypto_alg *talitos_alg;
2679         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2680
2681         talitos_alg = container_of(alg, struct talitos_crypto_alg,
2682                                    algt.alg.aead);
2683
2684         return talitos_init_common(ctx, talitos_alg);
2685 }
2686
2687 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2688 {
2689         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2690
2691         talitos_cra_init(tfm);
2692
2693         ctx->keylen = 0;
2694         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2695                                  sizeof(struct talitos_ahash_req_ctx));
2696
2697         return 0;
2698 }
2699
2700 /*
2701  * given the alg's descriptor header template, determine whether descriptor
2702  * type and primary/secondary execution units required match the hw
2703  * capabilities description provided in the device tree node.
2704  */
2705 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2706 {
2707         struct talitos_private *priv = dev_get_drvdata(dev);
2708         int ret;
2709
2710         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2711               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2712
2713         if (SECONDARY_EU(desc_hdr_template))
2714                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2715                               & priv->exec_units);
2716
2717         return ret;
2718 }
2719
2720 static int talitos_remove(struct platform_device *ofdev)
2721 {
2722         struct device *dev = &ofdev->dev;
2723         struct talitos_private *priv = dev_get_drvdata(dev);
2724         struct talitos_crypto_alg *t_alg, *n;
2725         int i;
2726
2727         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2728                 switch (t_alg->algt.type) {
2729                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2730                         break;
2731                 case CRYPTO_ALG_TYPE_AEAD:
2732                         crypto_unregister_aead(&t_alg->algt.alg.aead);
2733                         break;
2734                 case CRYPTO_ALG_TYPE_AHASH:
2735                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
2736                         break;
2737                 }
2738                 list_del(&t_alg->entry);
2739                 kfree(t_alg);
2740         }
2741
2742         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2743                 talitos_unregister_rng(dev);
2744
2745         for (i = 0; priv->chan && i < priv->num_channels; i++)
2746                 kfree(priv->chan[i].fifo);
2747
2748         kfree(priv->chan);
2749
2750         for (i = 0; i < 2; i++)
2751                 if (priv->irq[i]) {
2752                         free_irq(priv->irq[i], dev);
2753                         irq_dispose_mapping(priv->irq[i]);
2754                 }
2755
2756         tasklet_kill(&priv->done_task[0]);
2757         if (priv->irq[1])
2758                 tasklet_kill(&priv->done_task[1]);
2759
2760         iounmap(priv->reg);
2761
2762         kfree(priv);
2763
2764         return 0;
2765 }
2766
2767 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2768                                                     struct talitos_alg_template
2769                                                            *template)
2770 {
2771         struct talitos_private *priv = dev_get_drvdata(dev);
2772         struct talitos_crypto_alg *t_alg;
2773         struct crypto_alg *alg;
2774
2775         t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2776         if (!t_alg)
2777                 return ERR_PTR(-ENOMEM);
2778
2779         t_alg->algt = *template;
2780
2781         switch (t_alg->algt.type) {
2782         case CRYPTO_ALG_TYPE_ABLKCIPHER:
2783                 alg = &t_alg->algt.alg.crypto;
2784                 alg->cra_init = talitos_cra_init;
2785                 alg->cra_type = &crypto_ablkcipher_type;
2786                 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2787                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2788                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2789                 alg->cra_ablkcipher.geniv = "eseqiv";
2790                 break;
2791         case CRYPTO_ALG_TYPE_AEAD:
2792                 alg = &t_alg->algt.alg.aead.base;
2793                 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2794                 t_alg->algt.alg.aead.setkey = aead_setkey;
2795                 t_alg->algt.alg.aead.encrypt = aead_encrypt;
2796                 t_alg->algt.alg.aead.decrypt = aead_decrypt;
2797                 break;
2798         case CRYPTO_ALG_TYPE_AHASH:
2799                 alg = &t_alg->algt.alg.hash.halg.base;
2800                 alg->cra_init = talitos_cra_init_ahash;
2801                 alg->cra_type = &crypto_ahash_type;
2802                 t_alg->algt.alg.hash.init = ahash_init;
2803                 t_alg->algt.alg.hash.update = ahash_update;
2804                 t_alg->algt.alg.hash.final = ahash_final;
2805                 t_alg->algt.alg.hash.finup = ahash_finup;
2806                 t_alg->algt.alg.hash.digest = ahash_digest;
2807                 if (!strncmp(alg->cra_name, "hmac", 4))
2808                         t_alg->algt.alg.hash.setkey = ahash_setkey;
2809                 t_alg->algt.alg.hash.import = ahash_import;
2810                 t_alg->algt.alg.hash.export = ahash_export;
2811
2812                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2813                     !strncmp(alg->cra_name, "hmac", 4)) {
2814                         kfree(t_alg);
2815                         return ERR_PTR(-ENOTSUPP);
2816                 }
2817                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2818                     (!strcmp(alg->cra_name, "sha224") ||
2819                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
2820                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2821                         t_alg->algt.desc_hdr_template =
2822                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2823                                         DESC_HDR_SEL0_MDEUA |
2824                                         DESC_HDR_MODE0_MDEU_SHA256;
2825                 }
2826                 break;
2827         default:
2828                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2829                 kfree(t_alg);
2830                 return ERR_PTR(-EINVAL);
2831         }
2832
2833         alg->cra_module = THIS_MODULE;
2834         alg->cra_priority = TALITOS_CRA_PRIORITY;
2835         alg->cra_alignmask = 0;
2836         alg->cra_ctxsize = sizeof(struct talitos_ctx);
2837         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2838
2839         t_alg->dev = dev;
2840
2841         return t_alg;
2842 }
2843
2844 static int talitos_probe_irq(struct platform_device *ofdev)
2845 {
2846         struct device *dev = &ofdev->dev;
2847         struct device_node *np = ofdev->dev.of_node;
2848         struct talitos_private *priv = dev_get_drvdata(dev);
2849         int err;
2850         bool is_sec1 = has_ftr_sec1(priv);
2851
2852         priv->irq[0] = irq_of_parse_and_map(np, 0);
2853         if (!priv->irq[0]) {
2854                 dev_err(dev, "failed to map irq\n");
2855                 return -EINVAL;
2856         }
2857         if (is_sec1) {
2858                 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2859                                   dev_driver_string(dev), dev);
2860                 goto primary_out;
2861         }
2862
2863         priv->irq[1] = irq_of_parse_and_map(np, 1);
2864
2865         /* get the primary irq line */
2866         if (!priv->irq[1]) {
2867                 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
2868                                   dev_driver_string(dev), dev);
2869                 goto primary_out;
2870         }
2871
2872         err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
2873                           dev_driver_string(dev), dev);
2874         if (err)
2875                 goto primary_out;
2876
2877         /* get the secondary irq line */
2878         err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
2879                           dev_driver_string(dev), dev);
2880         if (err) {
2881                 dev_err(dev, "failed to request secondary irq\n");
2882                 irq_dispose_mapping(priv->irq[1]);
2883                 priv->irq[1] = 0;
2884         }
2885
2886         return err;
2887
2888 primary_out:
2889         if (err) {
2890                 dev_err(dev, "failed to request primary irq\n");
2891                 irq_dispose_mapping(priv->irq[0]);
2892                 priv->irq[0] = 0;
2893         }
2894
2895         return err;
2896 }
2897
2898 static int talitos_probe(struct platform_device *ofdev)
2899 {
2900         struct device *dev = &ofdev->dev;
2901         struct device_node *np = ofdev->dev.of_node;
2902         struct talitos_private *priv;
2903         const unsigned int *prop;
2904         int i, err;
2905         int stride;
2906
2907         priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2908         if (!priv)
2909                 return -ENOMEM;
2910
2911         INIT_LIST_HEAD(&priv->alg_list);
2912
2913         dev_set_drvdata(dev, priv);
2914
2915         priv->ofdev = ofdev;
2916
2917         spin_lock_init(&priv->reg_lock);
2918
2919         priv->reg = of_iomap(np, 0);
2920         if (!priv->reg) {
2921                 dev_err(dev, "failed to of_iomap\n");
2922                 err = -ENOMEM;
2923                 goto err_out;
2924         }
2925
2926         /* get SEC version capabilities from device tree */
2927         prop = of_get_property(np, "fsl,num-channels", NULL);
2928         if (prop)
2929                 priv->num_channels = *prop;
2930
2931         prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2932         if (prop)
2933                 priv->chfifo_len = *prop;
2934
2935         prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2936         if (prop)
2937                 priv->exec_units = *prop;
2938
2939         prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2940         if (prop)
2941                 priv->desc_types = *prop;
2942
2943         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2944             !priv->exec_units || !priv->desc_types) {
2945                 dev_err(dev, "invalid property data in device tree node\n");
2946                 err = -EINVAL;
2947                 goto err_out;
2948         }
2949
2950         if (of_device_is_compatible(np, "fsl,sec3.0"))
2951                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2952
2953         if (of_device_is_compatible(np, "fsl,sec2.1"))
2954                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2955                                   TALITOS_FTR_SHA224_HWINIT |
2956                                   TALITOS_FTR_HMAC_OK;
2957
2958         if (of_device_is_compatible(np, "fsl,sec1.0"))
2959                 priv->features |= TALITOS_FTR_SEC1;
2960
2961         if (of_device_is_compatible(np, "fsl,sec1.2")) {
2962                 priv->reg_deu = priv->reg + TALITOS12_DEU;
2963                 priv->reg_aesu = priv->reg + TALITOS12_AESU;
2964                 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2965                 stride = TALITOS1_CH_STRIDE;
2966         } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2967                 priv->reg_deu = priv->reg + TALITOS10_DEU;
2968                 priv->reg_aesu = priv->reg + TALITOS10_AESU;
2969                 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2970                 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2971                 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2972                 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2973                 stride = TALITOS1_CH_STRIDE;
2974         } else {
2975                 priv->reg_deu = priv->reg + TALITOS2_DEU;
2976                 priv->reg_aesu = priv->reg + TALITOS2_AESU;
2977                 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2978                 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2979                 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2980                 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2981                 priv->reg_keu = priv->reg + TALITOS2_KEU;
2982                 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2983                 stride = TALITOS2_CH_STRIDE;
2984         }
2985
2986         err = talitos_probe_irq(ofdev);
2987         if (err)
2988                 goto err_out;
2989
2990         if (of_device_is_compatible(np, "fsl,sec1.0")) {
2991                 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2992                              (unsigned long)dev);
2993         } else {
2994                 if (!priv->irq[1]) {
2995                         tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2996                                      (unsigned long)dev);
2997                 } else {
2998                         tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2999                                      (unsigned long)dev);
3000                         tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3001                                      (unsigned long)dev);
3002                 }
3003         }
3004
3005         priv->chan = kzalloc(sizeof(struct talitos_channel) *
3006                              priv->num_channels, GFP_KERNEL);
3007         if (!priv->chan) {
3008                 dev_err(dev, "failed to allocate channel management space\n");
3009                 err = -ENOMEM;
3010                 goto err_out;
3011         }
3012
3013         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3014
3015         for (i = 0; i < priv->num_channels; i++) {
3016                 priv->chan[i].reg = priv->reg + stride * (i + 1);
3017                 if (!priv->irq[1] || !(i & 1))
3018                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3019
3020                 spin_lock_init(&priv->chan[i].head_lock);
3021                 spin_lock_init(&priv->chan[i].tail_lock);
3022
3023                 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3024                                              priv->fifo_len, GFP_KERNEL);
3025                 if (!priv->chan[i].fifo) {
3026                         dev_err(dev, "failed to allocate request fifo %d\n", i);
3027                         err = -ENOMEM;
3028                         goto err_out;
3029                 }
3030
3031                 atomic_set(&priv->chan[i].submit_count,
3032                            -(priv->chfifo_len - 1));
3033         }
3034
3035         dma_set_mask(dev, DMA_BIT_MASK(36));
3036
3037         /* reset and initialize the h/w */
3038         err = init_device(dev);
3039         if (err) {
3040                 dev_err(dev, "failed to initialize device\n");
3041                 goto err_out;
3042         }
3043
3044         /* register the RNG, if available */
3045         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3046                 err = talitos_register_rng(dev);
3047                 if (err) {
3048                         dev_err(dev, "failed to register hwrng: %d\n", err);
3049                         goto err_out;
3050                 } else
3051                         dev_info(dev, "hwrng\n");
3052         }
3053
3054         /* register crypto algorithms the device supports */
3055         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3056                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3057                         struct talitos_crypto_alg *t_alg;
3058                         struct crypto_alg *alg = NULL;
3059
3060                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3061                         if (IS_ERR(t_alg)) {
3062                                 err = PTR_ERR(t_alg);
3063                                 if (err == -ENOTSUPP)
3064                                         continue;
3065                                 goto err_out;
3066                         }
3067
3068                         switch (t_alg->algt.type) {
3069                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3070                                 err = crypto_register_alg(
3071                                                 &t_alg->algt.alg.crypto);
3072                                 alg = &t_alg->algt.alg.crypto;
3073                                 break;
3074
3075                         case CRYPTO_ALG_TYPE_AEAD:
3076                                 err = crypto_register_aead(
3077                                         &t_alg->algt.alg.aead);
3078                                 alg = &t_alg->algt.alg.aead.base;
3079                                 break;
3080
3081                         case CRYPTO_ALG_TYPE_AHASH:
3082                                 err = crypto_register_ahash(
3083                                                 &t_alg->algt.alg.hash);
3084                                 alg = &t_alg->algt.alg.hash.halg.base;
3085                                 break;
3086                         }
3087                         if (err) {
3088                                 dev_err(dev, "%s alg registration failed\n",
3089                                         alg->cra_driver_name);
3090                                 kfree(t_alg);
3091                         } else
3092                                 list_add_tail(&t_alg->entry, &priv->alg_list);
3093                 }
3094         }
3095         if (!list_empty(&priv->alg_list))
3096                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3097                          (char *)of_get_property(np, "compatible", NULL));
3098
3099         return 0;
3100
3101 err_out:
3102         talitos_remove(ofdev);
3103
3104         return err;
3105 }
3106
/*
 * Device-tree match table.  SEC1 and SEC2/3 support are individually
 * selectable at build time; newer parts declare compatibility with
 * these base strings (e.g. "fsl,sec2.1" nodes also list "fsl,sec2.0").
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, talitos_match);
3121
/* Platform driver glue: binds talitos_probe/talitos_remove to DT nodes
 * matching talitos_match. */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");