/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (!is_sec1)
		dst_ptr->eptr = src_ptr->eptr;
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
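
/*
 * Illustrative sketch (not part of the original driver): a caller-side view
 * of talitos_submit().  The names my_done() and my_req are hypothetical;
 * only the -EINPROGRESS/-EAGAIN contract and the "check err and feedback in
 * the descriptor header" requirement come from the code above.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *r = context;	// hypothetical caller state
 *
 *		// error is 0 on success; DESC_HDR_DONE feedback is in desc->hdr
 *		finish_my_request(r, error);
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, r);
 *	if (err != -EINPROGRESS)
 *		// -EAGAIN means the channel fifo is full; caller may retry
 *		handle_submit_failure(r, err);
 */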
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	/* SEC1 channel done bits are non-contiguous in the ISR */	\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};
#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
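
/*
 * Illustrative layout sketch (not from the original source): for a SEC2
 * AEAD request with scattered src/dst, the trailing flexible array of a
 * talitos_edesc is used as
 *
 *	link_tbl[0 .. src_nents + dst_nents + 1]	h/w link table entries
 *	+ authsize					stashed (incoming) ICV
 *	+ authsize					generated ICV
 *
 * which matches the "+ 2" entries and "authsize * 2" accounting in
 * talitos_edesc_alloc() below.
 */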
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
		}
	} else
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
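
/*
 * Worked example (illustrative only): for a three-entry scatterlist with
 * DMA lengths 16/64/64, offset 24 and cryptlen 100, the helper above skips
 * the first entry entirely (offset >= 16, leaving offset 8), emits
 * 64 - 8 = 56 bytes from the second entry and the remaining 44 bytes from
 * the third, returns count = 2, and tags the last emitted entry with
 * DESC_PTR_LNKTBL_RETURN.
 */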
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
						       : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
			sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
					(edesc->src_nents + edesc->dst_nents +
					 2) * sizeof(struct talitos_ptr) +
					authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
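
/*
 * Summary of the pointer map built above, collected from the inline
 * comments in ipsec_esp() (the dword roles follow the IPSEC_ESP
 * descriptor type):
 *
 *	ptr[0]	HMAC (auth) key		ptr[4]	cipher in (+ICV extent)
 *	ptr[1]	HMAC data (assoclen)	ptr[5]	cipher out (+ICV)
 *	ptr[2]	cipher IV in		ptr[6]	IV out
 *	ptr[3]	cipher key
 */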
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						  (encrypt ? 0 : authsize));
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						  (encrypt ? authsize : 0));
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else
		talitos_sg_unmap(dev, edesc, src, dst);
}
static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
	    keylen == AES_KEYSIZE_256)
		return ablkcipher_setkey(cipher, key, keylen);

	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	memcpy(areq->info, ctx->iv, ivsize);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed*/
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}
void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					     sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
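
/*
 * For reference (derived from the inline comments above), the
 * common_nonsnoop descriptor uses its seven pointer dwords as:
 *
 *	ptr[0]	empty		ptr[4]	cipher out
 *	ptr[1]	cipher IV in	ptr[5]	IV out
 *	ptr[2]	cipher key	ptr[6]	empty
 *	ptr[3]	cipher in
 */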
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_nents_for_len(areq->src, nbytes),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_nents_for_len(areq->src, nbytes);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
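
/*
 * Worked example (illustrative only): with blocksize 64, nbuf = 10 bytes
 * already buffered and an update of nbytes = 100, nbytes_to_hash is 110
 * and to_hash_later is 110 & 63 = 46; since this is not the last request
 * and 46 is non-zero, 64 bytes are hashed now (the 10 buffered bytes
 * chained in front of 54 bytes from areq->src) and the remaining 46 bytes
 * are copied to bufnext for the next update/final/finup.
 */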
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}
static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	const struct talitos_export_state *export = in;

	memset(req_ctx, 0, sizeof(*req_ctx));
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	memcpy(req_ctx->hw_context, export->hw_context,
	       req_ctx->hw_context_size);
	memcpy(req_ctx->buf, export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	return 0;
}
struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = ablkcipher_aes_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};

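/*
 * Each template above pairs a generic crypto API name with the SEC
 * descriptor header that implements it; hw_supports() below filters
 * this table against the descriptor types and execution units a given
 * SoC actually advertises in its device tree node.
 */
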
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

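/*
 * The '& (num_channels - 1)' above only distributes tfms evenly because
 * talitos_probe() rejects device trees whose fsl,num-channels is not a
 * power of two; the AND then acts as a cheap modulo.
 */
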
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

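/*
 * For example, the "authenc(hmac(sha1),cbc(aes))" template selects the
 * IPSEC_ESP descriptor type with the AESU as primary and the MDEU as
 * secondary execution unit, so it is only registered when the node's
 * fsl,descriptor-types-mask and fsl,exec-units-mask advertise all three.
 */
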
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; priv->chan && i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);
	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);
	kfree(priv);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		/* keep an entry-provided setkey (e.g. the AES key-size
		 * checking one) and only fall back to the generic one
		 */
		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
					     ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

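/*
 * TALITOS_CRA_PRIORITY is set well above the generic software
 * implementations' priorities, so the crypto API prefers the SEC-backed
 * algorithm whenever both are registered under the same cra_name.
 */
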
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

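/*
 * SEC1 parts use a single interrupt for all channels.  SEC2+ parts may
 * expose two lines: when a second one is present, channels 0/2 are served
 * by the first and channels 1/3 by the second, which is why the done
 * tasklets in talitos_probe() are split the same way.
 */
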
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}
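
/*
 * An illustrative device tree node satisfying the checks above (values
 * vary per SoC; this fragment is an example, not taken from any specific
 * board):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <11 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */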

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}
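
	/*
	 * Starting submit_count at -(chfifo_len - 1) means the
	 * atomic_inc_return() in the submit path (not shown here) first
	 * goes positive when chfifo_len - 1 requests are already in
	 * flight, giving cheap lock-free back-pressure per channel.
	 */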

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");