1 /**
2  * AMCC SoC PPC4xx Crypto Driver
3  *
4  * Copyright (c) 2008 Applied Micro Circuits Corporation.
5  * All rights reserved. James Hsiao <jhsiao@amcc.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * This file implements the AMCC crypto offload Linux device driver for use with
18  * Linux CryptoAPI.
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock_types.h>
24 #include <linux/random.h>
25 #include <linux/scatterlist.h>
26 #include <linux/crypto.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/platform_device.h>
29 #include <linux/init.h>
30 #include <linux/module.h>
31 #include <linux/of_address.h>
32 #include <linux/of_irq.h>
33 #include <linux/of_platform.h>
34 #include <linux/slab.h>
35 #include <asm/dcr.h>
36 #include <asm/dcr-regs.h>
37 #include <asm/cacheflush.h>
38 #include <crypto/aead.h>
39 #include <crypto/aes.h>
40 #include <crypto/ctr.h>
41 #include <crypto/gcm.h>
42 #include <crypto/sha.h>
43 #include <crypto/scatterwalk.h>
44 #include <crypto/skcipher.h>
45 #include <crypto/internal/aead.h>
46 #include <crypto/internal/skcipher.h>
47 #include "crypto4xx_reg_def.h"
48 #include "crypto4xx_core.h"
49 #include "crypto4xx_sa.h"
50 #include "crypto4xx_trng.h"
51
52 #define PPC4XX_SEC_VERSION_STR                  "0.5"
53
54 /**
55  * PPC4xx Crypto Engine Initialization Routine
56  */
57 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
58 {
59         union ce_ring_size ring_size;
60         union ce_ring_control ring_ctrl;
61         union ce_part_ring_size part_ring_size;
62         union ce_io_threshold io_threshold;
63         u32 rand_num;
64         union ce_pe_dma_cfg pe_dma_cfg;
65         u32 device_ctrl;
66
67         writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
68         /* set up pe dma: reset sg, pdr and pe, then release the reset below */
69         pe_dma_cfg.w = 0;
70         pe_dma_cfg.bf.bo_sgpd_en = 1;
71         pe_dma_cfg.bf.bo_data_en = 0;
72         pe_dma_cfg.bf.bo_sa_en = 1;
73         pe_dma_cfg.bf.bo_pd_en = 1;
74         pe_dma_cfg.bf.dynamic_sa_en = 1;
75         pe_dma_cfg.bf.reset_sg = 1;
76         pe_dma_cfg.bf.reset_pdr = 1;
77         pe_dma_cfg.bf.reset_pe = 1;
78         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
79         /* release reset on pe, sg and pdr */
80         pe_dma_cfg.bf.pe_mode = 0;
81         pe_dma_cfg.bf.reset_sg = 0;
82         pe_dma_cfg.bf.reset_pdr = 0;
83         pe_dma_cfg.bf.reset_pe = 0;
84         pe_dma_cfg.bf.bo_td_en = 0;
85         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
86         writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
87         writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
88         writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
89         get_random_bytes(&rand_num, sizeof(rand_num));
90         writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
91         get_random_bytes(&rand_num, sizeof(rand_num));
92         writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
93         ring_size.w = 0;
94         ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
95         ring_size.bf.ring_size   = PPC4XX_NUM_PD;
96         writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
97         ring_ctrl.w = 0;
98         writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
99         device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
100         device_ctrl |= PPC4XX_DC_3DES_EN;
101         writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
102         writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
103         writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
104         part_ring_size.w = 0;
105         part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
106         part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
107         writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
108         writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
109         io_threshold.w = 0;
110         io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
111         io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
112         writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
113         writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
114         writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
115         writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
116         writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
117         writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
118         writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
119         writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
120         /* enable the packet engine, keep pe, sg and pdr out of reset */
121         pe_dma_cfg.bf.pe_mode = 1;
122         pe_dma_cfg.bf.reset_sg = 0;
123         pe_dma_cfg.bf.reset_pdr = 0;
124         pe_dma_cfg.bf.reset_pe = 0;
125         pe_dma_cfg.bf.bo_td_en = 0;
126         writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
127         /* clear all pending interrupts */
128         writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
129         writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
130         writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
131         writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
132         if (dev->is_revb) {
133                 writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
134                        dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
135                 writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
136                        dev->ce_base + CRYPTO4XX_INT_EN);
137         } else {
138                 writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
139         }
140 }
141
142 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
143 {
144         ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
145         if (ctx->sa_in == NULL)
146                 return -ENOMEM;
147
148         ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
149         if (ctx->sa_out == NULL) {
150                 kfree(ctx->sa_in);
151                 ctx->sa_in = NULL;
152                 return -ENOMEM;
153         }
154
155         ctx->sa_len = size;
156
157         return 0;
158 }
159
160 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
161 {
162         kfree(ctx->sa_in);
163         ctx->sa_in = NULL;
164         kfree(ctx->sa_out);
165         ctx->sa_out = NULL;
166         ctx->sa_len = 0;
167 }
168
169 /**
170  * alloc memory for the packet descriptor ring
171  * also alloc the shadow sa pool and the state record pool, and
172  * link every pd entry to its shadow sa and state record
173  */
174 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
175 {
176         int i;
177         dev->pdr = dma_alloc_coherent(dev->core_dev->device,
178                                       sizeof(struct ce_pd) * PPC4XX_NUM_PD,
179                                       &dev->pdr_pa, GFP_ATOMIC);
180         if (!dev->pdr)
181                 return -ENOMEM;
182
183         dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
184                                  GFP_KERNEL);
185         if (!dev->pdr_uinfo) {
186                 dma_free_coherent(dev->core_dev->device,
187                                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
188                                   dev->pdr,
189                                   dev->pdr_pa);
190                 return -ENOMEM;
191         }
192         memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
193         dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
194                                    sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
195                                    &dev->shadow_sa_pool_pa,
196                                    GFP_ATOMIC);
197         if (!dev->shadow_sa_pool)
198                 return -ENOMEM;
199
200         dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
201                          sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
202                          &dev->shadow_sr_pool_pa, GFP_ATOMIC);
203         if (!dev->shadow_sr_pool)
204                 return -ENOMEM;
205         for (i = 0; i < PPC4XX_NUM_PD; i++) {
206                 struct ce_pd *pd = &dev->pdr[i];
207                 struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
208
209                 pd->sa = dev->shadow_sa_pool_pa +
210                         sizeof(union shadow_sa_buf) * i;
211
212                 /* alloc 256 bytes which is enough for any kind of dynamic sa */
213                 pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
214
215                 /* alloc state record */
216                 pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
217                 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
218                     sizeof(struct sa_state_record) * i;
219         }
220
221         return 0;
222 }
223
224 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
225 {
226         if (dev->pdr)
227                 dma_free_coherent(dev->core_dev->device,
228                                   sizeof(struct ce_pd) * PPC4XX_NUM_PD,
229                                   dev->pdr, dev->pdr_pa);
230
231         if (dev->shadow_sa_pool)
232                 dma_free_coherent(dev->core_dev->device,
233                         sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
234                         dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
235
236         if (dev->shadow_sr_pool)
237                 dma_free_coherent(dev->core_dev->device,
238                         sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
239                         dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
240
241         kfree(dev->pdr_uinfo);
242 }
243
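/*
 * Reserve the next free packet descriptor slot (the ring head) and advance
 * the head, returning ERING_WAS_FULL when the ring is full. As the _nolock
 * suffix suggests, the caller is expected to hold core_dev->lock.
 */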
244 static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
245 {
246         u32 retval;
247         u32 tmp;
248
249         retval = dev->pdr_head;
250         tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
251
252         if (tmp == dev->pdr_tail)
253                 return ERING_WAS_FULL;
254
255         dev->pdr_head = tmp;
256
257         return retval;
258 }
259
260 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
261 {
262         struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
263         u32 tail;
264         unsigned long flags;
265
266         spin_lock_irqsave(&dev->core_dev->lock, flags);
267         pd_uinfo->state = PD_ENTRY_FREE;
268
269         if (dev->pdr_tail != PPC4XX_LAST_PD)
270                 dev->pdr_tail++;
271         else
272                 dev->pdr_tail = 0;
273         tail = dev->pdr_tail;
274         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
275
276         return tail;
277 }
278
279 /**
280  * alloc memory for the gather descriptor ring
281  * no need to alloc buffers for the ring, the gather descriptors
282  * point directly at the DMA-mapped source data
283  */
284 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
285 {
286         dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
287                                        sizeof(struct ce_gd) * PPC4XX_NUM_GD,
288                                        &dev->gdr_pa, GFP_ATOMIC);
289         if (!dev->gdr)
290                 return -ENOMEM;
291
292         return 0;
293 }
294
295 static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
296 {
297         dma_free_coherent(dev->core_dev->device,
298                           sizeof(struct ce_gd) * PPC4XX_NUM_GD,
299                           dev->gdr, dev->gdr_pa);
300 }
301
302 /*
303  * when this function is called,
304  * preemption or interrupts must be disabled
305  */
306 static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
307 {
308         u32 retval;
309         u32 tmp;
310
311         if (n >= PPC4XX_NUM_GD)
312                 return ERING_WAS_FULL;
313
314         retval = dev->gdr_head;
315         tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
316         if (dev->gdr_head > dev->gdr_tail) {
317                 if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
318                         return ERING_WAS_FULL;
319         } else if (dev->gdr_head < dev->gdr_tail) {
320                 if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
321                         return ERING_WAS_FULL;
322         }
323         dev->gdr_head = tmp;
324
325         return retval;
326 }
327
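/*
 * Return one gather descriptor to the ring by advancing the tail (with
 * wrap-around) under core_dev->lock; a no-op when the ring is already empty.
 */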
328 static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
329 {
330         unsigned long flags;
331
332         spin_lock_irqsave(&dev->core_dev->lock, flags);
333         if (dev->gdr_tail == dev->gdr_head) {
334                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
335                 return 0;
336         }
337
338         if (dev->gdr_tail != PPC4XX_LAST_GD)
339                 dev->gdr_tail++;
340         else
341                 dev->gdr_tail = 0;
342
343         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
344
345         return 0;
346 }
347
348 static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
349                                               dma_addr_t *gd_dma, u32 idx)
350 {
351         *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
352
353         return &dev->gdr[idx];
354 }
355
356 /**
357  * alloc memory for the scatter descriptor ring
358  * a scatter buffer must be allocated as well, since the scatter
359  * descriptors point into this driver-owned buffer
360  */
361 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
362 {
363         int i;
364
365         /* alloc memory for scatter descriptor ring */
366         dev->sdr = dma_alloc_coherent(dev->core_dev->device,
367                                       sizeof(struct ce_sd) * PPC4XX_NUM_SD,
368                                       &dev->sdr_pa, GFP_ATOMIC);
369         if (!dev->sdr)
370                 return -ENOMEM;
371
372         dev->scatter_buffer_va =
373                 dma_alloc_coherent(dev->core_dev->device,
374                         PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
375                         &dev->scatter_buffer_pa, GFP_ATOMIC);
376         if (!dev->scatter_buffer_va) {
377                 dma_free_coherent(dev->core_dev->device,
378                                   sizeof(struct ce_sd) * PPC4XX_NUM_SD,
379                                   dev->sdr, dev->sdr_pa);
380                 return -ENOMEM;
381         }
382
383         for (i = 0; i < PPC4XX_NUM_SD; i++) {
384                 dev->sdr[i].ptr = dev->scatter_buffer_pa +
385                                   PPC4XX_SD_BUFFER_SIZE * i;
386         }
387
388         return 0;
389 }
390
391 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
392 {
393         if (dev->sdr)
394                 dma_free_coherent(dev->core_dev->device,
395                                   sizeof(struct ce_sd) * PPC4XX_NUM_SD,
396                                   dev->sdr, dev->sdr_pa);
397
398         if (dev->scatter_buffer_va)
399                 dma_free_coherent(dev->core_dev->device,
400                                   PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
401                                   dev->scatter_buffer_va,
402                                   dev->scatter_buffer_pa);
403 }
404
405 /*
406  * when this function is called,
407  * preemption or interrupts must be disabled
408  */
409 static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
410 {
411         u32 retval;
412         u32 tmp;
413
414         if (n >= PPC4XX_NUM_SD)
415                 return ERING_WAS_FULL;
416
417         retval = dev->sdr_head;
418         tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
419         if (dev->sdr_head > dev->sdr_tail) {
420                 if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
421                         return ERING_WAS_FULL;
422         } else if (dev->sdr_head < dev->sdr_tail) {
423                 if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
424                         return ERING_WAS_FULL;
425         } /* the head == tail (empty) case is already taken care of */
426         dev->sdr_head = tmp;
427
428         return retval;
429 }
430
431 static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
432 {
433         unsigned long flags;
434
435         spin_lock_irqsave(&dev->core_dev->lock, flags);
436         if (dev->sdr_tail == dev->sdr_head) {
437                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
438                 return 0;
439         }
440         if (dev->sdr_tail != PPC4XX_LAST_SD)
441                 dev->sdr_tail++;
442         else
443                 dev->sdr_tail = 0;
444         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
445
446         return 0;
447 }
448
449 static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
450                                               dma_addr_t *sd_dma, u32 idx)
451 {
452         *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
453
454         return &dev->sdr[idx];
455 }
456
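/*
 * Copy processed data out of the driver's scatter buffers back into the
 * caller's destination scatterlist once the packet engine has finished.
 */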
457 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
458                                       struct ce_pd *pd,
459                                       struct pd_uinfo *pd_uinfo,
460                                       u32 nbytes,
461                                       struct scatterlist *dst)
462 {
463         unsigned int first_sd = pd_uinfo->first_sd;
464         unsigned int last_sd;
465         unsigned int overflow = 0;
466         unsigned int to_copy;
467         unsigned int dst_start = 0;
468
469         /*
470          * Because the scatter buffers are all neatly organized in one
471          * big contiguous ringbuffer, scatterwalk_map_and_copy() can
472          * be instructed to copy a range of buffers in one go.
473          */
474
475         last_sd = (first_sd + pd_uinfo->num_sd);
476         if (last_sd > PPC4XX_LAST_SD) {
477                 last_sd = PPC4XX_LAST_SD;
478                 overflow = last_sd % PPC4XX_NUM_SD;
479         }
480
481         while (nbytes) {
482                 void *buf = dev->scatter_buffer_va +
483                         first_sd * PPC4XX_SD_BUFFER_SIZE;
484
485                 to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
486                                       (1 + last_sd - first_sd));
487                 scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
488                 nbytes -= to_copy;
489
490                 if (overflow) {
491                         first_sd = 0;
492                         last_sd = overflow;
493                         dst_start += to_copy;
494                         overflow = 0;
495                 }
496         }
497 }
498
499 static void crypto4xx_copy_digest_to_dst(void *dst,
500                                         struct pd_uinfo *pd_uinfo,
501                                         struct crypto4xx_ctx *ctx)
502 {
503         struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
504
505         if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
506                 memcpy(dst, pd_uinfo->sr_va->save_digest,
507                        SA_HASH_ALG_SHA1_DIGEST_SIZE);
508         }
509 }
510
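/*
 * Release the gather and scatter descriptors used by a completed request
 * back to their rings and mark the pd_uinfo entries as unused.
 */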
511 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
512                                   struct pd_uinfo *pd_uinfo)
513 {
514         int i;
515         if (pd_uinfo->num_gd) {
516                 for (i = 0; i < pd_uinfo->num_gd; i++)
517                         crypto4xx_put_gd_to_gdr(dev);
518                 pd_uinfo->first_gd = 0xffffffff;
519                 pd_uinfo->num_gd = 0;
520         }
521         if (pd_uinfo->num_sd) {
522                 for (i = 0; i < pd_uinfo->num_sd; i++)
523                         crypto4xx_put_sd_to_sdr(dev);
524
525                 pd_uinfo->first_sd = 0xffffffff;
526                 pd_uinfo->num_sd = 0;
527         }
528 }
529
530 static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
531                                      struct pd_uinfo *pd_uinfo,
532                                      struct ce_pd *pd)
533 {
534         struct skcipher_request *req;
535         struct scatterlist *dst;
536         dma_addr_t addr;
537
538         req = skcipher_request_cast(pd_uinfo->async_req);
539
540         if (pd_uinfo->using_sd) {
541                 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
542                                           req->cryptlen, req->dst);
543         } else {
544                 dst = pd_uinfo->dest_va;
545                 addr = dma_map_page(dev->core_dev->device, sg_page(dst),
546                                     dst->offset, dst->length, DMA_FROM_DEVICE);
547         }
548
549         if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
550                 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
551
552                 crypto4xx_memcpy_from_le32((u32 *)req->iv,
553                         pd_uinfo->sr_va->save_iv,
554                         crypto_skcipher_ivsize(skcipher));
555         }
556
557         crypto4xx_ret_sg_desc(dev, pd_uinfo);
558
559         if (pd_uinfo->state & PD_ENTRY_BUSY)
560                 skcipher_request_complete(req, -EINPROGRESS);
561         skcipher_request_complete(req, 0);
562 }
563
564 static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
565                                 struct pd_uinfo *pd_uinfo)
566 {
567         struct crypto4xx_ctx *ctx;
568         struct ahash_request *ahash_req;
569
570         ahash_req = ahash_request_cast(pd_uinfo->async_req);
571         ctx = crypto_tfm_ctx(ahash_req->base.tfm);
572
573         crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
574                                      ctx);
575         crypto4xx_ret_sg_desc(dev, pd_uinfo);
576
577         if (pd_uinfo->state & PD_ENTRY_BUSY)
578                 ahash_request_complete(ahash_req, -EINPROGRESS);
579         ahash_request_complete(ahash_req, 0);
580 }
581
582 static void crypto4xx_aead_done(struct crypto4xx_device *dev,
583                                 struct pd_uinfo *pd_uinfo,
584                                 struct ce_pd *pd)
585 {
586         struct aead_request *aead_req = container_of(pd_uinfo->async_req,
587                 struct aead_request, base);
588         struct scatterlist *dst = pd_uinfo->dest_va;
589         size_t cp_len = crypto_aead_authsize(
590                 crypto_aead_reqtfm(aead_req));
591         u32 icv[AES_BLOCK_SIZE];
592         int err = 0;
593
594         if (pd_uinfo->using_sd) {
595                 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
596                                           pd->pd_ctl_len.bf.pkt_len,
597                                           dst);
598         } else {
599                 dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
600                                 DMA_FROM_DEVICE);
601         }
602
603         if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
604                 /* append icv at the end */
605                 crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
606                                            sizeof(icv));
607
608                 scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
609                                          cp_len, 1);
610         } else {
611                 /* check icv at the end */
612                 scatterwalk_map_and_copy(icv, aead_req->src,
613                         aead_req->assoclen + aead_req->cryptlen -
614                         cp_len, cp_len, 0);
615
616                 crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
617
618                 if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
619                         err = -EBADMSG;
620         }
621
622         crypto4xx_ret_sg_desc(dev, pd_uinfo);
623
624         if (pd->pd_ctl.bf.status & 0xff) {
625                 if (!__ratelimit(&dev->aead_ratelimit)) {
626                         if (pd->pd_ctl.bf.status & 2)
627                                 pr_err("pad fail error\n");
628                         if (pd->pd_ctl.bf.status & 4)
629                                 pr_err("seqnum fail\n");
630                         if (pd->pd_ctl.bf.status & 8)
631                                 pr_err("error _notify\n");
632                         pr_err("aead return err status = 0x%02x\n",
633                                 pd->pd_ctl.bf.status & 0xff);
634                         pr_err("pd pad_ctl = 0x%08x\n",
635                                 pd->pd_ctl.bf.pd_pad_ctl);
636                 }
637                 err = -EINVAL;
638         }
639
640         if (pd_uinfo->state & PD_ENTRY_BUSY)
641                 aead_request_complete(aead_req, -EINPROGRESS);
642
643         aead_request_complete(aead_req, err);
644 }
645
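/*
 * Completion dispatch: hand the finished packet descriptor to the
 * skcipher, AEAD or ahash specific completion handler.
 */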
646 static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
647 {
648         struct ce_pd *pd = &dev->pdr[idx];
649         struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
650
651         switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
652         case CRYPTO_ALG_TYPE_SKCIPHER:
653                 crypto4xx_cipher_done(dev, pd_uinfo, pd);
654                 break;
655         case CRYPTO_ALG_TYPE_AEAD:
656                 crypto4xx_aead_done(dev, pd_uinfo, pd);
657                 break;
658         case CRYPTO_ALG_TYPE_AHASH:
659                 crypto4xx_ahash_done(dev, pd_uinfo);
660                 break;
661         }
662 }
663
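/*
 * Tear down all descriptor rings, unmap the register window and free the
 * device structures; used on the remove path.
 */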
664 static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
665 {
666         crypto4xx_destroy_pdr(core_dev->dev);
667         crypto4xx_destroy_gdr(core_dev->dev);
668         crypto4xx_destroy_sdr(core_dev->dev);
669         iounmap(core_dev->dev->ce_base);
670         kfree(core_dev->dev);
671         kfree(core_dev);
672 }
673
674 static u32 get_next_gd(u32 current)
675 {
676         if (current != PPC4XX_LAST_GD)
677                 return current + 1;
678         else
679                 return 0;
680 }
681
682 static u32 get_next_sd(u32 current)
683 {
684         if (current != PPC4XX_LAST_SD)
685                 return current + 1;
686         else
687                 return 0;
688 }
689
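/*
 * Build a packet descriptor for a request: reserve gather/scatter
 * descriptors and a pd slot under core_dev->lock, fill in the SA, source
 * and destination, then kick the engine. Returns -EINPROGRESS once the
 * request is queued (-EBUSY if the caller should back off), or -EAGAIN
 * when a ring is full.
 */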
690 int crypto4xx_build_pd(struct crypto_async_request *req,
691                        struct crypto4xx_ctx *ctx,
692                        struct scatterlist *src,
693                        struct scatterlist *dst,
694                        const unsigned int datalen,
695                        const __le32 *iv, const u32 iv_len,
696                        const struct dynamic_sa_ctl *req_sa,
697                        const unsigned int sa_len,
698                        const unsigned int assoclen,
699                        struct scatterlist *_dst)
700 {
701         struct crypto4xx_device *dev = ctx->dev;
702         struct dynamic_sa_ctl *sa;
703         struct ce_gd *gd;
704         struct ce_pd *pd;
705         u32 num_gd, num_sd;
706         u32 fst_gd = 0xffffffff;
707         u32 fst_sd = 0xffffffff;
708         u32 pd_entry;
709         unsigned long flags;
710         struct pd_uinfo *pd_uinfo;
711         unsigned int nbytes = datalen;
712         size_t offset_to_sr_ptr;
713         u32 gd_idx = 0;
714         int tmp;
715         bool is_busy;
716
717         /* figure out how many gd are needed */
718         tmp = sg_nents_for_len(src, assoclen + datalen);
719         if (tmp < 0) {
720                 dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
721                 return tmp;
722         }
723         if (tmp == 1)
724                 tmp = 0;
725         num_gd = tmp;
726
727         if (assoclen) {
728                 nbytes += assoclen;
729                 dst = scatterwalk_ffwd(_dst, dst, assoclen);
730         }
731
732         /* figure out how many sd are needed */
733         if (sg_is_last(dst)) {
734                 num_sd = 0;
735         } else {
736                 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
737                         num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
738                         if (datalen % PPC4XX_SD_BUFFER_SIZE)
739                                 num_sd++;
740                 } else {
741                         num_sd = 1;
742                 }
743         }
744
745         /*
746          * The following section of code needs to be protected:
747          * the gather ring and scatter ring entries need to be consecutive.
748          * If we run out of any kind of descriptor, the descriptors
749          * already taken must be returned to their original place.
750          */
751         spin_lock_irqsave(&dev->core_dev->lock, flags);
752         /*
753          * Let the caller know to slow down, once more than 13/16ths = 81%
754          * of the available data contexts are being used simultaneously.
755          *
756          * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
757          * 31 more contexts before new requests have to be rejected.
758          */
759         if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
760                 is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
761                         ((PPC4XX_NUM_PD * 13) / 16);
762         } else {
763                 /*
764                  * To fix contention issues between ipsec (no backlog) and
765                  * dm-crypt (backlog), reserve 32 entries for "no backlog"
766                  * data contexts.
767                  */
768                 is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
769                         ((PPC4XX_NUM_PD * 15) / 16);
770
771                 if (is_busy) {
772                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
773                         return -EBUSY;
774                 }
775         }
776
777         if (num_gd) {
778                 fst_gd = crypto4xx_get_n_gd(dev, num_gd);
779                 if (fst_gd == ERING_WAS_FULL) {
780                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
781                         return -EAGAIN;
782                 }
783         }
784         if (num_sd) {
785                 fst_sd = crypto4xx_get_n_sd(dev, num_sd);
786                 if (fst_sd == ERING_WAS_FULL) {
787                         if (num_gd)
788                                 dev->gdr_head = fst_gd;
789                         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
790                         return -EAGAIN;
791                 }
792         }
793         pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
794         if (pd_entry == ERING_WAS_FULL) {
795                 if (num_gd)
796                         dev->gdr_head = fst_gd;
797                 if (num_sd)
798                         dev->sdr_head = fst_sd;
799                 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
800                 return -EAGAIN;
801         }
802         spin_unlock_irqrestore(&dev->core_dev->lock, flags);
803
804         pd = &dev->pdr[pd_entry];
805         pd->sa_len = sa_len;
806
807         pd_uinfo = &dev->pdr_uinfo[pd_entry];
808         pd_uinfo->async_req = req;
809         pd_uinfo->num_gd = num_gd;
810         pd_uinfo->num_sd = num_sd;
811
812         if (iv_len)
813                 memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
814
815         sa = pd_uinfo->sa_va;
816         memcpy(sa, req_sa, sa_len * 4);
817
818         sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
819         offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
820         *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
821
822         if (num_gd) {
823                 dma_addr_t gd_dma;
824                 struct scatterlist *sg;
825
826                 /* get first gd we are going to use */
827                 gd_idx = fst_gd;
828                 pd_uinfo->first_gd = fst_gd;
829                 pd_uinfo->num_gd = num_gd;
830                 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
831                 pd->src = gd_dma;
832                 /* enable gather */
833                 sa->sa_command_0.bf.gather = 1;
834                 /* walk the sg, and setup gather array */
835
836                 sg = src;
837                 while (nbytes) {
838                         size_t len;
839
840                         len = min(sg->length, nbytes);
841                         gd->ptr = dma_map_page(dev->core_dev->device,
842                                 sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
843                         gd->ctl_len.len = len;
844                         gd->ctl_len.done = 0;
845                         gd->ctl_len.ready = 1;
846                         if (len >= nbytes)
847                                 break;
848
849                         nbytes -= sg->length;
850                         gd_idx = get_next_gd(gd_idx);
851                         gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
852                         sg = sg_next(sg);
853                 }
854         } else {
855                 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
856                                 src->offset, min(nbytes, src->length),
857                                 DMA_TO_DEVICE);
858                 /*
859                  * Disable gather in sa command
860                  */
861                 sa->sa_command_0.bf.gather = 0;
862                 /*
863                  * Indicate gather array is not used
864                  */
865                 pd_uinfo->first_gd = 0xffffffff;
866                 pd_uinfo->num_gd = 0;
867         }
868         if (sg_is_last(dst)) {
869                 /*
870                  * we know the application gave us dst as one whole piece of memory,
871                  * so there is no need to use the scatter ring.
872                  */
873                 pd_uinfo->using_sd = 0;
874                 pd_uinfo->first_sd = 0xffffffff;
875                 pd_uinfo->num_sd = 0;
876                 pd_uinfo->dest_va = dst;
877                 sa->sa_command_0.bf.scatter = 0;
878                 pd->dest = (u32)dma_map_page(dev->core_dev->device,
879                                              sg_page(dst), dst->offset,
880                                              min(datalen, dst->length),
881                                              DMA_TO_DEVICE);
882         } else {
883                 dma_addr_t sd_dma;
884                 struct ce_sd *sd = NULL;
885
886                 u32 sd_idx = fst_sd;
887                 nbytes = datalen;
888                 sa->sa_command_0.bf.scatter = 1;
889                 pd_uinfo->using_sd = 1;
890                 pd_uinfo->dest_va = dst;
891                 pd_uinfo->first_sd = fst_sd;
892                 pd_uinfo->num_sd = num_sd;
893                 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
894                 pd->dest = sd_dma;
895                 /* setup scatter descriptor */
896                 sd->ctl.done = 0;
897                 sd->ctl.rdy = 1;
898                 /* sd->ptr was already set up by crypto4xx_build_sdr() */
899                 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
900                         nbytes -= PPC4XX_SD_BUFFER_SIZE;
901                 else
902                         nbytes = 0;
903                 while (nbytes) {
904                         sd_idx = get_next_sd(sd_idx);
905                         sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
906                         /* setup scatter descriptor */
907                         sd->ctl.done = 0;
908                         sd->ctl.rdy = 1;
909                         if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
910                                 nbytes -= PPC4XX_SD_BUFFER_SIZE;
911                         } else {
912                                 /*
913                                  * An SD entry can hold PPC4XX_SD_BUFFER_SIZE bytes,
914                                  * which is more than nbytes, so we are done.
915                                  */
916                                 nbytes = 0;
917                         }
918                 }
919         }
920
921         pd->pd_ctl.w = PD_CTL_HOST_READY |
922                 ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
923                  (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
924                         PD_CTL_HASH_FINAL : 0);
925         pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
926         pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
927
928         wmb();
929         /* write any value to push the engine to read a pd */
930         writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
931         writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
932         return is_busy ? -EBUSY : -EINPROGRESS;
933 }
934
935 /**
936  * Algorithm Registration Functions
937  */
938 static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
939                                struct crypto4xx_ctx *ctx)
940 {
941         ctx->dev = amcc_alg->dev;
942         ctx->sa_in = NULL;
943         ctx->sa_out = NULL;
944         ctx->sa_len = 0;
945 }
946
947 static int crypto4xx_sk_init(struct crypto_skcipher *sk)
948 {
949         struct skcipher_alg *alg = crypto_skcipher_alg(sk);
950         struct crypto4xx_alg *amcc_alg;
951         struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
952
953         if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
954                 ctx->sw_cipher.cipher =
955                         crypto_alloc_skcipher(alg->base.cra_name, 0,
956                                               CRYPTO_ALG_NEED_FALLBACK |
957                                               CRYPTO_ALG_ASYNC);
958                 if (IS_ERR(ctx->sw_cipher.cipher))
959                         return PTR_ERR(ctx->sw_cipher.cipher);
960
961                 crypto_skcipher_set_reqsize(sk,
962                         sizeof(struct skcipher_request) + 32 +
963                         crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
964         }
965
966         amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
967         crypto4xx_ctx_init(amcc_alg, ctx);
968         return 0;
969 }
970
971 static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
972 {
973         crypto4xx_free_sa(ctx);
974 }
975
976 static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
977 {
978         struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
979
980         crypto4xx_common_exit(ctx);
981         if (ctx->sw_cipher.cipher)
982                 crypto_free_skcipher(ctx->sw_cipher.cipher);
983 }
984
985 static int crypto4xx_aead_init(struct crypto_aead *tfm)
986 {
987         struct aead_alg *alg = crypto_aead_alg(tfm);
988         struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
989         struct crypto4xx_alg *amcc_alg;
990
991         ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
992                                                 CRYPTO_ALG_NEED_FALLBACK |
993                                                 CRYPTO_ALG_ASYNC);
994         if (IS_ERR(ctx->sw_cipher.aead))
995                 return PTR_ERR(ctx->sw_cipher.aead);
996
997         amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
998         crypto4xx_ctx_init(amcc_alg, ctx);
999         crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
1000                                 crypto_aead_reqsize(ctx->sw_cipher.aead),
1001                                 sizeof(struct crypto4xx_aead_reqctx)));
1002         return 0;
1003 }
1004
1005 static void crypto4xx_aead_exit(struct crypto_aead *tfm)
1006 {
1007         struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
1008
1009         crypto4xx_common_exit(ctx);
1010         crypto_free_aead(ctx->sw_cipher.aead);
1011 }
1012
1013 static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1014                                   struct crypto4xx_alg_common *crypto_alg,
1015                                   int array_size)
1016 {
1017         struct crypto4xx_alg *alg;
1018         int i;
1019         int rc = 0;
1020
1021         for (i = 0; i < array_size; i++) {
1022                 alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1023                 if (!alg)
1024                         return -ENOMEM;
1025
1026                 alg->alg = crypto_alg[i];
1027                 alg->dev = sec_dev;
1028
1029                 switch (alg->alg.type) {
1030                 case CRYPTO_ALG_TYPE_AEAD:
1031                         rc = crypto_register_aead(&alg->alg.u.aead);
1032                         break;
1033
1034                 case CRYPTO_ALG_TYPE_AHASH:
1035                         rc = crypto_register_ahash(&alg->alg.u.hash);
1036                         break;
1037
1038                 default:
1039                         rc = crypto_register_skcipher(&alg->alg.u.cipher);
1040                         break;
1041                 }
1042
1043                 if (rc)
1044                         kfree(alg);
1045                 else
1046                         list_add_tail(&alg->entry, &sec_dev->alg_list);
1047         }
1048
1049         return 0;
1050 }
1051
1052 static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1053 {
1054         struct crypto4xx_alg *alg, *tmp;
1055
1056         list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1057                 list_del(&alg->entry);
1058                 switch (alg->alg.type) {
1059                 case CRYPTO_ALG_TYPE_AHASH:
1060                         crypto_unregister_ahash(&alg->alg.u.hash);
1061                         break;
1062
1063                 case CRYPTO_ALG_TYPE_AEAD:
1064                         crypto_unregister_aead(&alg->alg.u.aead);
1065                         break;
1066
1067                 default:
1068                         crypto_unregister_skcipher(&alg->alg.u.cipher);
1069                 }
1070                 kfree(alg);
1071         }
1072 }
1073
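/*
 * Bottom half: walk the packet descriptor ring from tail towards head and
 * complete every descriptor the engine has marked done, stopping at the
 * first one that is still pending.
 */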
1074 static void crypto4xx_bh_tasklet_cb(unsigned long data)
1075 {
1076         struct device *dev = (struct device *)data;
1077         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1078         struct pd_uinfo *pd_uinfo;
1079         struct ce_pd *pd;
1080         u32 tail = core_dev->dev->pdr_tail;
1081         u32 head = core_dev->dev->pdr_head;
1082
1083         do {
1084                 pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
1085                 pd = &core_dev->dev->pdr[tail];
1086                 if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
1087                      ((READ_ONCE(pd->pd_ctl.w) &
1088                        (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
1089                        PD_CTL_PE_DONE)) {
1090                         crypto4xx_pd_done(core_dev->dev, tail);
1091                         tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1092                 } else {
1093                         /* if tail not done, break */
1094                         break;
1095                 }
1096         } while (head != tail);
1097 }
1098
1099 /**
1100  * Top half of the ISR: acknowledge the interrupt and schedule the bottom half.
1101  */
1102 static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
1103                                                       u32 clr_val)
1104 {
1105         struct device *dev = (struct device *)data;
1106         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1107
1108         writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1109         tasklet_schedule(&core_dev->tasklet);
1110
1111         return IRQ_HANDLED;
1112 }
1113
1114 static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1115 {
1116         return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
1117 }
1118
1119 static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
1120 {
1121         return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
1122                 PPC4XX_TMO_ERR_INT);
1123 }
1124
1125 /**
1126  * Supported Crypto Algorithms
1127  */
1128 static struct crypto4xx_alg_common crypto4xx_alg[] = {
1129         /* Crypto AES modes */
1130         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1131                 .base = {
1132                         .cra_name = "cbc(aes)",
1133                         .cra_driver_name = "cbc-aes-ppc4xx",
1134                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1135                         .cra_flags = CRYPTO_ALG_ASYNC |
1136                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1137                         .cra_blocksize = AES_BLOCK_SIZE,
1138                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1139                         .cra_module = THIS_MODULE,
1140                 },
1141                 .min_keysize = AES_MIN_KEY_SIZE,
1142                 .max_keysize = AES_MAX_KEY_SIZE,
1143                 .ivsize = AES_IV_SIZE,
1144                 .setkey = crypto4xx_setkey_aes_cbc,
1145                 .encrypt = crypto4xx_encrypt_iv,
1146                 .decrypt = crypto4xx_decrypt_iv,
1147                 .init = crypto4xx_sk_init,
1148                 .exit = crypto4xx_sk_exit,
1149         } },
1150         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1151                 .base = {
1152                         .cra_name = "cfb(aes)",
1153                         .cra_driver_name = "cfb-aes-ppc4xx",
1154                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1155                         .cra_flags = CRYPTO_ALG_ASYNC |
1156                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1157                         .cra_blocksize = AES_BLOCK_SIZE,
1158                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1159                         .cra_module = THIS_MODULE,
1160                 },
1161                 .min_keysize = AES_MIN_KEY_SIZE,
1162                 .max_keysize = AES_MAX_KEY_SIZE,
1163                 .ivsize = AES_IV_SIZE,
1164                 .setkey = crypto4xx_setkey_aes_cfb,
1165                 .encrypt = crypto4xx_encrypt_iv,
1166                 .decrypt = crypto4xx_decrypt_iv,
1167                 .init = crypto4xx_sk_init,
1168                 .exit = crypto4xx_sk_exit,
1169         } },
1170         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1171                 .base = {
1172                         .cra_name = "ctr(aes)",
1173                         .cra_driver_name = "ctr-aes-ppc4xx",
1174                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1175                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1176                                 CRYPTO_ALG_ASYNC |
1177                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1178                         .cra_blocksize = AES_BLOCK_SIZE,
1179                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1180                         .cra_module = THIS_MODULE,
1181                 },
1182                 .min_keysize = AES_MIN_KEY_SIZE,
1183                 .max_keysize = AES_MAX_KEY_SIZE,
1184                 .ivsize = AES_IV_SIZE,
1185                 .setkey = crypto4xx_setkey_aes_ctr,
1186                 .encrypt = crypto4xx_encrypt_ctr,
1187                 .decrypt = crypto4xx_decrypt_ctr,
1188                 .init = crypto4xx_sk_init,
1189                 .exit = crypto4xx_sk_exit,
1190         } },
1191         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1192                 .base = {
1193                         .cra_name = "rfc3686(ctr(aes))",
1194                         .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
1195                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1196                         .cra_flags = CRYPTO_ALG_ASYNC |
1197                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1198                         .cra_blocksize = AES_BLOCK_SIZE,
1199                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1200                         .cra_module = THIS_MODULE,
1201                 },
1202                 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1203                 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
1204                 .ivsize = CTR_RFC3686_IV_SIZE,
1205                 .setkey = crypto4xx_setkey_rfc3686,
1206                 .encrypt = crypto4xx_rfc3686_encrypt,
1207                 .decrypt = crypto4xx_rfc3686_decrypt,
1208                 .init = crypto4xx_sk_init,
1209                 .exit = crypto4xx_sk_exit,
1210         } },
1211         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1212                 .base = {
1213                         .cra_name = "ecb(aes)",
1214                         .cra_driver_name = "ecb-aes-ppc4xx",
1215                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1216                         .cra_flags = CRYPTO_ALG_ASYNC |
1217                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1218                         .cra_blocksize = AES_BLOCK_SIZE,
1219                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1220                         .cra_module = THIS_MODULE,
1221                 },
1222                 .min_keysize = AES_MIN_KEY_SIZE,
1223                 .max_keysize = AES_MAX_KEY_SIZE,
1224                 .setkey = crypto4xx_setkey_aes_ecb,
1225                 .encrypt = crypto4xx_encrypt_noiv,
1226                 .decrypt = crypto4xx_decrypt_noiv,
1227                 .init = crypto4xx_sk_init,
1228                 .exit = crypto4xx_sk_exit,
1229         } },
1230         { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
1231                 .base = {
1232                         .cra_name = "ofb(aes)",
1233                         .cra_driver_name = "ofb-aes-ppc4xx",
1234                         .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1235                         .cra_flags = CRYPTO_ALG_ASYNC |
1236                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
1237                         .cra_blocksize = AES_BLOCK_SIZE,
1238                         .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1239                         .cra_module = THIS_MODULE,
1240                 },
1241                 .min_keysize = AES_MIN_KEY_SIZE,
1242                 .max_keysize = AES_MAX_KEY_SIZE,
1243                 .ivsize = AES_IV_SIZE,
1244                 .setkey = crypto4xx_setkey_aes_ofb,
1245                 .encrypt = crypto4xx_encrypt_iv,
1246                 .decrypt = crypto4xx_decrypt_iv,
1247                 .init = crypto4xx_sk_init,
1248                 .exit = crypto4xx_sk_exit,
1249         } },
1250
1251         /* AEAD */
1252         { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1253                 .setkey         = crypto4xx_setkey_aes_ccm,
1254                 .setauthsize    = crypto4xx_setauthsize_aead,
1255                 .encrypt        = crypto4xx_encrypt_aes_ccm,
1256                 .decrypt        = crypto4xx_decrypt_aes_ccm,
1257                 .init           = crypto4xx_aead_init,
1258                 .exit           = crypto4xx_aead_exit,
1259                 .ivsize         = AES_BLOCK_SIZE,
1260                 .maxauthsize    = 16,
1261                 .base = {
1262                         .cra_name       = "ccm(aes)",
1263                         .cra_driver_name = "ccm-aes-ppc4xx",
1264                         .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1265                         .cra_flags      = CRYPTO_ALG_ASYNC |
1266                                           CRYPTO_ALG_NEED_FALLBACK |
1267                                           CRYPTO_ALG_KERN_DRIVER_ONLY,
1268                         .cra_blocksize  = 1,
1269                         .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1270                         .cra_module     = THIS_MODULE,
1271                 },
1272         } },
1273         { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
1274                 .setkey         = crypto4xx_setkey_aes_gcm,
1275                 .setauthsize    = crypto4xx_setauthsize_aead,
1276                 .encrypt        = crypto4xx_encrypt_aes_gcm,
1277                 .decrypt        = crypto4xx_decrypt_aes_gcm,
1278                 .init           = crypto4xx_aead_init,
1279                 .exit           = crypto4xx_aead_exit,
1280                 .ivsize         = GCM_AES_IV_SIZE,
1281                 .maxauthsize    = 16,
1282                 .base = {
1283                         .cra_name       = "gcm(aes)",
1284                         .cra_driver_name = "gcm-aes-ppc4xx",
1285                         .cra_priority   = CRYPTO4XX_CRYPTO_PRIORITY,
1286                         .cra_flags      = CRYPTO_ALG_ASYNC |
1287                                           CRYPTO_ALG_NEED_FALLBACK |
1288                                           CRYPTO_ALG_KERN_DRIVER_ONLY,
1289                         .cra_blocksize  = 1,
1290                         .cra_ctxsize    = sizeof(struct crypto4xx_ctx),
1291                         .cra_module     = THIS_MODULE,
1292                 },
1293         } },
1294 };
1295
1296 /**
1297  * Module Initialization Routine
1298  */
1299 static int crypto4xx_probe(struct platform_device *ofdev)
1300 {
1301         int rc;
1302         struct resource res;
1303         struct device *dev = &ofdev->dev;
1304         struct crypto4xx_core_device *core_dev;
1305         u32 pvr;
1306         bool is_revb = true;
1307
1308         rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1309         if (rc)
1310                 return -ENODEV;
1311
1312         if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1313                 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1314                        mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1315                 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1316                        mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1317         } else if (of_find_compatible_node(NULL, NULL,
1318                         "amcc,ppc405ex-crypto")) {
1319                 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1320                        mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1321                 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1322                        mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1323                 is_revb = false;
1324         } else if (of_find_compatible_node(NULL, NULL,
1325                         "amcc,ppc460sx-crypto")) {
1326                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1327                        mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1328                 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1329                        mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1330         } else {
1331                 printk(KERN_ERR "Crypto Function Not supported!\n");
1332                 return -EINVAL;
1333         }
1334
1335         core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1336         if (!core_dev)
1337                 return -ENOMEM;
1338
1339         dev_set_drvdata(dev, core_dev);
1340         core_dev->ofdev = ofdev;
1341         core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1342         rc = -ENOMEM;
1343         if (!core_dev->dev)
1344                 goto err_alloc_dev;
1345
1346         /*
1347          * Older versions of the 460EX/GT have a hardware bug and
1348          * hence do not support H/W based security interrupt coalescing
1349          */
1350         pvr = mfspr(SPRN_PVR);
1351         if (is_revb && ((pvr >> 4) == 0x130218A)) {
1352                 u32 min = PVR_MIN(pvr);
1353
1354                 if (min < 4) {
1355                         dev_info(dev, "RevA detected - disable interrupt coalescing\n");
1356                         is_revb = false;
1357                 }
1358         }
1359
1360         core_dev->dev->core_dev = core_dev;
1361         core_dev->dev->is_revb = is_revb;
1362         core_dev->device = dev;
1363         spin_lock_init(&core_dev->lock);
1364         INIT_LIST_HEAD(&core_dev->dev->alg_list);
1365         ratelimit_default_init(&core_dev->dev->aead_ratelimit);
1366         rc = crypto4xx_build_pdr(core_dev->dev);
1367         if (rc)
1368                 goto err_build_pdr;
1369
1370         rc = crypto4xx_build_gdr(core_dev->dev);
1371         if (rc)
1372                 goto err_build_pdr;
1373
1374         rc = crypto4xx_build_sdr(core_dev->dev);
1375         if (rc)
1376                 goto err_build_sdr;
1377
1378         /* Init tasklet for bottom half processing */
1379         tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1380                      (unsigned long) dev);
1381
1382         core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1383         if (!core_dev->dev->ce_base) {
1384                 dev_err(dev, "failed to of_iomap\n");
1385                 rc = -ENOMEM;
1386                 goto err_iomap;
1387         }
1388
1389         /* Register for Crypto isr, Crypto Engine IRQ */
1390         core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1391         rc = request_irq(core_dev->irq, is_revb ?
1392                          crypto4xx_ce_interrupt_handler_revb :
1393                          crypto4xx_ce_interrupt_handler, 0,
1394                          KBUILD_MODNAME, dev);
1395         if (rc)
1396                 goto err_request_irq;
1397
1398         /* need to setup pdr, rdr, gdr and sdr before this */
1399         crypto4xx_hw_init(core_dev->dev);
1400
1401         /* Register security algorithms with Linux CryptoAPI */
1402         rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1403                                ARRAY_SIZE(crypto4xx_alg));
1404         if (rc)
1405                 goto err_start_dev;
1406
1407         ppc4xx_trng_probe(core_dev);
1408         return 0;
1409
1410 err_start_dev:
1411         free_irq(core_dev->irq, dev);
1412 err_request_irq:
1413         irq_dispose_mapping(core_dev->irq);
1414         iounmap(core_dev->dev->ce_base);
1415 err_iomap:
1416         tasklet_kill(&core_dev->tasklet);
1417 err_build_sdr:
1418         crypto4xx_destroy_sdr(core_dev->dev);
1419         crypto4xx_destroy_gdr(core_dev->dev);
1420 err_build_pdr:
1421         crypto4xx_destroy_pdr(core_dev->dev);
1422         kfree(core_dev->dev);
1423 err_alloc_dev:
1424         kfree(core_dev);
1425
1426         return rc;
1427 }
1428
1429 static int crypto4xx_remove(struct platform_device *ofdev)
1430 {
1431         struct device *dev = &ofdev->dev;
1432         struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1433
1434         ppc4xx_trng_remove(core_dev);
1435
1436         free_irq(core_dev->irq, dev);
1437         irq_dispose_mapping(core_dev->irq);
1438
1439         tasklet_kill(&core_dev->tasklet);
1440         /* Un-register with Linux CryptoAPI */
1441         crypto4xx_unregister_alg(core_dev->dev);
1442         /* Free all allocated memory */
1443         crypto4xx_stop_all(core_dev);
1444
1445         return 0;
1446 }
1447
1448 static const struct of_device_id crypto4xx_match[] = {
1449         { .compatible      = "amcc,ppc4xx-crypto",},
1450         { },
1451 };
1452 MODULE_DEVICE_TABLE(of, crypto4xx_match);
1453
1454 static struct platform_driver crypto4xx_driver = {
1455         .driver = {
1456                 .name = KBUILD_MODNAME,
1457                 .of_match_table = crypto4xx_match,
1458         },
1459         .probe          = crypto4xx_probe,
1460         .remove         = crypto4xx_remove,
1461 };
1462
1463 module_platform_driver(crypto4xx_driver);
1464
1465 MODULE_LICENSE("GPL");
1466 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1467 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");