crypto: seqiv - Add seqniv
crypto/seqiv.c [uclinux-h8/linux.git]
/*
 * seqiv: Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt.  This algorithm is mainly useful for CTR and similar modes.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/null.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

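/*
 * Per-instance state.  The flexible salt[] arrays are sized by the IV
 * length when an instance is created (ivsize is added to cra_ctxsize in
 * the alloc routines below); the locks only guard the one-time salt
 * initialisation done by the *_first handlers.  seqiv_aead_ctx, used by
 * the new-style AEAD paths, additionally holds the inner AEAD transform
 * and a null skcipher used to copy src to dst for in-place operation.
 */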
struct seqiv_ctx {
        spinlock_t lock;
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

struct seqiv_aead_ctx {
        struct crypto_aead *child;
        spinlock_t lock;
        struct crypto_blkcipher *null;
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

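/* Key and authentication-tag size handling is delegated to the inner AEAD. */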
static int seqiv_aead_setkey(struct crypto_aead *tfm,
                             const u8 *key, unsigned int keylen)
{
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return crypto_aead_setkey(ctx->child, key, keylen);
}

static int seqiv_aead_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return crypto_aead_setauthsize(ctx->child, authsize);
}

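/*
 * Completion helpers.  When the caller's IV buffer is not sufficiently
 * aligned, the request paths below bounce the IV through a kmalloc'ed
 * buffer; these callbacks copy the generated IV back into the caller's
 * buffer once the inner request has finished (skipping -EINPROGRESS
 * notifications) and then free the bounce buffer.  The new-style AEAD
 * path uses kzfree so the IV is not left behind in freed memory.
 */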
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
{
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
        struct crypto_ablkcipher *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = skcipher_givcrypt_reqtfm(req);
        memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));

out:
        kfree(subreq->info);
}

static void seqiv_complete(struct crypto_async_request *base, int err)
{
        struct skcipher_givcrypt_request *req = base->data;

        seqiv_complete2(req, err);
        skcipher_givcrypt_complete(req, err);
}

static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
{
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
        struct crypto_aead *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = aead_givcrypt_reqtfm(req);
        memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
        kfree(subreq->iv);
}

static void seqiv_aead_complete(struct crypto_async_request *base, int err)
{
        struct aead_givcrypt_request *req = base->data;

        seqiv_aead_complete2(req, err);
        aead_givcrypt_complete(req, err);
}

static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
        struct aead_request *subreq = aead_request_ctx(req);
        struct crypto_aead *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = crypto_aead_reqtfm(req);
        memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
        kzfree(subreq->iv);
}

static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
                                        int err)
{
        struct aead_request *req = base->data;

        seqiv_aead_encrypt_complete2(req, err);
        aead_request_complete(req, err);
}

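/*
 * Build the IV for a given sequence number: the 64-bit sequence number
 * is written big-endian into the low-order bytes of the IV (any
 * high-order bytes are zeroed when ivsize > 8), and the result is then
 * XORed with the per-instance random salt.  For example, with an 8-byte
 * IV a sequence number of 5 yields salt ^ 0x0000000000000005.  The
 * instance constructors reject ivsize < sizeof(u64), so the sequence
 * number always fits.
 */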
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
                        unsigned int ivsize)
{
        unsigned int len = ivsize;

        if (ivsize > sizeof(u64)) {
                memset(info, 0, ivsize - sizeof(u64));
                len = sizeof(u64);
        }
        seq = cpu_to_be64(seq);
        memcpy(info + ivsize - len, &seq, len);
        crypto_xor(info, ctx->salt, ivsize);
}

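/*
 * givencrypt for the legacy ablkcipher interface: generate the IV into
 * the request's IV buffer (via a bounce buffer if it is unaligned),
 * publish a copy in req->giv, and hand the request to the underlying
 * cipher.  On the bounce-buffer path the completion callback is
 * swapped for seqiv_complete so the IV is copied back and freed.
 */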
static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

        compl = req->creq.base.complete;
        data = req->creq.base.data;
        info = req->creq.info;

        ivsize = crypto_ablkcipher_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_ablkcipher_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->creq.base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                compl = seqiv_complete;
                data = req;
        }

        ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
                                        data);
        ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
                                     req->creq.nbytes, info);

        seqiv_geniv(ctx, info, req->seq, ivsize);
        memcpy(req->giv, info, ivsize);

        err = crypto_ablkcipher_encrypt(subreq);
        if (unlikely(info != req->creq.info))
                seqiv_complete2(req, err);
        return err;
}

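/*
 * The legacy AEAD givencrypt path: the same pattern as the skcipher
 * version above, with the associated data additionally passed through
 * to the inner AEAD via aead_request_set_assoc().
 */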
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
{
        struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *areq = &req->areq;
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        aead_request_set_tfm(subreq, aead_geniv_base(geniv));

        compl = areq->base.complete;
        data = areq->base.data;
        info = areq->iv;

        ivsize = crypto_aead_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, areq->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                compl = seqiv_aead_complete;
                data = req;
        }

        aead_request_set_callback(subreq, areq->base.flags, compl, data);
        aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
                               info);
        aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);

        seqiv_geniv(ctx, info, req->seq, ivsize);
        memcpy(req->giv, info, ivsize);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != areq->iv))
                seqiv_aead_complete2(req, err);
        return err;
}

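/*
 * seqniv encryption over an old-style inner AEAD (the "compat" path).
 * The caller's IV (the sequence number material) is XORed with the salt
 * and written into the destination directly after the associated data,
 * where it sits in front of the ciphertext proper; only the remaining
 * req->cryptlen - ivsize bytes are passed down for encryption.
 */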
static int seqiv_aead_encrypt_compat(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;
        info = req->iv;

        ivsize = crypto_aead_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                memcpy(info, req->iv, ivsize);
                compl = seqiv_aead_encrypt_complete;
                data = req;
        }

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->src, req->dst,
                               req->cryptlen - ivsize, info);
        aead_request_set_ad(subreq, req->assoclen, ivsize);

        crypto_xor(info, ctx->salt, ivsize);
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != req->iv))
                seqiv_aead_encrypt_complete2(req, err);
        return err;
}

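/*
 * New-style seqiv AEAD encryption: the generated IV travels at the
 * front of the ciphertext and is counted as associated data for the
 * inner AEAD (note the aead_request_set_ad() call covering
 * assoclen + ivsize).  The inner request runs in place on req->dst,
 * so when src != dst the plaintext is first copied across with the
 * default null cipher.
 */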
static int seqiv_aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;
        info = req->iv;

        ivsize = crypto_aead_ivsize(geniv);

        if (req->src != req->dst) {
                struct scatterlist src[2];
                struct scatterlist dst[2];
                struct blkcipher_desc desc = {
                        .tfm = ctx->null,
                };

                err = crypto_blkcipher_encrypt(
                        &desc,
                        scatterwalk_ffwd(dst, req->dst,
                                         req->assoclen + ivsize),
                        scatterwalk_ffwd(src, req->src,
                                         req->assoclen + ivsize),
                        req->cryptlen - ivsize);
                if (err)
                        return err;
        }

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                memcpy(info, req->iv, ivsize);
                compl = seqiv_aead_encrypt_complete;
                data = req;
        }

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->dst, req->dst,
                               req->cryptlen - ivsize, info);
        aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

        crypto_xor(info, ctx->salt, ivsize);
        scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != req->iv))
                seqiv_aead_encrypt_complete2(req, err);
        return err;
}

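/*
 * seqniv decryption (compat path): read the IV back out of the source
 * buffer just past the associated data, then decrypt the remaining
 * req->cryptlen - ivsize bytes with it.
 */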
static int seqiv_aead_decrypt_compat(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;

        ivsize = crypto_aead_ivsize(geniv);

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->src, req->dst,
                               req->cryptlen - ivsize, req->iv);
        aead_request_set_ad(subreq, req->assoclen, ivsize);

        scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

        return crypto_aead_decrypt(subreq);
}

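/*
 * New-style seqiv decryption: the IV is read back from the front of the
 * ciphertext and counted as associated data for the inner AEAD
 * (assoclen + ivsize).  When decrypting out of place the IV bytes are
 * also copied into the destination, presumably so the inner transform
 * finds the full associated-data region there as well.
 */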
static int seqiv_aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *subreq = aead_request_ctx(req);
        crypto_completion_t compl;
        void *data;
        unsigned int ivsize;

        aead_request_set_tfm(subreq, ctx->child);

        compl = req->base.complete;
        data = req->base.data;

        ivsize = crypto_aead_ivsize(geniv);

        aead_request_set_callback(subreq, req->base.flags, compl, data);
        aead_request_set_crypt(subreq, req->src, req->dst,
                               req->cryptlen - ivsize, req->iv);
        aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

        scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
        if (req->src != req->dst)
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->assoclen, ivsize, 1);

        return crypto_aead_decrypt(subreq);
}

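/*
 * First-use handlers: the salt is generated lazily from the default RNG
 * on the first encryption, after which the operation pointer is switched
 * to the fast-path handler.  The lock together with the recheck of the
 * handler pointer guards against two requests racing to initialise the
 * salt.
 */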
static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
                goto unlock;

        crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_ablkcipher_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_givencrypt(req);
}

static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
{
        struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
                goto unlock;

        crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_aead_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_aead_givencrypt(req);
}

static int seqiv_aead_encrypt_compat_first(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (geniv->encrypt != seqiv_aead_encrypt_compat_first)
                goto unlock;

        geniv->encrypt = seqiv_aead_encrypt_compat;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_aead_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_aead_encrypt_compat(req);
}

static int seqiv_aead_encrypt_first(struct aead_request *req)
{
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (geniv->encrypt != seqiv_aead_encrypt_first)
                goto unlock;

        geniv->encrypt = seqiv_aead_encrypt;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_aead_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_aead_encrypt(req);
}

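/*
 * Transform init/exit.  Each variant sets the request context size for
 * the sub-request stacked on top of the caller's request, then runs the
 * generic geniv initialisation.  The new-style variants also take
 * ownership of the inner AEAD (ctx->child) and point geniv->child back
 * at the generator itself; seqiv_aead_init additionally grabs the
 * default null skcipher used for the src-to-dst copy in
 * seqiv_aead_encrypt().
 */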
static int seqiv_init(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

        spin_lock_init(&ctx->lock);

        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

        return skcipher_geniv_init(tfm);
}

static int seqiv_old_aead_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);

        spin_lock_init(&ctx->lock);

        crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
                                sizeof(struct aead_request));

        return aead_geniv_init(tfm);
}

static int seqiv_aead_compat_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err;

        spin_lock_init(&ctx->lock);

        crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

        err = aead_geniv_init(tfm);

        ctx->child = geniv->child;
        geniv->child = geniv;

        return err;
}

static int seqiv_aead_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
        int err;

        spin_lock_init(&ctx->lock);

        crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

        ctx->null = crypto_get_default_null_skcipher();
        err = PTR_ERR(ctx->null);
        if (IS_ERR(ctx->null))
                goto out;

        err = aead_geniv_init(tfm);
        if (err)
                goto drop_null;

        ctx->child = geniv->child;
        geniv->child = geniv;

out:
        return err;

drop_null:
        crypto_put_default_null_skcipher();
        goto out;
}

static void seqiv_aead_compat_exit(struct crypto_tfm *tfm)
{
        struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static void seqiv_aead_exit(struct crypto_tfm *tfm)
{
        struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
        crypto_put_default_null_skcipher();
}

static struct crypto_template seqiv_tmpl;
static struct crypto_template seqniv_tmpl;

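/*
 * Instance constructors.  Every variant insists on an IV of at least
 * 64 bits so that the full sequence number fits, and grows cra_ctxsize
 * by ivsize to make room for the flexible salt[] array in the context.
 */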
static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
{
        struct crypto_instance *inst;

        inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                goto out;

        if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) {
                skcipher_geniv_free(inst);
                inst = ERR_PTR(-EINVAL);
                goto out;
        }

        inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;

        inst->alg.cra_init = seqiv_init;
        inst->alg.cra_exit = skcipher_geniv_exit;

        inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

out:
        return inst;
}

static struct crypto_instance *seqiv_old_aead_alloc(struct aead_instance *aead)
{
        struct crypto_instance *inst = aead_crypto_instance(aead);

        if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
                aead_geniv_free(aead);
                return ERR_PTR(-EINVAL);
        }

        inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;

        inst->alg.cra_init = seqiv_old_aead_init;
        inst->alg.cra_exit = aead_geniv_exit;

        inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

        return inst;
}

static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
{
        struct aead_instance *inst;
        struct crypto_aead_spawn *spawn;
        struct aead_alg *alg;

        inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                goto out;

        if (inst->alg.base.cra_aead.encrypt)
                return seqiv_old_aead_alloc(inst);

        if (inst->alg.ivsize < sizeof(u64)) {
                aead_geniv_free(inst);
                inst = ERR_PTR(-EINVAL);
                goto out;
        }

        spawn = aead_instance_ctx(inst);
        alg = crypto_spawn_aead_alg(spawn);

        inst->alg.setkey = seqiv_aead_setkey;
        inst->alg.setauthsize = seqiv_aead_setauthsize;
        inst->alg.encrypt = seqiv_aead_encrypt_first;
        inst->alg.decrypt = seqiv_aead_decrypt;

        inst->alg.base.cra_init = seqiv_aead_init;
        inst->alg.base.cra_exit = seqiv_aead_exit;

        inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;

        if (alg->base.cra_aead.encrypt) {
                inst->alg.encrypt = seqiv_aead_encrypt_compat_first;
                inst->alg.decrypt = seqiv_aead_decrypt_compat;

                inst->alg.base.cra_init = seqiv_aead_compat_init;
                inst->alg.base.cra_exit = seqiv_aead_compat_exit;
        }

out:
        return aead_crypto_instance(inst);
}

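/*
 * Template entry point for "seqiv": take a reference on the default RNG
 * (dropped again in seqiv_free()), then dispatch on the requested
 * algorithm type to the ablkcipher or AEAD constructor.
 */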
static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        err = crypto_get_default_rng();
        if (err)
                return ERR_PTR(err);

        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
                inst = seqiv_ablkcipher_alloc(tb);
        else
                inst = seqiv_aead_alloc(tb);

        if (IS_ERR(inst))
                goto put_rng;

        inst->alg.cra_alignmask |= __alignof__(u32) - 1;

out:
        return inst;

put_rng:
        crypto_put_default_rng();
        goto out;
}

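/*
 * Template entry point for "seqniv": like seqiv, but always built on
 * the compat encrypt/decrypt handlers above, which place the generated
 * IV between the associated data and the ciphertext proper.
 */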
static struct crypto_instance *seqniv_alloc(struct rtattr **tb)
{
        struct aead_instance *inst;
        struct crypto_aead_spawn *spawn;
        struct aead_alg *alg;
        int err;

        err = crypto_get_default_rng();
        if (err)
                return ERR_PTR(err);

        inst = aead_geniv_alloc(&seqniv_tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                goto put_rng;

        if (inst->alg.ivsize < sizeof(u64)) {
                aead_geniv_free(inst);
                inst = ERR_PTR(-EINVAL);
                goto put_rng;
        }

        spawn = aead_instance_ctx(inst);
        alg = crypto_spawn_aead_alg(spawn);

        inst->alg.setkey = seqiv_aead_setkey;
        inst->alg.setauthsize = seqiv_aead_setauthsize;
        inst->alg.encrypt = seqiv_aead_encrypt_compat_first;
        inst->alg.decrypt = seqiv_aead_decrypt_compat;

        inst->alg.base.cra_init = seqiv_aead_compat_init;
        inst->alg.base.cra_exit = seqiv_aead_compat_exit;

        inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
        inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
        inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;

out:
        return aead_crypto_instance(inst);

put_rng:
        crypto_put_default_rng();
        goto out;
}

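/* Free an instance, dropping the RNG reference taken at creation time. */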
static void seqiv_free(struct crypto_instance *inst)
{
        if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
                skcipher_geniv_free(inst);
        else
                aead_geniv_free(aead_instance(inst));
        crypto_put_default_rng();
}

static struct crypto_template seqiv_tmpl = {
        .name = "seqiv",
        .alloc = seqiv_alloc,
        .free = seqiv_free,
        .module = THIS_MODULE,
};

static struct crypto_template seqniv_tmpl = {
        .name = "seqniv",
        .alloc = seqniv_alloc,
        .free = seqiv_free,
        .module = THIS_MODULE,
};

static int __init seqiv_module_init(void)
{
        int err;

        err = crypto_register_template(&seqiv_tmpl);
        if (err)
                goto out;

        err = crypto_register_template(&seqniv_tmpl);
        if (err)
                goto out_undo_niv;

out:
        return err;

out_undo_niv:
        crypto_unregister_template(&seqiv_tmpl);
        goto out;
}

static void __exit seqiv_module_exit(void)
{
        crypto_unregister_template(&seqiv_tmpl);
}

module_init(seqiv_module_init);
module_exit(seqiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sequence Number IV Generator");
MODULE_ALIAS_CRYPTO("seqiv");
MODULE_ALIAS_CRYPTO("seqniv");