crypto/ahash.c
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        /* Async walks may sleep, so map with kmap(); sync walks use the
         * cheaper atomic mapping. */
        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                if (nbytes) {
                        walk->data += walk->offset;
                        return nbytes;
                }
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The may-sleep test only makes sense for sync users.
                 * Async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
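
/*
 * Usage sketch (illustrative, not part of the original file): driver
 * update paths typically iterate the walk API as below, mirroring
 * shash_ahash_update() in crypto/shash.c.  example_process_block() is
 * a hypothetical per-chunk handler.
 *
 *      struct crypto_hash_walk walk;
 *      int nbytes, err = 0;
 *
 *      for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *           nbytes = crypto_hash_walk_done(&walk, err))
 *              err = example_process_block(walk.data, nbytes);
 *
 * A non-positive return from either call ends the loop: 0 means the
 * request is fully consumed, a negative value is the error to return.
 */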

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (err)
                return err;

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
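
/*
 * Usage note (added, illustrative): keyed algorithms such as
 * "hmac(sha256)" must be keyed before use, otherwise operations that
 * check CRYPTO_TFM_NEED_KEY (e.g. crypto_ahash_digest() below) fail
 * with -ENOKEY:
 *
 *      tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *      err = crypto_ahash_setkey(tfm, key, keylen);
 *
 * The key pointer may be unaligned; as shown above, an aligned bounce
 * buffer is allocated, used and wiped (kzfree()) internally.
 */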

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
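
/*
 * Worked example (added note): the CRYPTO_MINALIGN_ATTR ubuf[] above is
 * already aligned to crypto_tfm_ctx_alignment().  If that alignment is
 * 8 and the algorithm's alignmask is 63, the worst-case padding needed
 * to reach 64-byte alignment is 63 & ~7 == 56 bytes, so len + 56 bytes
 * always suffice for PTR_ALIGN() in ahash_save_req() below to carve out
 * an aligned result buffer of len bytes.
 */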

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(), thus as it
         * is a pointer to self, it is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        ahash_restore_req(req, err);

        return err;
}

static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
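
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * caller might drive the API above to hash one buffer, waiting on a
 * completion if the implementation is asynchronous.  All example_*
 * names are hypothetical; <linux/completion.h> and
 * <linux/scatterlist.h> are assumed reachable through the includes at
 * the top of this file, and the input buffer must be directly
 * mappable (no stack/vmalloc memory for DMA-capable drivers).
 */
struct example_wait {
        struct completion done;
        int err;
};

static void example_digest_done(struct crypto_async_request *req, int err)
{
        struct example_wait *wait = req->data;

        if (err == -EINPROGRESS)
                return; /* request just left the backlog; keep waiting */

        wait->err = err;
        complete(&wait->done);
}

static int __maybe_unused example_digest(const u8 *data, unsigned int len,
                                         u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        struct example_wait wait;
        int err;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        init_completion(&wait.done);
        sg_init_one(&sg, data, len);
        /* An unaligned out buffer is fine: crypto_ahash_op() above
         * bounces it through ahash_op_unaligned() as needed. */
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   example_digest_done, &wait);
        ahash_request_set_crypt(req, &sg, out, len);

        err = crypto_ahash_digest(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&wait.done);
                err = wait.err;
        }

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return err;
}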

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                        crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
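
/*
 * Registration sketch (illustrative, not part of the original file):
 * the skeleton a driver would fill in to register an asynchronous
 * SHA-256 implementation.  All example_* names are hypothetical and
 * the nop callback stands in for real hardware work.
 */
struct example_sha256_state {
        u32 state[8];
        u64 count;
        u8 buf[64];
};

static int example_nop(struct ahash_request *req)
{
        return 0;       /* a real driver kicks off hardware here */
}

static struct ahash_alg __maybe_unused example_sha256_alg = {
        .init   = example_nop,
        .update = example_nop,
        .final  = example_nop,
        .digest = example_nop,
        .halg   = {
                .digestsize = 32,       /* SHA-256 */
                .statesize  = sizeof(struct example_sha256_state),
                .base       = {
                        .cra_name        = "sha256",
                        .cra_driver_name = "sha256-example",
                        .cra_priority    = 300,
                        .cra_flags       = CRYPTO_ALG_ASYNC,
                        .cra_blocksize   = 64,  /* SHA-256 block size */
                        .cra_module      = THIS_MODULE,
                },
        },
};

/*
 * A driver's probe would then call
 * crypto_register_ahash(&example_sha256_alg) and
 * crypto_unregister_ahash() on removal.  Note that ahash_prepare_alg()
 * above rejects statesize == 0, so .statesize must be set even when
 * .export/.import are left unimplemented, and the type bits in
 * cra_flags are filled in by ahash_prepare_alg() itself.
 */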

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");