OSDN Git Service

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[uclinux-h8/linux.git] / arch / x86 / crypto / aesni-intel_glue.c
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in intel-aes_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/fpu/api.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46
47 #define AESNI_ALIGN     16
48 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
49 #define RFC4106_HASH_SUBKEY_SIZE 16
50
51 /* This data is stored at the end of the crypto_tfm struct.
52  * It's a type of per "session" data storage location.
53  * This needs to be 16 byte aligned.
54  */
struct aesni_rfc4106_gcm_ctx {
	/* GHASH subkey H; the gcm asm requires 16-byte alignment */
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	/* expanded AES key schedule, also 16-byte aligned for the asm */
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	/* 4-byte salt taken from the tail of the RFC4106 key */
	u8 nonce[4];
};
61
/* Result carrier for the async hash-subkey derivation. */
struct aesni_gcm_set_hash_subkey_result {
	int err;			/* final status of the ctr(aes) request */
	struct completion completion;	/* signalled when the request finishes */
};
66
/* Per-request scratch state used while deriving the GHASH subkey. */
struct aesni_hash_subkey_req_data {
	u8 iv[16];					/* all-zero CTR IV */
	struct aesni_gcm_set_hash_subkey_result result;	/* completion + err */
	struct scatterlist sg;				/* wraps hash_subkey */
};
72
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	/* unaligned storage; re-aligned via aes_ctx() before every use */
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
77
struct aesni_xts_ctx {
	/* tweak and data keys; both re-aligned via aes_ctx() before use */
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
82
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84                              unsigned int key_len);
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86                           const u8 *in);
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88                           const u8 *in);
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90                               const u8 *in, unsigned int len);
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92                               const u8 *in, unsigned int len);
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94                               const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96                               const u8 *in, unsigned int len, u8 *iv);
97
98 int crypto_fpu_init(void);
99 void crypto_fpu_exit(void);
100
101 #define AVX_GEN2_OPTSIZE 640
102 #define AVX_GEN4_OPTSIZE 4096
103
104 #ifdef CONFIG_X86_64
105
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107                               const u8 *in, unsigned int len, u8 *iv);
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109                               const u8 *in, unsigned int len, u8 *iv);
110
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112                                  const u8 *in, bool enc, u8 *iv);
113
114 /* asmlinkage void aesni_gcm_enc()
115  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
121  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123  * const u8 *aad, Additional Authentication Data (AAD)
124  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125  *          is going to be 8 or 12 bytes
126  * u8 *auth_tag, Authenticated Tag output.
127  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128  *          Valid values are 16 (most likely), 12 or 8.
129  */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131                         const u8 *in, unsigned long plaintext_len, u8 *iv,
132                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133                         u8 *auth_tag, unsigned long auth_tag_len);
134
135 /* asmlinkage void aesni_gcm_dec()
136  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
142  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144  * const u8 *aad, Additional Authentication Data (AAD)
145  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146  * to be 8 or 12 bytes
147  * u8 *auth_tag, Authenticated Tag output.
148  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149  * Valid values are 16 (most likely), 12 or 8.
150  */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
153                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154                         u8 *auth_tag, unsigned long auth_tag_len);
155
156
157 #ifdef CONFIG_AS_AVX
158 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159                 void *keys, u8 *out, unsigned int num_bytes);
160 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161                 void *keys, u8 *out, unsigned int num_bytes);
162 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163                 void *keys, u8 *out, unsigned int num_bytes);
164 /*
165  * asmlinkage void aesni_gcm_precomp_avx_gen2()
166  * gcm_data *my_ctx_data, context data
167  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
168  */
169 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172                         const u8 *in, unsigned long plaintext_len, u8 *iv,
173                         const u8 *aad, unsigned long aad_len,
174                         u8 *auth_tag, unsigned long auth_tag_len);
175
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
178                         const u8 *aad, unsigned long aad_len,
179                         u8 *auth_tag, unsigned long auth_tag_len);
180
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182                         const u8 *in, unsigned long plaintext_len, u8 *iv,
183                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184                         u8 *auth_tag, unsigned long auth_tag_len)
185 {
186         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
187         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
188                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189                                 aad_len, auth_tag, auth_tag_len);
190         } else {
191                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193                                         aad_len, auth_tag, auth_tag_len);
194         }
195 }
196
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
199                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200                         u8 *auth_tag, unsigned long auth_tag_len)
201 {
202         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
203         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
204                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205                                 aad_len, auth_tag, auth_tag_len);
206         } else {
207                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209                                         aad_len, auth_tag, auth_tag_len);
210         }
211 }
212 #endif
213
214 #ifdef CONFIG_AS_AVX2
215 /*
216  * asmlinkage void aesni_gcm_precomp_avx_gen4()
217  * gcm_data *my_ctx_data, context data
218  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
219  */
220 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223                         const u8 *in, unsigned long plaintext_len, u8 *iv,
224                         const u8 *aad, unsigned long aad_len,
225                         u8 *auth_tag, unsigned long auth_tag_len);
226
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
229                         const u8 *aad, unsigned long aad_len,
230                         u8 *auth_tag, unsigned long auth_tag_len);
231
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233                         const u8 *in, unsigned long plaintext_len, u8 *iv,
234                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235                         u8 *auth_tag, unsigned long auth_tag_len)
236 {
237        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
238         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
239                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240                                 aad_len, auth_tag, auth_tag_len);
241         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244                                         aad_len, auth_tag, auth_tag_len);
245         } else {
246                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248                                         aad_len, auth_tag, auth_tag_len);
249         }
250 }
251
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
254                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255                         u8 *auth_tag, unsigned long auth_tag_len)
256 {
257        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
258         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
259                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260                                 aad, aad_len, auth_tag, auth_tag_len);
261         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264                                         aad_len, auth_tag, auth_tag_len);
265         } else {
266                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268                                         aad_len, auth_tag, auth_tag_len);
269         }
270 }
271 #endif
272
273 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
274                         const u8 *in, unsigned long plaintext_len, u8 *iv,
275                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276                         u8 *auth_tag, unsigned long auth_tag_len);
277
278 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
279                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
280                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
281                         u8 *auth_tag, unsigned long auth_tag_len);
282
283 static inline struct
284 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285 {
286         unsigned long align = AESNI_ALIGN;
287
288         if (align <= crypto_tfm_ctx_alignment())
289                 align = 1;
290         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
291 }
292 #endif
293
294 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
295 {
296         unsigned long addr = (unsigned long)raw_ctx;
297         unsigned long align = AESNI_ALIGN;
298
299         if (align <= crypto_tfm_ctx_alignment())
300                 align = 1;
301         return (struct crypto_aes_ctx *)ALIGN(addr, align);
302 }
303
304 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
305                               const u8 *in_key, unsigned int key_len)
306 {
307         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
308         u32 *flags = &tfm->crt_flags;
309         int err;
310
311         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
312             key_len != AES_KEYSIZE_256) {
313                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
314                 return -EINVAL;
315         }
316
317         if (!irq_fpu_usable())
318                 err = crypto_aes_expand_key(ctx, in_key, key_len);
319         else {
320                 kernel_fpu_begin();
321                 err = aesni_set_key(ctx, in_key, key_len);
322                 kernel_fpu_end();
323         }
324
325         return err;
326 }
327
/* setkey handler for the plain "aes" cipher: key lives in the tfm ctx. */
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
333
334 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
335 {
336         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
337
338         if (!irq_fpu_usable())
339                 crypto_aes_encrypt_x86(ctx, dst, src);
340         else {
341                 kernel_fpu_begin();
342                 aesni_enc(ctx, dst, src);
343                 kernel_fpu_end();
344         }
345 }
346
347 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
348 {
349         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
350
351         if (!irq_fpu_usable())
352                 crypto_aes_decrypt_x86(ctx, dst, src);
353         else {
354                 kernel_fpu_begin();
355                 aesni_dec(ctx, dst, src);
356                 kernel_fpu_end();
357         }
358 }
359
/*
 * Single-block encrypt without any FPU management; issues AES-NI
 * instructions directly, so callers must ensure the FPU is usable
 * (internal "__aes-aesni" cipher).
 */
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}
366
/*
 * Single-block decrypt counterpart of __aes_encrypt(); no FPU
 * management here, callers must ensure the FPU is usable.
 */
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}
373
374 static int ecb_encrypt(struct blkcipher_desc *desc,
375                        struct scatterlist *dst, struct scatterlist *src,
376                        unsigned int nbytes)
377 {
378         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
379         struct blkcipher_walk walk;
380         int err;
381
382         blkcipher_walk_init(&walk, dst, src, nbytes);
383         err = blkcipher_walk_virt(desc, &walk);
384         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
385
386         kernel_fpu_begin();
387         while ((nbytes = walk.nbytes)) {
388                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
389                               nbytes & AES_BLOCK_MASK);
390                 nbytes &= AES_BLOCK_SIZE - 1;
391                 err = blkcipher_walk_done(desc, &walk, nbytes);
392         }
393         kernel_fpu_end();
394
395         return err;
396 }
397
398 static int ecb_decrypt(struct blkcipher_desc *desc,
399                        struct scatterlist *dst, struct scatterlist *src,
400                        unsigned int nbytes)
401 {
402         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
403         struct blkcipher_walk walk;
404         int err;
405
406         blkcipher_walk_init(&walk, dst, src, nbytes);
407         err = blkcipher_walk_virt(desc, &walk);
408         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
409
410         kernel_fpu_begin();
411         while ((nbytes = walk.nbytes)) {
412                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
413                               nbytes & AES_BLOCK_MASK);
414                 nbytes &= AES_BLOCK_SIZE - 1;
415                 err = blkcipher_walk_done(desc, &walk, nbytes);
416         }
417         kernel_fpu_end();
418
419         return err;
420 }
421
422 static int cbc_encrypt(struct blkcipher_desc *desc,
423                        struct scatterlist *dst, struct scatterlist *src,
424                        unsigned int nbytes)
425 {
426         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
427         struct blkcipher_walk walk;
428         int err;
429
430         blkcipher_walk_init(&walk, dst, src, nbytes);
431         err = blkcipher_walk_virt(desc, &walk);
432         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
433
434         kernel_fpu_begin();
435         while ((nbytes = walk.nbytes)) {
436                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
437                               nbytes & AES_BLOCK_MASK, walk.iv);
438                 nbytes &= AES_BLOCK_SIZE - 1;
439                 err = blkcipher_walk_done(desc, &walk, nbytes);
440         }
441         kernel_fpu_end();
442
443         return err;
444 }
445
446 static int cbc_decrypt(struct blkcipher_desc *desc,
447                        struct scatterlist *dst, struct scatterlist *src,
448                        unsigned int nbytes)
449 {
450         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
451         struct blkcipher_walk walk;
452         int err;
453
454         blkcipher_walk_init(&walk, dst, src, nbytes);
455         err = blkcipher_walk_virt(desc, &walk);
456         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
457
458         kernel_fpu_begin();
459         while ((nbytes = walk.nbytes)) {
460                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
461                               nbytes & AES_BLOCK_MASK, walk.iv);
462                 nbytes &= AES_BLOCK_SIZE - 1;
463                 err = blkcipher_walk_done(desc, &walk, nbytes);
464         }
465         kernel_fpu_end();
466
467         return err;
468 }
469
470 #ifdef CONFIG_X86_64
471 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
472                             struct blkcipher_walk *walk)
473 {
474         u8 *ctrblk = walk->iv;
475         u8 keystream[AES_BLOCK_SIZE];
476         u8 *src = walk->src.virt.addr;
477         u8 *dst = walk->dst.virt.addr;
478         unsigned int nbytes = walk->nbytes;
479
480         aesni_enc(ctx, keystream, ctrblk);
481         crypto_xor(keystream, src, nbytes);
482         memcpy(dst, keystream, nbytes);
483         crypto_inc(ctrblk, AES_BLOCK_SIZE);
484 }
485
486 #ifdef CONFIG_AS_AVX
487 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
488                               const u8 *in, unsigned int len, u8 *iv)
489 {
490         /*
491          * based on key length, override with the by8 version
492          * of ctr mode encryption/decryption for improved performance
493          * aes_set_key_common() ensures that key length is one of
494          * {128,192,256}
495          */
496         if (ctx->key_length == AES_KEYSIZE_128)
497                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
498         else if (ctx->key_length == AES_KEYSIZE_192)
499                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
500         else
501                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
502 }
503 #endif
504
505 static int ctr_crypt(struct blkcipher_desc *desc,
506                      struct scatterlist *dst, struct scatterlist *src,
507                      unsigned int nbytes)
508 {
509         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
510         struct blkcipher_walk walk;
511         int err;
512
513         blkcipher_walk_init(&walk, dst, src, nbytes);
514         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
515         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
516
517         kernel_fpu_begin();
518         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
519                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
520                                       nbytes & AES_BLOCK_MASK, walk.iv);
521                 nbytes &= AES_BLOCK_SIZE - 1;
522                 err = blkcipher_walk_done(desc, &walk, nbytes);
523         }
524         if (walk.nbytes) {
525                 ctr_crypt_final(ctx, &walk);
526                 err = blkcipher_walk_done(desc, &walk, 0);
527         }
528         kernel_fpu_end();
529
530         return err;
531 }
532 #endif
533
/* Async ECB wrapper: bind to the internal synchronous ECB driver. */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}
538
/* Async CBC wrapper: bind to the internal synchronous CBC driver. */
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}
543
544 #ifdef CONFIG_X86_64
/* Async CTR wrapper: bind to the internal synchronous CTR driver. */
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
549
550 #endif
551
552 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
/* Async PCBC wrapper: generic pcbc template over the internal cipher,
 * wrapped in the fpu template to batch FPU save/restore. */
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
557 #endif
558
/* crypt_fn for lrw_crypt()/xts_crypt(): ECB-encrypt the blocks in
 * place; caller provides FPU context. */
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}
563
/* crypt_fn for lrw_crypt()/xts_crypt(): ECB-decrypt the blocks in
 * place; caller provides FPU context. */
static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}
568
569 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
570                             unsigned int keylen)
571 {
572         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
573         int err;
574
575         err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
576                                  keylen - AES_BLOCK_SIZE);
577         if (err)
578                 return err;
579
580         return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
581 }
582
/* Tear-down: release the table allocated by lrw_init_table(). */
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}
589
590 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
591                        struct scatterlist *src, unsigned int nbytes)
592 {
593         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
594         be128 buf[8];
595         struct lrw_crypt_req req = {
596                 .tbuf = buf,
597                 .tbuflen = sizeof(buf),
598
599                 .table_ctx = &ctx->lrw_table,
600                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
601                 .crypt_fn = lrw_xts_encrypt_callback,
602         };
603         int ret;
604
605         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
606
607         kernel_fpu_begin();
608         ret = lrw_crypt(desc, dst, src, nbytes, &req);
609         kernel_fpu_end();
610
611         return ret;
612 }
613
614 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
615                        struct scatterlist *src, unsigned int nbytes)
616 {
617         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
618         be128 buf[8];
619         struct lrw_crypt_req req = {
620                 .tbuf = buf,
621                 .tbuflen = sizeof(buf),
622
623                 .table_ctx = &ctx->lrw_table,
624                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
625                 .crypt_fn = lrw_xts_decrypt_callback,
626         };
627         int ret;
628
629         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
630
631         kernel_fpu_begin();
632         ret = lrw_crypt(desc, dst, src, nbytes, &req);
633         kernel_fpu_end();
634
635         return ret;
636 }
637
638 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
639                             unsigned int keylen)
640 {
641         struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
642         u32 *flags = &tfm->crt_flags;
643         int err;
644
645         /* key consists of keys of equal size concatenated, therefore
646          * the length must be even
647          */
648         if (keylen % 2) {
649                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
650                 return -EINVAL;
651         }
652
653         /* first half of xts-key is for crypt */
654         err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
655         if (err)
656                 return err;
657
658         /* second half of xts-key is for tweak */
659         return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
660                                   keylen / 2);
661 }
662
663
/* tweak_fn for the XTS helpers: encrypt one block with the tweak key;
 * caller provides FPU context. */
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}
668
669 #ifdef CONFIG_X86_64
670
/* Single-block XTS encrypt step for the glue_helper dispatch table. */
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}
675
/* Single-block XTS decrypt step for the glue_helper dispatch table. */
static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}
680
/* Eight-block XTS encrypt step backed by the aesni_xts_crypt8 asm. */
static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}
685
/* Eight-block XTS decrypt step backed by the aesni_xts_crypt8 asm. */
static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
690
/* glue_helper dispatch table for XTS encryption: prefer the 8-block
 * asm path, fall back to single blocks for the remainder. */
static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};
703
/* glue_helper dispatch table for XTS decryption: prefer the 8-block
 * asm path, fall back to single blocks for the remainder. */
static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};
716
/* 64-bit XTS encrypt entry point: the glue_helper walks the
 * scatterlists and drives the aesni_enc_xts dispatch table. */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}
727
/* 64-bit XTS decrypt entry point; mirror of xts_encrypt() using the
 * aesni_dec_xts dispatch table. */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}
738
739 #else
740
741 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
742                        struct scatterlist *src, unsigned int nbytes)
743 {
744         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
745         be128 buf[8];
746         struct xts_crypt_req req = {
747                 .tbuf = buf,
748                 .tbuflen = sizeof(buf),
749
750                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
751                 .tweak_fn = aesni_xts_tweak,
752                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
753                 .crypt_fn = lrw_xts_encrypt_callback,
754         };
755         int ret;
756
757         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
758
759         kernel_fpu_begin();
760         ret = xts_crypt(desc, dst, src, nbytes, &req);
761         kernel_fpu_end();
762
763         return ret;
764 }
765
766 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
767                        struct scatterlist *src, unsigned int nbytes)
768 {
769         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
770         be128 buf[8];
771         struct xts_crypt_req req = {
772                 .tbuf = buf,
773                 .tbuflen = sizeof(buf),
774
775                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
776                 .tweak_fn = aesni_xts_tweak,
777                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
778                 .crypt_fn = lrw_xts_decrypt_callback,
779         };
780         int ret;
781
782         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
783
784         kernel_fpu_begin();
785         ret = xts_crypt(desc, dst, src, nbytes, &req);
786         kernel_fpu_end();
787
788         return ret;
789 }
790
791 #endif
792
793 #ifdef CONFIG_X86_64
794 static int rfc4106_init(struct crypto_aead *aead)
795 {
796         struct cryptd_aead *cryptd_tfm;
797         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
798
799         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
800                                        CRYPTO_ALG_INTERNAL,
801                                        CRYPTO_ALG_INTERNAL);
802         if (IS_ERR(cryptd_tfm))
803                 return PTR_ERR(cryptd_tfm);
804
805         *ctx = cryptd_tfm;
806         crypto_aead_set_reqsize(
807                 aead,
808                 sizeof(struct aead_request) +
809                 crypto_aead_reqsize(&cryptd_tfm->base));
810         return 0;
811 }
812
/* Release the cryptd handle allocated by rfc4106_init(). */
static void rfc4106_exit(struct crypto_aead *aead)
{
        cryptd_free_aead(*(struct cryptd_aead **)crypto_aead_ctx(aead));
}
819
820 static void
821 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
822 {
823         struct aesni_gcm_set_hash_subkey_result *result = req->data;
824
825         if (err == -EINPROGRESS)
826                 return;
827         result->err = err;
828         complete(&result->completion);
829 }
830
831 static int
832 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
833 {
834         struct crypto_ablkcipher *ctr_tfm;
835         struct ablkcipher_request *req;
836         int ret = -EINVAL;
837         struct aesni_hash_subkey_req_data *req_data;
838
839         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
840         if (IS_ERR(ctr_tfm))
841                 return PTR_ERR(ctr_tfm);
842
843         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
844         if (ret)
845                 goto out_free_ablkcipher;
846
847         ret = -ENOMEM;
848         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
849         if (!req)
850                 goto out_free_ablkcipher;
851
852         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
853         if (!req_data)
854                 goto out_free_request;
855
856         memset(req_data->iv, 0, sizeof(req_data->iv));
857
858         /* Clear the data in the hash sub key container to zero.*/
859         /* We want to cipher all zeros to create the hash sub key. */
860         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
861
862         init_completion(&req_data->result.completion);
863         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
864         ablkcipher_request_set_tfm(req, ctr_tfm);
865         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
866                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
867                                         rfc4106_set_hash_subkey_done,
868                                         &req_data->result);
869
870         ablkcipher_request_set_crypt(req, &req_data->sg,
871                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
872
873         ret = crypto_ablkcipher_encrypt(req);
874         if (ret == -EINPROGRESS || ret == -EBUSY) {
875                 ret = wait_for_completion_interruptible
876                         (&req_data->result.completion);
877                 if (!ret)
878                         ret = req_data->result.err;
879         }
880         kfree(req_data);
881 out_free_request:
882         ablkcipher_request_free(req);
883 out_free_ablkcipher:
884         crypto_free_ablkcipher(ctr_tfm);
885         return ret;
886 }
887
888 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
889                                   unsigned int key_len)
890 {
891         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
892
893         if (key_len < 4) {
894                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
895                 return -EINVAL;
896         }
897         /*Account for 4 byte nonce at the end.*/
898         key_len -= 4;
899
900         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
901
902         return aes_set_key_common(crypto_aead_tfm(aead),
903                                   &ctx->aes_key_expanded, key, key_len) ?:
904                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
905 }
906
907 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
908                            unsigned int key_len)
909 {
910         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
911         struct cryptd_aead *cryptd_tfm = *ctx;
912
913         return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
914 }
915
916 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
917                                        unsigned int authsize)
918 {
919         switch (authsize) {
920         case 8:
921         case 12:
922         case 16:
923                 break;
924         default:
925                 return -EINVAL;
926         }
927
928         return 0;
929 }
930
931 /* This is the Integrity Check Value (aka the authentication tag length and can
932  * be 8, 12 or 16 bytes long. */
933 static int rfc4106_set_authsize(struct crypto_aead *parent,
934                                 unsigned int authsize)
935 {
936         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
937         struct cryptd_aead *cryptd_tfm = *ctx;
938
939         return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
940 }
941
/*
 * Synchronous RFC4106 GCM encryption.
 *
 * Builds the 16-byte counter block from the 4-byte key salt (ctx->nonce),
 * the 8-byte per-request IV and a 32-bit counter initialised to 1, then
 * feeds a virtually-contiguous view of AAD/plaintext to the assembler GCM
 * routine.  The auth tag is written immediately after the ciphertext.
 *
 * Caller must ensure the FPU is usable (this runs between
 * kernel_fpu_begin()/end()).  Returns 0, -EINVAL on bad AAD length, or
 * -ENOMEM if the bounce buffer cannot be allocated.
 */
static int helper_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        /* Assembler code requires a 16-byte-aligned counter block. */
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length equal */
        /* to 8 or 12 bytes */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;

        /* IV below built: salt (4) || explicit IV (8) || counter (4) */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        /*
         * Fast path: both src and dst are single scatterlist entries that
         * fit in one page, so they can be kmapped and used in place.
         */
        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;	/* in-place unless dst is a distinct sg */
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                /* Slow path: bounce everything through one linear buffer;
                 * GFP_ATOMIC because the FPU section that follows must not
                 * be preceded by a sleeping allocation in atomic callers. */
                assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!assoc))
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);
        kernel_fpu_end();

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                /* Copy ciphertext + tag back out, then drop the bounce buf. */
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(assoc);
        }
        return 0;
}
1018
/*
 * Synchronous RFC4106 GCM decryption.
 *
 * Mirrors helper_rfc4106_encrypt(): builds the counter block, decrypts
 * in place (or via a bounce buffer), then compares the computed tag with
 * the tag trailing the ciphertext using a constant-time memcmp.
 *
 * NOTE(review): tempCipherLen assumes req->cryptlen >= auth_tag_len —
 * presumably guaranteed by the AEAD layer for decryption; confirm, since
 * the subtraction would otherwise wrap.
 *
 * Returns 0, -EBADMSG on tag mismatch, -EINVAL on bad AAD length, or
 * -ENOMEM on bounce-buffer allocation failure.
 */
static int helper_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        /* Assembler code requires a 16-byte-aligned counter block. */
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 authTag[16];	/* tag computed by the assembler routine */
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;

        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length */
        /* equal to 8 or 12 bytes */

        /* cryptlen covers ciphertext + tag; the tag is not decrypted. */
        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* IV below built: salt (4) || explicit IV (8) || counter (4) */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        /*
         * Fast path: both src and dst are single scatterlist entries that
         * fit in one page, so they can be kmapped and used in place.
         */
        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;	/* in-place unless dst is a distinct sg */
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!assoc)
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
                authTag, auth_tag_len);
        kernel_fpu_end();

        /* Compare generated tag with passed in tag. */
        /* crypto_memneq is constant-time to avoid a timing oracle. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                /* Copy only the plaintext back (tag is stripped). */
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         tempCipherLen, 1);
                kfree(assoc);
        }
        return retval;
}
1102
1103 static int rfc4106_encrypt(struct aead_request *req)
1104 {
1105         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1106         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1107         struct cryptd_aead *cryptd_tfm = *ctx;
1108         struct aead_request *subreq = aead_request_ctx(req);
1109
1110         aead_request_set_tfm(subreq, irq_fpu_usable() ?
1111                                      cryptd_aead_child(cryptd_tfm) :
1112                                      &cryptd_tfm->base);
1113
1114         aead_request_set_callback(subreq, req->base.flags,
1115                                   req->base.complete, req->base.data);
1116         aead_request_set_crypt(subreq, req->src, req->dst,
1117                                req->cryptlen, req->iv);
1118         aead_request_set_ad(subreq, req->assoclen);
1119
1120         return crypto_aead_encrypt(subreq);
1121 }
1122
1123 static int rfc4106_decrypt(struct aead_request *req)
1124 {
1125         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1126         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1127         struct cryptd_aead *cryptd_tfm = *ctx;
1128         struct aead_request *subreq = aead_request_ctx(req);
1129
1130         aead_request_set_tfm(subreq, irq_fpu_usable() ?
1131                                      cryptd_aead_child(cryptd_tfm) :
1132                                      &cryptd_tfm->base);
1133
1134         aead_request_set_callback(subreq, req->base.flags,
1135                                   req->base.complete, req->base.data);
1136         aead_request_set_crypt(subreq, req->src, req->dst,
1137                                req->cryptlen, req->iv);
1138         aead_request_set_ad(subreq, req->assoclen);
1139
1140         return crypto_aead_decrypt(subreq);
1141 }
1142 #endif
1143
/*
 * Registration table for all non-AEAD AES-NI algorithms.
 *
 * Convention: "__"-prefixed entries are CRYPTO_ALG_INTERNAL — synchronous
 * SIMD implementations that must not be used directly (the FPU may not be
 * usable in the caller's context).  The corresponding user-visible entries
 * wrap them via the ablk/cryptd helper, which falls back to process
 * context when the FPU is unavailable.
 */
static struct crypto_alg aesni_algs[] = { {
        /* Plain single-block AES cipher (with software fallback paths). */
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        /* +AESNI_ALIGN-1: context is aligned by hand (see aes_ctx()). */
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        /* Internal raw AES-NI cipher; no irq_fpu_usable() fallback. */
        .cra_name               = "__aes-aesni",
        .cra_driver_name        = "__driver-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
}, {
        /* Internal synchronous ECB blkcipher. */
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
}, {
        /* Internal synchronous CBC blkcipher. */
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
}, {
        /* User-visible async ECB wrapper around __ecb-aes-aesni. */
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        /* User-visible async CBC wrapper around __cbc-aes-aesni. */
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        /* Internal synchronous CTR blkcipher (stream mode: blocksize 1). */
        .cra_name               = "__ctr-aes-aesni",
        .cra_driver_name        = "__driver-ctr-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        /* CTR is symmetric: same op both directions. */
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
}, {
        /* User-visible async CTR wrapper. */
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        /* Intentional: CTR decryption == encryption. */
                        .decrypt        = ablk_encrypt,
                        .geniv          = "chainiv",
                },
        },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
        /* Async PCBC built on the internal AES cipher via ablk helper. */
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#endif
}, {
        /* Internal synchronous LRW; key = AES key + 16-byte tweak key. */
        .cra_name               = "__lrw-aes-aesni",
        .cra_driver_name        = "__driver-lrw-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_exit               = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = lrw_aesni_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
}, {
        /* Internal synchronous XTS; key = two full AES keys. */
        .cra_name               = "__xts-aes-aesni",
        .cra_driver_name        = "__driver-xts-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aesni_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
}, {
        /* User-visible async LRW wrapper. */
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        /* User-visible async XTS wrapper. */
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
} };
1426
1427 #ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
        /*
         * Internal synchronous GCM helper; hidden from generic lookup
         * (CRYPTO_ALG_INTERNAL) and only reachable through cryptd via the
         * rfc4106 wrapper below.
         */
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm-aes-aesni",
                .cra_driver_name        = "__driver-gcm-aes-aesni",
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        /* User-visible async rfc4106(gcm(aes)); context is the cryptd tfm. */
        .init                   = rfc4106_init,
        .exit                   = rfc4106_exit,
        .setkey                 = rfc4106_set_key,
        .setauthsize            = rfc4106_set_authsize,
        .encrypt                = rfc4106_encrypt,
        .decrypt                = rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
} };
#else
/* No AEADs on 32-bit; zero-length array keeps the (un)register calls valid. */
static struct aead_alg aesni_aead_algs[0];
#endif
1466
1467
/* Match (and autoload on) any x86 CPU advertising the AES-NI feature. */
static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1473
1474 static int __init aesni_init(void)
1475 {
1476         int err;
1477
1478         if (!x86_match_cpu(aesni_cpu_id))
1479                 return -ENODEV;
1480 #ifdef CONFIG_X86_64
1481 #ifdef CONFIG_AS_AVX2
1482         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1483                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1484                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1485                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1486         } else
1487 #endif
1488 #ifdef CONFIG_AS_AVX
1489         if (boot_cpu_has(X86_FEATURE_AVX)) {
1490                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1491                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1492                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1493         } else
1494 #endif
1495         {
1496                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1497                 aesni_gcm_enc_tfm = aesni_gcm_enc;
1498                 aesni_gcm_dec_tfm = aesni_gcm_dec;
1499         }
1500         aesni_ctr_enc_tfm = aesni_ctr_enc;
1501 #ifdef CONFIG_AS_AVX
1502         if (cpu_has_avx) {
1503                 /* optimize performance of ctr mode encryption transform */
1504                 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1505                 pr_info("AES CTR mode by8 optimization enabled\n");
1506         }
1507 #endif
1508 #endif
1509
1510         err = crypto_fpu_init();
1511         if (err)
1512                 return err;
1513
1514         err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1515         if (err)
1516                 goto fpu_exit;
1517
1518         err = crypto_register_aeads(aesni_aead_algs,
1519                                     ARRAY_SIZE(aesni_aead_algs));
1520         if (err)
1521                 goto unregister_algs;
1522
1523         return err;
1524
1525 unregister_algs:
1526         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1527 fpu_exit:
1528         crypto_fpu_exit();
1529         return err;
1530 }
1531
/*
 * Module exit: unregister in reverse order of registration in
 * aesni_init() (AEADs, then the plain algorithms, then the FPU template).
 */
static void __exit aesni_exit(void)
{
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}
1539
/* Standard module entry/exit hooks. */
module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
/* Allow autoloading when a generic "aes" algorithm is requested. */
MODULE_ALIAS_CRYPTO("aes");