/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif


#define AESNI_ALIGN     16
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16

/* This data is stored at the end of the crypto_tfm struct.
 * It is per-"session" data storage and must be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct crypto_aes_ctx aes_key_expanded
                __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 nonce[4];
};

struct aesni_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

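/*
 * Tuning thresholds: GCM requests shorter than these (in bytes) are
 * serviced by the plain AES-NI routines rather than the AVX/AVX2 paths,
 * since the wider paths only pay off once the message is long enough to
 * amortize their extra setup; see the dispatch in aesni_gcm_enc_avx()
 * and aesni_gcm_enc_avx2() below.
 */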
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *          is going to be 8 or 12 bytes
 * u8 *auth_tag, Authentication tag output.
 * unsigned long auth_tag_len, Authentication tag length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

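/*
 * Illustrative sketch (not part of the driver): a hypothetical RFC4106
 * caller without extended sequence numbers would invoke the routine
 * roughly as below, with iv pointing at a 16-byte-aligned buffer laid
 * out as 4-byte salt | 8-byte per-packet IV | 0x00000001, an 8-byte AAD
 * and a 16-byte tag appended after the ciphertext:
 *
 *	aesni_gcm_enc(aes_ctx, dst, src, len, iv, hash_subkey,
 *		      aad, 8, dst + len, 16);
 *
 * helper_rfc4106_encrypt() below is the real in-tree caller.
 */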
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *          to be 8 or 12 bytes
 * u8 *auth_tag, Authentication tag output.
 * unsigned long auth_tag_len, Authentication tag length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);


#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

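/*
 * Wrappers that pick a GCM implementation per request: the AVX gen2
 * routines are used only for 128-bit keys and messages of at least
 * AVX_GEN2_OPTSIZE bytes; everything else goes to the SSE routines.
 */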
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

        if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
            (aes_ctx->key_length != AES_KEYSIZE_128)) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                                aad, aad_len, auth_tag, auth_tag_len);
        } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

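/*
 * The cipher context is stored unaligned (cra_alignmask is 0 and the
 * raw_*_ctx buffers are over-allocated by AESNI_ALIGN - 1), so round
 * the raw pointer up to the 16-byte boundary that the AES-NI
 * instructions expect. When the crypto API already guarantees at least
 * that alignment, the pointer is used as-is.
 */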
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

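/*
 * The blkcipher helpers below all follow the same pattern: walk the
 * scatterlists chunk by chunk, process the whole blocks of each chunk
 * (nbytes & AES_BLOCK_MASK) inside a kernel_fpu_begin()/end() section,
 * and hand any sub-block remainder back to blkcipher_walk_done().
 */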
static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes)) {
                kernel_fpu_begin();
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                nbytes & AES_BLOCK_MASK);
                kernel_fpu_end();
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes)) {
                kernel_fpu_begin();
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                kernel_fpu_end();
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes)) {
                kernel_fpu_begin();
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                kernel_fpu_end();
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes)) {
                kernel_fpu_begin();
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                kernel_fpu_end();
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

#ifdef CONFIG_X86_64
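/*
 * CTR mode produces exactly as many bytes as the plaintext, so a final
 * partial block is handled by encrypting the counter block once and
 * XOR-ing just the remaining nbytes of keystream into the output.
 */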
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct blkcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * Based on the key length, use the by8 version of CTR mode
         * encryption/decryption for improved performance.
         * aes_set_key_common() ensures that the key length is one of
         * {128, 192, 256} bits.
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                kernel_fpu_begin();
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                      nbytes & AES_BLOCK_MASK, walk.iv);
                kernel_fpu_end();
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                kernel_fpu_begin();
                ctr_crypt_final(ctx, &walk);
                kernel_fpu_end();
                err = blkcipher_walk_done(desc, &walk, 0);
        }

        return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#endif

#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
                                 keylen - AES_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = xts_check_key(tfm, key, keylen);
        if (err)
                return err;

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
                                  keylen / 2);
}


static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};

static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}

#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
        struct cryptd_aead *cryptd_tfm;
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
                                       CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        *ctx = cryptd_tfm;
        crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_free_aead(*ctx);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, key_len);
        if (ret)
                goto out_free_cipher;

        /*
         * Clear the hash subkey container; encrypting a block of zeros
         * with the cipher key is what produces the hash subkey.
         */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);

out_free_cipher:
        crypto_free_cipher(tfm);
        return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

        if (key_len < 4) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4 byte nonce at the end. */
        key_len -= 4;

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(parent);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

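/*
 * The GCM assembler routines want linear buffers. If both src and dst
 * are single scatterlist entries that fit within one page each, the
 * helpers below map them directly; otherwise they copy the request into
 * a temporary buffer, run the transform there, and copy the result back
 * out to the destination scatterlist.
 */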
static int helper_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        unsigned int i;

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, the AAD length needs to be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4 byte salt | 8 byte per-packet IV | counter of 1 */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!assoc))
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          dst + req->cryptlen, auth_tag_len);
        kernel_fpu_end();

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(assoc);
        }
        return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 authTag[16];
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk = {};
        unsigned int i;

        /*
         * Assuming we are supporting rfc4106 64-bit extended sequence
         * numbers, the AAD length needs to be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* Build the IV: 4 byte salt | 8 byte per-packet IV | counter of 1 */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!assoc)
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          authTag, auth_tag_len);
        kernel_fpu_end();

        /* Compare generated tag with passed in tag. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         tempCipherLen, 1);
                kfree(assoc);
        }
        return retval;
}

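/*
 * Outer entry points for the async "rfc4106(gcm(aes))" instance: call
 * the inner synchronous algorithm directly when the FPU is usable,
 * unless doing so from atomic context could overtake requests already
 * queued on cryptd; otherwise defer to the cryptd workqueue.
 */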
static int rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_encrypt(req);
}

static int rfc4106_decrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
        struct cryptd_aead *cryptd_tfm = *ctx;

        tfm = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, tfm);

        return crypto_aead_decrypt(req);
}
#endif

static struct crypto_alg aesni_algs[] = { {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        .cra_name               = "__aes-aesni",
        .cra_driver_name        = "__driver-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
}, {
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
}, {
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
}, {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        .cra_name               = "__ctr-aes-aesni",
        .cra_driver_name        = "__driver-ctr-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
}, {
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_encrypt,
                        .geniv          = "chainiv",
                },
        },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#endif
}, {
        .cra_name               = "__lrw-aes-aesni",
        .cra_driver_name        = "__driver-lrw-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_exit               = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = lrw_aesni_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
}, {
        .cra_name               = "__xts-aes-aesni",
        .cra_driver_name        = "__driver-xts-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aesni_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
}, {
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
} };

#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm-aes-aesni",
                .cra_driver_name        = "__driver-gcm-aes-aesni",
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .init                   = rfc4106_init,
        .exit                   = rfc4106_exit,
        .setkey                 = rfc4106_set_key,
        .setauthsize            = rfc4106_set_authsize,
        .encrypt                = rfc4106_encrypt,
        .decrypt                = rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif


static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        if (err)
                goto fpu_exit;

        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
                goto unregister_algs;

        return err;

unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
        crypto_fpu_exit();
        return err;
}

static void __exit aesni_exit(void)
{
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");