// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#define AESNI_ALIGN		16
#define AESNI_ALIGN_ATTR	__attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK		(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct generic_gcmaes_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

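/*
 * XTS uses separately expanded keys for the tweak and the data; both are
 * kept as raw byte arrays so the glue code can realign them to AESNI_ALIGN
 * before use.
 */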
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

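/*
 * Per-request GCM state passed to the assembly routines; the field layout
 * here is expected to match the offsets assumed by the GCM assembly, so it
 * should not be reordered without updating that code as well.
 */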
struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 16];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);

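/*
 * Request-size thresholds (in bytes) below which the GCM glue drops back to
 * a narrower implementation (AVX2 -> AVX -> SSE), since the wider code only
 * pays off on larger buffers.
 */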
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);

#ifdef CONFIG_X86_64
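/*
 * CTR encryption entry point used by the glue code; it defaults to
 * aesni_ctr_enc and is switched to the "by8" AVX implementation at module
 * init when the CPU supports AVX.
 */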
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
			       struct gcm_context_data *gdata,
			       u8 *iv,
			       u8 *hash_subkey, const u8 *aad,
			       unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

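/*
 * Dispatch table over the SSE, AVX and AVX2 GCM implementations;
 * aesni_gcm_tfm is pointed at the best variant for the CPU at module init.
 */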
static const struct aesni_gcm_tfm_s {
	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long plaintext_len);
	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long ciphertext_len);
	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
			 u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
	.init = &aesni_gcm_init,
	.enc_update = &aesni_gcm_enc_update,
	.dec_update = &aesni_gcm_dec_update,
	.finalize = &aesni_gcm_finalize,
};

asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);

/*
 * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
	.init = &aesni_gcm_init_avx_gen2,
	.enc_update = &aesni_gcm_enc_update_avx_gen2,
	.dec_update = &aesni_gcm_dec_update_avx_gen2,
	.finalize = &aesni_gcm_finalize_avx_gen2,
};

/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
	.init = &aesni_gcm_init_avx_gen4,
	.enc_update = &aesni_gcm_enc_update_avx_gen4,
	.dec_update = &aesni_gcm_dec_update_avx_gen4,
	.finalize = &aesni_gcm_finalize_avx_gen4,
};

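/*
 * The GCM key material lives in the aead ctx, which is only guaranteed to
 * be crypto_tfm_ctx_alignment() aligned; bump the pointer up to AESNI_ALIGN
 * when that guarantee is not strong enough.
 */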
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

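/*
 * Return the crypto_aes_ctx embedded in the (possibly under-aligned) raw tfm
 * context, rounded up to AESNI_ALIGN as required by the AES-NI assembly.
 */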
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

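/*
 * Key expansion: use the AES-NI key schedule when the FPU is usable in this
 * context, otherwise fall back to the generic aes_expandkey() in C.
 */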
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	if (!crypto_simd_usable())
		err = aes_expandkey(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_encrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_decrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}

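/*
 * Skcipher glue for the ECB/CBC modes below: walk the request in virtually
 * mapped chunks, hand whole AES blocks to the assembly while the FPU is
 * held, and report any partial remainder back to the walk.
 */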
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

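/*
 * CBC with ciphertext stealing: all but the last two blocks go through the
 * plain CBC handlers above, then the final (possibly partial) two blocks
 * are passed to the dedicated aesni_cts_cbc_enc/dec helpers.
 */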
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = cbc_encrypt(&subreq);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_fpu_begin();
	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			  walk.nbytes, walk.iv);
	kernel_fpu_end();

	return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = cbc_decrypt(&subreq);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_fpu_begin();
	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			  walk.nbytes, walk.iv);
	kernel_fpu_end();

	return skcipher_walk_done(&walk, 0);
}

#ifdef CONFIG_X86_64
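/*
 * Handle the final partial block of a CTR walk: encrypt the current counter
 * block to get one block of keystream, XOR it with the remaining source
 * bytes, and advance the counter.
 */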
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

test 5 (192 bit key, 16 byte blocks): 1 operation in 409 cycles (16 bytes)
test 6 (192 bit key, 64 byte blocks): 1 operation in 434 cycles (64 bytes)
test 7 (192 bit key, 256 byte blocks): 1 operation in 792 cycles (256 bytes)
test 8 (192 bit key, 1024 byte blocks): 1 operation in 2151 cycles (1024 bytes)
test 9 (192 bit key, 8192 byte blocks): 1 operation in 15804 cycles (8192 bytes)
test 10 (256 bit key, 16 byte blocks): 1 operation in 421 cycles (16 bytes)
test 11 (256 bit key, 64 byte blocks): 1 operation in 444 cycles (64 bytes)
test 12 (256 bit key, 256 byte blocks): 1 operation in 840 cycles (256 bytes)
test 13 (256 bit key, 1024 byte blocks): 1 operation in 2394 cycles (1024 bytes)
test 14 (256 bit key, 8192 byte blocks): 1 operation in 16928 cycles (8192 bytes)
tcrypt log with "by8" AES CTR mode optimization on Sandybridge
--------------------------------------------------------------
testing speed of __ctr-aes-aesni encryption
test 0 (128 bit key, 16 byte blocks): 1 operation in 383 cycles (16 bytes)
test 1 (128 bit key, 64 byte blocks): 1 operation in 401 cycles (64 bytes)
test 2 (128 bit key, 256 byte blocks): 1 operation in 522 cycles (256 bytes)
test 3 (128 bit key, 1024 byte blocks): 1 operation in 1136 cycles (1024 bytes)
test 4 (128 bit key, 8192 byte blocks): 1 operation in 7046 cycles (8192 bytes)
test 5 (192 bit key, 16 byte blocks): 1 operation in 394 cycles (16 bytes)
test 6 (192 bit key, 64 byte blocks): 1 operation in 418 cycles (64 bytes)
test 7 (192 bit key, 256 byte blocks): 1 operation in 559 cycles (256 bytes)
test 8 (192 bit key, 1024 byte blocks): 1 operation in 1263 cycles (1024 bytes)
test 9 (192 bit key, 8192 byte blocks): 1 operation in 9072 cycles (8192 bytes)
test 10 (256 bit key, 16 byte blocks): 1 operation in 408 cycles (16 bytes)
test 11 (256 bit key, 64 byte blocks): 1 operation in 428 cycles (64 bytes)
test 12 (256 bit key, 256 byte blocks): 1 operation in 595 cycles (256 bytes)
test 13 (256 bit key, 1024 byte blocks): 1 operation in 1385 cycles (1024 bytes)
test 14 (256 bit key, 8192 byte blocks): 1 operation in 9224 cycles (8192 bytes)
testing speed of __ctr-aes-aesni decryption
test 0 (128 bit key, 16 byte blocks): 1 operation in 390 cycles (16 bytes)
test 1 (128 bit key, 64 byte blocks): 1 operation in 402 cycles (64 bytes)
test 2 (128 bit key, 256 byte blocks): 1 operation in 530 cycles (256 bytes)
test 3 (128 bit key, 1024 byte blocks): 1 operation in 1135 cycles (1024 bytes)
test 4 (128 bit key, 8192 byte blocks): 1 operation in 7079 cycles (8192 bytes)
test 5 (192 bit key, 16 byte blocks): 1 operation in 414 cycles (16 bytes)
test 6 (192 bit key, 64 byte blocks): 1 operation in 417 cycles (64 bytes)
test 7 (192 bit key, 256 byte blocks): 1 operation in 572 cycles (256 bytes)
test 8 (192 bit key, 1024 byte blocks): 1 operation in 1312 cycles (1024 bytes)
test 9 (192 bit key, 8192 byte blocks): 1 operation in 9073 cycles (8192 bytes)
test 10 (256 bit key, 16 byte blocks): 1 operation in 415 cycles (16 bytes)
test 11 (256 bit key, 64 byte blocks): 1 operation in 454 cycles (64 bytes)
test 12 (256 bit key, 256 byte blocks): 1 operation in 598 cycles (256 bytes)
test 13 (256 bit key, 1024 byte blocks): 1 operation in 1407 cycles (1024 bytes)
test 14 (256 bit key, 8192 byte blocks): 1 operation in 9288 cycles (8192 bytes)
crypto: Fix redundant checks
a) Fix the redundant check for cpu_has_aes
b) Fix the key length check when invoking the CTR mode "by8"
encryptor/decryptor.
crypto: fix typo in AES ctr mode transform
Signed-off-by: Chandramouli Narayanan <mouli@linux.intel.com>
Reviewed-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-06-11 00:22:47 +08:00
|
|
|
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * Based on the key length, override with the "by8" version of
	 * CTR mode encryption/decryption for improved performance.
	 * aes_set_key_common() ensures that the key length is one of
	 * {128,192,256} bits.
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}

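/*
 * CTR glue (a descriptive note, not from the original comments): walk the
 * request in virtual address chunks, encrypt whole blocks with the selected
 * counter-mode helper (aesni_ctr_enc_tfm may point at the by8/AVX variant
 * above when the CPU supports it) and finish any partial trailing block with
 * ctr_crypt_final().
 */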
static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}

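/*
 * Added note: the GHASH hash subkey is derived as the GCM specification
 * requires, by encrypting an all-zero block with the AES key, i.e.
 * H = E_K(0^128).
 */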
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_aes_ctx ctx;
	int ret;

	ret = aes_expandkey(&ctx, key, key_len);
	if (ret)
		return ret;

	/* Clear the data in the hash sub key container to zero. */
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	aes_encrypt(&ctx, hash_subkey, hash_subkey);

	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}

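/*
 * Added note: for RFC 4106 the last 4 bytes of the key material are the
 * salt. It is stashed in ctx->nonce and later becomes the first 4 bytes of
 * the per-request IV, while the remainder is the actual AES key.
 */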
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4)
		return -EINVAL;

	/* Account for 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

/* This is the Integrity Check Value (aka the authentication tag) length and can
 * be 8, 12 or 16 bytes long. */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

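/*
 * Added note: plain gcm(aes) accepts the wider set of GCM tag lengths
 * (4, 8 and 12-16 bytes), whereas RFC 4106 above restricts the ICV to
 * 8, 12 or 16 bytes.
 */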
static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

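/*
 * Added note on the flow below: the AAD is linearized first (mapped directly
 * when contiguous, otherwise copied to a heap buffer), then the payload is
 * streamed through gcm_tfm->init/enc_update/dec_update/finalize while walking
 * the source and destination scatterlists inside kernel_fpu_begin()/end().
 * On decryption the computed tag is compared against the trailing tag in the
 * source with crypto_memneq(); on encryption the tag is appended to the
 * destination.
 */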
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
	struct scatter_walk dst_sg_walk = {};
	unsigned long left = req->cryptlen;
	unsigned long len, srclen, dstlen;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk src_sg_walk;
	struct scatterlist src_start[2];
	struct scatterlist dst_start[2];
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	u8 *src, *dst, *assoc;
	u8 *assocmem = NULL;
	u8 authTag[16];

	if (!enc)
		left -= auth_tag_len;

	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
		gcm_tfm = &aesni_gcm_tfm_sse;

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			      GFP_KERNEL : GFP_ATOMIC;

		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, flags);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	if (left) {
		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
		scatterwalk_start(&src_sg_walk, src_sg);
		if (req->src != req->dst) {
			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
						  req->assoclen);
			scatterwalk_start(&dst_sg_walk, dst_sg);
		}
	}

	kernel_fpu_begin();
	gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
	if (req->src != req->dst) {
		while (left) {
			src = scatterwalk_map(&src_sg_walk);
			dst = scatterwalk_map(&dst_sg_walk);
			srclen = scatterwalk_clamp(&src_sg_walk, left);
			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
			len = min(srclen, dstlen);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, data,
							    dst, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, data,
							    dst, src, len);
			}
			left -= len;

			scatterwalk_unmap(src);
			scatterwalk_unmap(dst);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_advance(&dst_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 0, left);
			scatterwalk_done(&dst_sg_walk, 1, left);
		}
	} else {
		while (left) {
			dst = src = scatterwalk_map(&src_sg_walk);
			len = scatterwalk_clamp(&src_sg_walk, left);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, data,
							    src, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, data,
							    src, src, len);
			}
			left -= len;
			scatterwalk_unmap(src);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 1, left);
		}
	}
	gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	if (!enc) {
		u8 authTagMsg[16];

		/* Copy out original authTag */
		scatterwalk_map_and_copy(authTagMsg, req->src,
					 req->assoclen + req->cryptlen -
					 auth_tag_len,
					 auth_tag_len, 0);

		/* Compare generated tag with passed in tag. */
		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
			-EBADMSG : 0;
	}

	/* Copy in the authTag */
	scatterwalk_map_and_copy(authTag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);

	return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
				  aes_ctx);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
				  aes_ctx);
}

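/*
 * Added note on the RFC 4106 nonce construction that the loops below
 * implement:
 *
 *   iv[0..3]   = salt from the key (ctx->nonce)
 *   iv[4..11]  = 8 byte explicit IV from the request
 *   iv[12..15] = 32-bit big-endian block counter, starting at 1
 *
 * The 8 bytes of explicit IV are counted in req->assoclen, hence the
 * "req->assoclen - 8" passed down to gcmaes_encrypt()/gcmaes_decrypt().
 */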
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
	unsigned int i;
	__be32 counter = cpu_to_be32(1);

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length equal
	 * to 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV below. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length
	 * equal to 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV below. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}
#endif

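/*
 * Added note: an XTS key is two AES keys back to back; the first half
 * becomes the data (crypt) key and the second half the tweak key.
 * xts_verify_key() performs the generic sanity checks on the combined
 * key before it is split.
 */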
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}

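/*
 * Added note on ciphertext stealing: when the length is not a multiple of
 * AES_BLOCK_SIZE and the walk cannot cover it in one pass, all but the last
 * two blocks are processed first via a subrequest; the final full block plus
 * the tail are then handed to the assembly together so it can perform the
 * ciphertext-stealing step on walk.iv (the running tweak T).
 */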
static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   blocks * AES_BLOCK_SIZE, req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	kernel_fpu_begin();

	/* calculate first value of T */
	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);

	while (walk.nbytes > 0) {
		int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		if (encrypt)
			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  nbytes, walk.iv);
		else
			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  nbytes, walk.iv);
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);

		if (walk.nbytes > 0)
			kernel_fpu_begin();
	}

	if (unlikely(tail > 0 && !err)) {
		struct scatterlist sg_src[2], sg_dst[2];
		struct scatterlist *src, *dst;

		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false);
		if (err)
			return err;

		kernel_fpu_begin();
		if (encrypt)
			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  walk.nbytes, walk.iv);
		else
			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  walk.nbytes, walk.iv);
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
	return xts_crypt(req, true);
}

static int xts_decrypt(struct skcipher_request *req)
{
	return xts_crypt(req, false);
}

static struct crypto_alg aesni_cipher_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aesni_encrypt,
			.cia_decrypt		= aesni_decrypt
		}
	}
};

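/*
 * Added note: these skcipher algorithms are marked CRYPTO_ALG_INTERNAL;
 * they expect the FPU to be usable and are normally reached through the
 * simd wrapper algorithms tracked in aesni_simd_skciphers below.
 */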
static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
	}, {
		.base = {
			.cra_name		= "__cts(cbc(aes))",
			.cra_driver_name	= "__cts-cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.walksize	= 2 * AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cts_cbc_encrypt,
		.decrypt	= cts_cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
#endif
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.walksize	= 2 * AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
	}
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
|
2017-04-29 00:12:02 +08:00
|
|
|
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
|
|
|
|
unsigned int key_len)
|
|
|
|
{
|
|
|
|
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
|
|
|
|
|
|
|
|
return aes_set_key_common(crypto_aead_tfm(aead),
|
|
|
|
&ctx->aes_key_expanded, key, key_len) ?:
|
|
|
|
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
|
|
|
|
}
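/*
 * Added note: gcm(aes) uses the standard 12 byte IV; it is copied into
 * iv[0..11] and the 32-bit counter in iv[12..15] starts at 1, matching
 * the J0 construction from the GCM specification.
 */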
static int generic_gcmaes_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
	__be32 counter = cpu_to_be32(1);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

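/*
 * Added note: internal AEAD implementations, rfc4106(gcm(aes)) with the
 * implicit 4 byte salt and plain gcm(aes). Like the skciphers above they
 * are CRYPTO_ALG_INTERNAL and are meant to be used through simd wrappers
 * (see aesni_simd_aeads).
 */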
static struct aead_alg aesni_aeads[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__rfc4106(gcm(aes))",
		.cra_driver_name	= "__rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.setkey			= generic_gcmaes_set_key,
	.setauthsize		= generic_gcmaes_set_authsize,
	.encrypt		= generic_gcmaes_encrypt,
	.decrypt		= generic_gcmaes_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm(aes)",
		.cra_driver_name	= "__generic-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
	} else
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
	} else {
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
	}
test 8 (192 bit key, 1024 byte blocks): 1 operation in 1312 cycles (1024 bytes)
test 9 (192 bit key, 8192 byte blocks): 1 operation in 9073 cycles (8192 bytes)
test 10 (256 bit key, 16 byte blocks): 1 operation in 415 cycles (16 bytes)
test 11 (256 bit key, 64 byte blocks): 1 operation in 454 cycles (64 bytes)
test 12 (256 bit key, 256 byte blocks): 1 operation in 598 cycles (256 bytes)
test 13 (256 bit key, 1024 byte blocks): 1 operation in 1407 cycles (1024 bytes)
test 14 (256 bit key, 8192 byte blocks): 1 operation in 9288 cycles (8192 bytes)
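The "by8" enablement described in item f) comes down to overriding the module's
CTR transform pointer at init time. A minimal sketch of that hook, using the
aesni_ctr_enc and aesni_ctr_enc_avx_tfm symbols that appear in the glue code
below; the CONFIG_AS_AVX guard is shown for illustration and mirrors the
description above rather than quoting the patch verbatim:

	/* keep the existing "by4" transform as the default */
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif

No run-time key-size decision is made at this point; the "by8" wrapper
dispatches on the expanded key length for each request.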
crypto: Fix redundant checks
a) Fix the redundant check for cpu_has_aes
b) Fix the key length check when invoking the CTR mode "by8"
encryptor/decryptor.
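Item b) concerns the wrapper behind aesni_ctr_enc_avx_tfm, which selects a
"by8" routine according to the expanded key length. A sketch of the shape of
that dispatch, assuming the aes_ctr_enc_{128,192,256}_avx_by8 helpers
introduced by the optimization and an (in, iv, key schedule, out, length)
argument order; this is illustrative, not a quote of the fixed code:

static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * Dispatch on the expanded key length; the setkey path only ever
	 * accepts 128-, 192- or 256-bit keys, so no other case can occur.
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}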
crypto: fix typo in AES ctr mode transform
Signed-off-by: Chandramouli Narayanan <mouli@linux.intel.com>
Reviewed-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2014-06-11 00:22:47 +08:00
|
|
|
aesni_ctr_enc_tfm = aesni_ctr_enc;
|
2016-04-05 04:24:56 +08:00
|
|
|
if (boot_cpu_has(X86_FEATURE_AVX)) {
|
2014-06-11 00:22:47 +08:00
|
|
|
/* optimize performance of ctr mode encryption transform */
|
|
|
|
aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
|
|
|
|
pr_info("AES CTR mode by8 optimization enabled\n");
|
|
|
|
}
|
2013-12-30 21:52:24 +08:00
|
|
|
#endif
|
2010-11-05 03:00:45 +08:00
|
|
|
|
2019-06-03 13:44:50 +08:00
|
|
|
err = crypto_register_alg(&aesni_cipher_alg);
|
2015-05-28 22:08:03 +08:00
|
|
|
if (err)
|
2018-10-06 01:13:06 +08:00
|
|
|
return err;
|
2015-05-28 22:08:03 +08:00
|
|
|
|
2019-03-11 03:00:51 +08:00
|
|
|
err = simd_register_skciphers_compat(aesni_skciphers,
|
|
|
|
ARRAY_SIZE(aesni_skciphers),
|
|
|
|
aesni_simd_skciphers);
|
2016-11-22 20:08:33 +08:00
|
|
|
if (err)
|
2019-06-03 13:44:50 +08:00
|
|
|
goto unregister_cipher;
|
2016-11-22 20:08:33 +08:00
|
|
|
|
2019-03-11 03:00:52 +08:00
|
|
|
err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
|
|
|
|
aesni_simd_aeads);
|
2015-05-28 22:08:03 +08:00
|
|
|
if (err)
|
2016-11-22 20:08:33 +08:00
|
|
|
goto unregister_skciphers;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
unregister_skciphers:
|
2019-03-11 03:00:51 +08:00
|
|
|
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
|
|
|
|
aesni_simd_skciphers);
|
2019-06-03 13:44:50 +08:00
|
|
|
unregister_cipher:
|
|
|
|
crypto_unregister_alg(&aesni_cipher_alg);
|
2015-05-28 22:08:03 +08:00
|
|
|
return err;
|
2009-01-18 13:28:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit aesni_exit(void)
|
|
|
|
{
|
2019-03-11 03:00:52 +08:00
|
|
|
simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
|
|
|
|
aesni_simd_aeads);
|
2019-03-11 03:00:51 +08:00
|
|
|
simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
|
|
|
|
aesni_simd_skciphers);
|
2019-06-03 13:44:50 +08:00
|
|
|
crypto_unregister_alg(&aesni_cipher_alg);
|
2009-01-18 13:28:34 +08:00
|
|
|
}
|
|
|
|
|
2015-06-27 14:56:38 +08:00
|
|
|
late_initcall(aesni_init);
|
2009-01-18 13:28:34 +08:00
|
|
|
module_exit(aesni_exit);
|
|
|
|
|
|
|
|
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
|
|
|
|
MODULE_LICENSE("GPL");
|
2014-11-21 09:05:53 +08:00
|
|
|
MODULE_ALIAS_CRYPTO("aes");
|