Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.8:

  API:
   - first part of skcipher low-level conversions
   - add KPP (Key-agreement Protocol Primitives) interface

  Algorithms:
   - fix IPsec/cryptd reordering issues that affect aesni
   - RSA no longer does explicit leading zero removal
   - add SHA3
   - add DH
   - add ECDH
   - improve DRBG performance by not doing CTR by hand

  Drivers:
   - add x86 AVX2 multibuffer SHA256/512
   - add POWER8 optimised crc32c
   - add xts support to vmx
   - add DH support to qat
   - add RSA support to caam
   - add Layerscape support to caam
   - add SEC1 AEAD support to talitos
   - improve performance by chaining requests in marvell/cesa
   - add support for Araneus Alea I USB RNG
   - add support for Broadcom BCM5301 RNG
   - add support for Amlogic Meson RNG
   - add support for Broadcom NSP SoC RNG"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (180 commits)
  crypto: vmx - Fix aes_p8_xts_decrypt build failure
  crypto: vmx - Ignore generated files
  crypto: vmx - Adding support for XTS
  crypto: vmx - Adding asm subroutines for XTS
  crypto: skcipher - add comment for skcipher_alg->base
  crypto: testmgr - Print akcipher algorithm name
  crypto: marvell - Fix wrong flag used for GFP in mv_cesa_dma_add_iv_op
  crypto: nx - off by one bug in nx_of_update_msc()
  crypto: rsa-pkcs1pad - fix rsa-pkcs1pad request struct
  crypto: scatterwalk - Inline start/map/done
  crypto: scatterwalk - Remove unnecessary BUG in scatterwalk_start
  crypto: scatterwalk - Remove unnecessary advance in scatterwalk_pagedone
  crypto: scatterwalk - Fix test in scatterwalk_done
  crypto: api - Optimise away crypto_yield when hard preemption is on
  crypto: scatterwalk - add no-copy support to copychunks
  crypto: scatterwalk - Remove scatterwalk_bytes_sglen
  crypto: omap - Stop using crypto scatterwalk_bytes_sglen
  crypto: skcipher - Remove top-level givcipher interface
  crypto: user - Remove crypto_lookup_skcipher call
  crypto: cts - Convert to skcipher
  ...
commit bbce2ad2d7
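Of the API changes above, KPP is the one genuinely new abstraction: key agreement (DH, ECDH) gets its own transform type alongside skcipher and akcipher. Below is a minimal sketch of how a caller might drive the new "dh" implementation through that interface, assuming the include/crypto/kpp.h and include/crypto/dh.h helpers merged in this cycle; error paths are abbreviated and asynchronous completion is not handled, so treat it as an illustration rather than reference code.

	/*
	 * Sketch only: generate a DH public key via the new KPP API.
	 * Assumes a synchronous backend; -EINPROGRESS handling omitted.
	 */
	#include <crypto/kpp.h>
	#include <crypto/dh.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int dh_pubkey_sketch(struct dh *params, u8 *pubkey,
				    unsigned int len)
	{
		struct crypto_kpp *tfm;
		struct kpp_request *req = NULL;
		struct scatterlist dst;
		void *secret = NULL;
		unsigned int secret_len;
		int err;

		tfm = crypto_alloc_kpp("dh", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* Pack p, g and the private key into the DH secret blob. */
		secret_len = crypto_dh_key_len(params);
		err = -ENOMEM;
		secret = kmalloc(secret_len, GFP_KERNEL);
		if (!secret)
			goto out;
		err = crypto_dh_encode_key(secret, secret_len, params);
		if (err)
			goto out;

		err = crypto_kpp_set_secret(tfm, secret, secret_len);
		if (err)
			goto out;

		err = -ENOMEM;
		req = kpp_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			goto out;

		sg_init_one(&dst, pubkey, len);
		kpp_request_set_input(req, NULL, 0);
		kpp_request_set_output(req, &dst, len);
		err = crypto_kpp_generate_public_key(req);
	out:
		kpp_request_free(req);
		kfree(secret);
		crypto_free_kpp(tfm);
		return err;
	}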
@@ -440,8 +440,8 @@
 The type flag specifies the type of the cipher algorithm.
 The caller usually provides a 0 when the caller wants the
 default handling. Otherwise, the caller may provide the
-following selections which match the the aforementioned
-cipher types:
+following selections which match the aforementioned cipher
+types:
 </para>
 
 <itemizedlist>
@@ -76,7 +76,7 @@ the criterion string:
 Looking in /proc/keys, the last 8 hex digits of the key fingerprint are
 displayed, along with the subtype:
 
-	1a39e171 I----- 1 perm 3f010000 0 0 asymmetri modsign.0: DSA 5acc2142 []
+	1a39e171 I----- 1 perm 3f010000 0 0 asymmetric modsign.0: DSA 5acc2142 []
 
 
 =========================
@@ -2,7 +2,8 @@ BCM2835 Random number generator
 
 Required properties:
 
-- compatible : should be "brcm,bcm2835-rng"
+- compatible : should be "brcm,bcm2835-rng" or "brcm,bcm-nsp-rng" or
+  "brcm,bcm5301x-rng"
 - reg : Specifies base physical address and size of the registers.
 
 Example:
@@ -11,3 +12,8 @@ rng {
 	compatible = "brcm,bcm2835-rng";
 	reg = <0x7e104000 0x10>;
 };
+
+rng@18033000 {
+	compatible = "brcm,bcm-nsp-rng";
+	reg = <0x18033000 0x14>;
+};
@@ -3286,6 +3286,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
 S:	Maintained
 F:	Documentation/crypto/
+F:	Documentation/devicetree/bindings/crypto/
 F:	Documentation/DocBook/crypto-API.tmpl
 F:	arch/*/crypto/
 F:	crypto/
@@ -5273,6 +5274,7 @@ M:	Matt Mackall <mpm@selenic.com>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
 L:	linux-crypto@vger.kernel.org
 S:	Odd fixes
+F:	Documentation/devicetree/bindings/rng/
 F:	Documentation/hw_random.txt
 F:	drivers/char/hw_random/
 F:	include/linux/hw_random.h
@@ -9318,7 +9320,8 @@ L:	rtc-linux@googlegroups.com
 S:	Maintained
 
 QAT DRIVER
-M:	Tadeusz Struk <tadeusz.struk@intel.com>
+M:	Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+M:	Salvatore Benedetto <salvatore.benedetto@intel.com>
 L:	qat-linux@intel.com
 S:	Supported
 F:	drivers/crypto/qat/
@@ -206,6 +206,11 @@
 			brcm,nand-has-wp;
 		};
 
+		rng: rng@33000 {
+			compatible = "brcm,bcm-nsp-rng";
+			reg = <0x33000 0x14>;
+		};
+
 		ccbtimer0: timer@34000 {
 			compatible = "arm,sp804";
 			reg = <0x34000 0x1000>;
@@ -154,30 +154,23 @@ static int ghash_async_init(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
-	if (!may_use_simd()) {
-		memcpy(cryptd_req, req, sizeof(*req));
-		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
-		return crypto_ahash_init(cryptd_req);
-	} else {
-		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
-		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
-		desc->tfm = child;
-		desc->flags = req->base.flags;
-		return crypto_shash_init(desc);
-	}
+	desc->tfm = child;
+	desc->flags = req->base.flags;
+	return crypto_shash_init(desc);
 }
 
 static int ghash_async_update(struct ahash_request *req)
 {
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd()) {
-		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-		struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
-		struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_update(cryptd_req);
@@ -190,12 +183,12 @@ static int ghash_async_update(struct ahash_request *req)
 static int ghash_async_final(struct ahash_request *req)
 {
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd()) {
-		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-		struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
-		struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_final(cryptd_req);
@@ -212,7 +205,8 @@ static int ghash_async_digest(struct ahash_request *req)
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd()) {
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_digest(cryptd_req);
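The ghash changes above encode one rule in each entry point: a request may take the direct SIMD path only when SIMD is usable and it cannot overtake requests that cryptd already holds, which is what caused the IPsec reordering. As a sketch, the predicate pulled out of the diff for clarity (illustrative helper name, not kernel code):

	/*
	 * Sketch of the deferral rule used above: fall back to the cryptd
	 * queue when SIMD is unavailable, or when we are in atomic context
	 * and cryptd already has queued requests, so completions cannot be
	 * reordered against earlier submissions.
	 */
	static bool must_defer_to_cryptd(struct cryptd_ahash *cryptd_tfm)
	{
		return !may_use_simd() ||
		       (in_atomic() && cryptd_ahash_queued(cryptd_tfm));
	}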
@@ -49,6 +49,10 @@
 
 / {
 	model = "LS1043A RDB Board";
+
+	aliases {
+		crypto = &crypto;
+	};
 };
 
 &i2c0 {
@@ -159,6 +159,49 @@
 			big-endian;
 		};
 
+		crypto: crypto@1700000 {
+			compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
+				     "fsl,sec-v4.0";
+			fsl,sec-era = <3>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x00 0x1700000 0x100000>;
+			reg = <0x00 0x1700000 0x0 0x100000>;
+			interrupts = <0 75 0x4>;
+
+			sec_jr0: jr@10000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x10000 0x10000>;
+				interrupts = <0 71 0x4>;
+			};
+
+			sec_jr1: jr@20000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x20000 0x10000>;
+				interrupts = <0 72 0x4>;
+			};
+
+			sec_jr2: jr@30000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x30000 0x10000>;
+				interrupts = <0 73 0x4>;
+			};
+
+			sec_jr3: jr@40000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x40000 0x10000>;
+				interrupts = <0 74 0x4>;
+			};
+		};
+
 		dcfg: dcfg@1ee0000 {
 			compatible = "fsl,ls1043a-dcfg", "syscon";
 			reg = <0x0 0x1ee0000 0x0 0x10000>;
@@ -174,13 +174,15 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 #define iounmap				__iounmap
 
 /*
- * io{read,write}{16,32}be() macros
+ * io{read,write}{16,32,64}be() macros
  */
 #define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
 #define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
 
 #define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
 #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+#define iowrite64be(v,p)	({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
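For illustration, a hypothetical driver fragment using the new 64-bit big-endian accessors; the register name and offset are invented for the example, and only the accessor usage is taken from the hunk above:

	/*
	 * Read-modify-write of an assumed 64-bit big-endian control
	 * register, using the ioread64be()/iowrite64be() added above.
	 */
	#include <linux/io.h>

	#define DEV_CTRL_REG	0x40	/* hypothetical register offset */

	static u64 dev_toggle_enable(void __iomem *base)
	{
		u64 ctrl = ioread64be(base + DEV_CTRL_REG);

		iowrite64be(ctrl ^ 1, base + DEV_CTRL_REG);
		return ctrl;
	}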
@@ -9,9 +9,11 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
 obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
 obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
 obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
+obj-$(CONFIG_CRYPT_CRC32C_VPMSUM) += crc32c-vpmsum.o
 
 aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
 md5-ppc-y := md5-asm.o md5-glue.o
 sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
 sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
+crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
@@ -18,7 +18,7 @@
 #define rLN	r7	/* length of data to be processed	*/
 #define rIP	r8	/* potiner to IV (CBC/CTR/XTS modes)	*/
 #define rKT	r9	/* pointer to tweak key (XTS mode)	*/
-#define rT0	r11	/* pointers to en-/decrpytion tables	*/
+#define rT0	r11	/* pointers to en-/decryption tables	*/
 #define rT1	r10
 #define rD0	r9	/* data					*/
 #define rD1	r14
[diff suppressed because the file is too large]
@@ -0,0 +1,167 @@
+#include <linux/crc32.h>
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/switch_to.h>
+
+#define CHKSUM_BLOCK_SIZE	1
+#define CHKSUM_DIGEST_SIZE	4
+
+#define VMX_ALIGN		16
+#define VMX_ALIGN_MASK		(VMX_ALIGN-1)
+
+#define VECTOR_BREAKPOINT	512
+
+u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len);
+
+static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
+{
+	unsigned int prealign;
+	unsigned int tail;
+
+	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+		return __crc32c_le(crc, p, len);
+
+	if ((unsigned long)p & VMX_ALIGN_MASK) {
+		prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
+		crc = __crc32c_le(crc, p, prealign);
+		len -= prealign;
+		p += prealign;
+	}
+
+	if (len & ~VMX_ALIGN_MASK) {
+		pagefault_disable();
+		enable_kernel_altivec();
+		crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+		pagefault_enable();
+	}
+
+	tail = len & VMX_ALIGN_MASK;
+	if (tail) {
+		p += len & ~VMX_ALIGN_MASK;
+		crc = __crc32c_le(crc, p, tail);
+	}
+
+	return crc;
+}
+
+static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = 0;
+
+	return 0;
+}
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key,
+				unsigned int keylen)
+{
+	u32 *mctx = crypto_shash_ctx(hash);
+
+	if (keylen != sizeof(u32)) {
+		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	*mctx = le32_to_cpup((__le32 *)key);
+	return 0;
+}
+
+static int crc32c_vpmsum_init(struct shash_desc *desc)
+{
+	u32 *mctx = crypto_shash_ctx(desc->tfm);
+	u32 *crcp = shash_desc_ctx(desc);
+
+	*crcp = *mctx;
+
+	return 0;
+}
+
+static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data,
+				unsigned int len)
+{
+	u32 *crcp = shash_desc_ctx(desc);
+
+	*crcp = crc32c_vpmsum(*crcp, data, len);
+
+	return 0;
+}
+
+static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len,
+				 u8 *out)
+{
+	*(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len));
+
+	return 0;
+}
+
+static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data,
+			       unsigned int len, u8 *out)
+{
+	return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *crcp = shash_desc_ctx(desc);
+
+	*(__le32 *)out = ~cpu_to_le32p(crcp);
+
+	return 0;
+}
+
+static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data,
+				unsigned int len, u8 *out)
+{
+	return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len,
+				     out);
+}
+
+static struct shash_alg alg = {
+	.setkey		= crc32c_vpmsum_setkey,
+	.init		= crc32c_vpmsum_init,
+	.update		= crc32c_vpmsum_update,
+	.final		= crc32c_vpmsum_final,
+	.finup		= crc32c_vpmsum_finup,
+	.digest		= crc32c_vpmsum_digest,
+	.descsize	= sizeof(u32),
+	.digestsize	= CHKSUM_DIGEST_SIZE,
+	.base		= {
+		.cra_name		= "crc32c",
+		.cra_driver_name	= "crc32c-vpmsum",
+		.cra_priority		= 200,
+		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(u32),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= crc32c_vpmsum_cra_init,
+	}
+};
+
+static int __init crc32c_vpmsum_mod_init(void)
+{
+	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+		return -ENODEV;
+
+	return crypto_register_shash(&alg);
+}
+
+static void __exit crc32c_vpmsum_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(crc32c_vpmsum_mod_init);
+module_exit(crc32c_vpmsum_mod_fini);
+
+MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
+MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-vpmsum");
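A usage sketch, not part of the patch: hashing a buffer through the shash API that this driver registers. "crc32c" resolves to the highest-priority implementation, so on POWER8 this would now select crc32c-vpmsum; the desc->flags assignment follows the 4.8-era shash API, and error handling is abbreviated.

	/* Sketch: one-shot CRC32C of a buffer via the crypto shash API. */
	#include <crypto/hash.h>

	static int crc32c_of(const u8 *buf, unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("crc32c", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			/* digest() = init() + update() + final() in one call */
			err = crypto_shash_digest(desc, buf, len, out);
		}

		crypto_free_shash(tfm);
		return err;
	}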
@@ -174,6 +174,8 @@
 #define PPC_INST_MFSPR_DSCR_USER_MASK	0xfc1fffff
 #define PPC_INST_MTSPR_DSCR_USER	0x7c0303a6
 #define PPC_INST_MTSPR_DSCR_USER_MASK	0xfc1fffff
+#define PPC_INST_MFVSRD			0x7c000066
+#define PPC_INST_MTVSRD			0x7c000166
 #define PPC_INST_SLBFEE			0x7c0007a7
 
 #define PPC_INST_STRING			0x7c00042a
@@ -188,6 +190,8 @@
 #define PPC_INST_WAIT			0x7c00007c
 #define PPC_INST_TLBIVAX		0x7c000624
 #define PPC_INST_TLBSRX_DOT		0x7c0006a5
+#define PPC_INST_VPMSUMW		0x10000488
+#define PPC_INST_VPMSUMD		0x100004c8
 #define PPC_INST_XXLOR			0xf0000510
 #define PPC_INST_XXSWAPD		0xf0000250
 #define PPC_INST_XVCPSGNDP		0xf0000780
@@ -359,6 +363,14 @@
 					       VSX_XX1((s), a, b))
 #define LXVD2X(s, a, b)		stringify_in_c(.long PPC_INST_LXVD2X | \
 					       VSX_XX1((s), a, b))
+#define MFVRD(a, t)		stringify_in_c(.long PPC_INST_MFVSRD | \
+					       VSX_XX1((t)+32, a, R0))
+#define MTVRD(t, a)		stringify_in_c(.long PPC_INST_MTVSRD | \
+					       VSX_XX1((t)+32, a, R0))
+#define VPMSUMW(t, a, b)	stringify_in_c(.long PPC_INST_VPMSUMW | \
+					       VSX_XX3((t), a, b))
+#define VPMSUMD(t, a, b)	stringify_in_c(.long PPC_INST_VPMSUMD | \
+					       VSX_XX3((t), a, b))
 #define XXLOR(t, a, b)		stringify_in_c(.long PPC_INST_XXLOR | \
 					       VSX_XX3((t), a, b))
 #define XXSWAPD(t, a)		stringify_in_c(.long PPC_INST_XXSWAPD | \
@@ -286,6 +286,9 @@ n:
 
 #endif
 
+#define FUNC_START(name)	_GLOBAL(name)
+#define FUNC_END(name)
+
 /*
  * LOAD_REG_IMMEDIATE(rn, expr)
  * Loads the value of the constant expression 'expr' into register 'rn'
@@ -38,6 +38,18 @@ EXPORT_SYMBOL(ioread16);
 EXPORT_SYMBOL(ioread16be);
 EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
+#ifdef __powerpc64__
+u64 ioread64(void __iomem *addr)
+{
+	return readq(addr);
+}
+u64 ioread64be(void __iomem *addr)
+{
+	return readq_be(addr);
+}
+EXPORT_SYMBOL(ioread64);
+EXPORT_SYMBOL(ioread64be);
+#endif /* __powerpc64__ */
 
 void iowrite8(u8 val, void __iomem *addr)
 {
@@ -64,6 +76,18 @@ EXPORT_SYMBOL(iowrite16);
 EXPORT_SYMBOL(iowrite16be);
 EXPORT_SYMBOL(iowrite32);
 EXPORT_SYMBOL(iowrite32be);
+#ifdef __powerpc64__
+void iowrite64(u64 val, void __iomem *addr)
+{
+	writeq(val, addr);
+}
+void iowrite64be(u64 val, void __iomem *addr)
+{
+	writeq_be(val, addr);
+}
+EXPORT_SYMBOL(iowrite64);
+EXPORT_SYMBOL(iowrite64be);
+#endif /* __powerpc64__ */
 
 /*
  * These are the "repeat read/write" functions. Note the
@@ -22,6 +22,7 @@
 
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
@@ -44,7 +45,7 @@ struct s390_aes_ctx {
 	long dec;
 	int key_len;
 	union {
-		struct crypto_blkcipher *blk;
+		struct crypto_skcipher *blk;
 		struct crypto_cipher *cip;
 	} fallback;
 };
@@ -63,7 +64,7 @@ struct s390_xts_ctx {
 	long enc;
 	long dec;
 	int key_len;
-	struct crypto_blkcipher *fallback;
+	struct crypto_skcipher *fallback;
 };
 
 /*
@@ -237,16 +238,16 @@ static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 	unsigned int ret;
 
-	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+	crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
 			CRYPTO_TFM_REQ_MASK);
 
-	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
-	if (ret) {
-		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
-				CRYPTO_TFM_RES_MASK);
-	}
+	ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);
+
+	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+	tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
+			  CRYPTO_TFM_RES_MASK;
+
 	return ret;
 }
 
@@ -255,15 +256,17 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
 		unsigned int nbytes)
 {
 	unsigned int ret;
-	struct crypto_blkcipher *tfm;
-	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_blkcipher *tfm = desc->tfm;
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
+	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
 
-	tfm = desc->tfm;
-	desc->tfm = sctx->fallback.blk;
+	skcipher_request_set_tfm(req, sctx->fallback.blk);
+	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 
-	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+	ret = crypto_skcipher_decrypt(req);
 
-	desc->tfm = tfm;
+	skcipher_request_zero(req);
 	return ret;
 }
 
@@ -272,15 +275,15 @@ static int fallback_blk_enc(struct blkcipher_desc *desc,
 		unsigned int nbytes)
 {
 	unsigned int ret;
-	struct crypto_blkcipher *tfm;
-	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_blkcipher *tfm = desc->tfm;
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
+	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
 
-	tfm = desc->tfm;
-	desc->tfm = sctx->fallback.blk;
+	skcipher_request_set_tfm(req, sctx->fallback.blk);
+	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 
-	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
-
-	desc->tfm = tfm;
+	ret = crypto_skcipher_encrypt(req);
 	return ret;
 }
 
@@ -370,8 +373,9 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
 	const char *name = tfm->__crt_alg->cra_name;
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
+						   CRYPTO_ALG_ASYNC |
+						   CRYPTO_ALG_NEED_FALLBACK);
 
 	if (IS_ERR(sctx->fallback.blk)) {
 		pr_err("Allocating AES fallback algorithm %s failed\n",
@@ -386,8 +390,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-	crypto_free_blkcipher(sctx->fallback.blk);
-	sctx->fallback.blk = NULL;
+	crypto_free_skcipher(sctx->fallback.blk);
 }
 
 static struct crypto_alg ecb_aes_alg = {
@@ -536,16 +539,16 @@ static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
 	unsigned int ret;
 
-	xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-	xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
+	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
 			CRYPTO_TFM_REQ_MASK);
 
-	ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
-	if (ret) {
-		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-		tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
-				CRYPTO_TFM_RES_MASK);
-	}
+	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
+
+	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+	tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
+			  CRYPTO_TFM_RES_MASK;
+
 	return ret;
 }
 
@@ -553,16 +556,18 @@ static int xts_fallback_decrypt(struct blkcipher_desc *desc,
 		struct scatterlist *dst, struct scatterlist *src,
 		unsigned int nbytes)
 {
-	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct crypto_blkcipher *tfm;
+	struct crypto_blkcipher *tfm = desc->tfm;
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
+	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
 	unsigned int ret;
 
-	tfm = desc->tfm;
-	desc->tfm = xts_ctx->fallback;
+	skcipher_request_set_tfm(req, xts_ctx->fallback);
+	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 
-	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+	ret = crypto_skcipher_decrypt(req);
 
-	desc->tfm = tfm;
+	skcipher_request_zero(req);
 	return ret;
 }
 
@@ -570,16 +575,18 @@ static int xts_fallback_encrypt(struct blkcipher_desc *desc,
 		struct scatterlist *dst, struct scatterlist *src,
 		unsigned int nbytes)
 {
-	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct crypto_blkcipher *tfm;
+	struct crypto_blkcipher *tfm = desc->tfm;
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
+	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
 	unsigned int ret;
 
-	tfm = desc->tfm;
-	desc->tfm = xts_ctx->fallback;
+	skcipher_request_set_tfm(req, xts_ctx->fallback);
+	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 
-	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+	ret = crypto_skcipher_encrypt(req);
 
-	desc->tfm = tfm;
+	skcipher_request_zero(req);
 	return ret;
 }
 
@@ -700,8 +707,9 @@ static int xts_fallback_init(struct crypto_tfm *tfm)
 	const char *name = tfm->__crt_alg->cra_name;
 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
 
-	xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
+						  CRYPTO_ALG_ASYNC |
+						  CRYPTO_ALG_NEED_FALLBACK);
 
 	if (IS_ERR(xts_ctx->fallback)) {
 		pr_err("Allocating XTS fallback algorithm %s failed\n",
@@ -715,8 +723,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
 {
 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
 
-	crypto_free_blkcipher(xts_ctx->fallback);
-	xts_ctx->fallback = NULL;
+	crypto_free_skcipher(xts_ctx->fallback);
 }
 
 static struct crypto_alg xts_aes_alg = {
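All of the s390 conversions above follow one shape: instead of temporarily swapping desc->tfm to the fallback blkcipher, the driver keeps a crypto_skcipher and drives it through an on-stack request. A sketch of that pattern with illustrative names (not code from the patch):

	/*
	 * Generic fallback path: build an on-stack skcipher request,
	 * run it, then wipe the request state off the stack.
	 */
	static int do_fallback(struct crypto_skcipher *fallback,
			       struct scatterlist *dst, struct scatterlist *src,
			       unsigned int nbytes, void *iv, u32 flags,
			       bool enc)
	{
		SKCIPHER_REQUEST_ON_STACK(req, fallback);
		int ret;

		skcipher_request_set_tfm(req, fallback);
		skcipher_request_set_callback(req, flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, iv);

		ret = enc ? crypto_skcipher_encrypt(req) :
			    crypto_skcipher_decrypt(req);

		skcipher_request_zero(req);
		return ret;
	}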
@@ -49,7 +49,9 @@ endif
 ifeq ($(avx2_supported),yes)
 	obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
 	obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
-	obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
+	obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
+	obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
+	obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
 endif
 
 aes-i586-y := aes-i586-asm_32.o aes_glue.o
@@ -59,17 +59,6 @@ struct aesni_rfc4106_gcm_ctx {
 	u8 nonce[4];
 };
 
-struct aesni_gcm_set_hash_subkey_result {
-	int err;
-	struct completion completion;
-};
-
-struct aesni_hash_subkey_req_data {
-	u8 iv[16];
-	struct aesni_gcm_set_hash_subkey_result result;
-	struct scatterlist sg;
-};
-
 struct aesni_lrw_ctx {
 	struct lrw_table_ctx lrw_table;
 	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
@@ -809,71 +798,28 @@ static void rfc4106_exit(struct crypto_aead *aead)
 	cryptd_free_aead(*ctx);
 }
 
-static void
-rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
-{
-	struct aesni_gcm_set_hash_subkey_result *result = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-	result->err = err;
-	complete(&result->completion);
-}
-
 static int
 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 {
-	struct crypto_ablkcipher *ctr_tfm;
-	struct ablkcipher_request *req;
-	int ret = -EINVAL;
-	struct aesni_hash_subkey_req_data *req_data;
+	struct crypto_cipher *tfm;
+	int ret;
 
-	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
-	if (IS_ERR(ctr_tfm))
-		return PTR_ERR(ctr_tfm);
+	tfm = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
 
-	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
+	ret = crypto_cipher_setkey(tfm, key, key_len);
 	if (ret)
-		goto out_free_ablkcipher;
-
-	ret = -ENOMEM;
-	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
-	if (!req)
-		goto out_free_ablkcipher;
-
-	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
-	if (!req_data)
-		goto out_free_request;
-
-	memset(req_data->iv, 0, sizeof(req_data->iv));
+		goto out_free_cipher;
 
 	/* Clear the data in the hash sub key container to zero.*/
 	/* We want to cipher all zeros to create the hash sub key. */
 	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
 
-	init_completion(&req_data->result.completion);
-	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
-	ablkcipher_request_set_tfm(req, ctr_tfm);
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
-					CRYPTO_TFM_REQ_MAY_BACKLOG,
-					rfc4106_set_hash_subkey_done,
-					&req_data->result);
-
-	ablkcipher_request_set_crypt(req, &req_data->sg,
-		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
-
-	ret = crypto_ablkcipher_encrypt(req);
-	if (ret == -EINPROGRESS || ret == -EBUSY) {
-		ret = wait_for_completion_interruptible
-			(&req_data->result.completion);
-		if (!ret)
-			ret = req_data->result.err;
-	}
-	kfree(req_data);
-out_free_request:
-	ablkcipher_request_free(req);
-out_free_ablkcipher:
-	crypto_free_ablkcipher(ctr_tfm);
+	crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
+
+out_free_cipher:
+	crypto_free_cipher(tfm);
 	return ret;
 }
 
@@ -1098,9 +1044,12 @@ static int rfc4106_encrypt(struct aead_request *req)
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
 
-	aead_request_set_tfm(req, irq_fpu_usable() ?
-				  cryptd_aead_child(cryptd_tfm) :
-				  &cryptd_tfm->base);
+	tfm = &cryptd_tfm->base;
+	if (irq_fpu_usable() && (!in_atomic() ||
+				 !cryptd_aead_queued(cryptd_tfm)))
+		tfm = cryptd_aead_child(cryptd_tfm);
+
+	aead_request_set_tfm(req, tfm);
 
 	return crypto_aead_encrypt(req);
 }
@@ -1111,9 +1060,12 @@ static int rfc4106_decrypt(struct aead_request *req)
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
 
-	aead_request_set_tfm(req, irq_fpu_usable() ?
-				  cryptd_aead_child(cryptd_tfm) :
-				  &cryptd_tfm->base);
+	tfm = &cryptd_tfm->base;
+	if (irq_fpu_usable() && (!in_atomic() ||
+				 !cryptd_aead_queued(cryptd_tfm)))
+		tfm = cryptd_aead_child(cryptd_tfm);
+
+	aead_request_set_tfm(req, tfm);
 
 	return crypto_aead_decrypt(req);
 }
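The hash-subkey simplification above rests on a small identity: ctr(aes) with an all-zero IV, applied to a single all-zero block, XORs the keystream E_K(0^128) into zeros, so the output is exactly the raw AES encryption of the zero block:

	H = CTR-ENC(K, IV = 0^128, P = 0^128)
	  = E_K(0^128) XOR 0^128
	  = E_K(0^128)

Hence the asynchronous ctr(aes) request machinery, completion callback and scatterlist setup can all be replaced by one synchronous crypto_cipher_encrypt_one() call.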
@@ -70,7 +70,7 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
 	struct blkcipher_walk walk;
 	int err;
 
-	if (!may_use_simd())
+	if (nbytes <= CHACHA20_BLOCK_SIZE || !may_use_simd())
 		return crypto_chacha20_crypt(desc, dst, src, nbytes);
 
 	state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
@@ -168,30 +168,23 @@ static int ghash_async_init(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
-	if (!irq_fpu_usable()) {
-		memcpy(cryptd_req, req, sizeof(*req));
-		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
-		return crypto_ahash_init(cryptd_req);
-	} else {
-		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
-		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
-		desc->tfm = child;
-		desc->flags = req->base.flags;
-		return crypto_shash_init(desc);
-	}
+	desc->tfm = child;
+	desc->flags = req->base.flags;
+	return crypto_shash_init(desc);
 }
 
 static int ghash_async_update(struct ahash_request *req)
 {
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable()) {
-		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-		struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
-		struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+	if (!irq_fpu_usable() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_update(cryptd_req);
@@ -204,12 +197,12 @@ static int ghash_async_update(struct ahash_request *req)
 static int ghash_async_final(struct ahash_request *req)
 {
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable()) {
-		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-		struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
-		struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+	if (!irq_fpu_usable() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_final(cryptd_req);
@@ -249,7 +242,8 @@ static int ghash_async_digest(struct ahash_request *req)
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable()) {
+	if (!irq_fpu_usable() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_digest(cryptd_req);
@@ -67,7 +67,7 @@
 #include <asm/byteorder.h>
 #include <linux/hardirq.h>
 #include <asm/fpu/api.h>
-#include "sha_mb_ctx.h"
+#include "sha1_mb_ctx.h"
 
 #define FLUSH_INTERVAL 1000 /* in usec */
 
@@ -77,30 +77,34 @@ struct sha1_mb_ctx {
 	struct mcryptd_ahash *mcryptd_tfm;
 };
 
-static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
+static inline struct mcryptd_hash_request_ctx
+		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
 {
-	struct shash_desc *desc;
+	struct ahash_request *areq;
 
-	desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
-	return container_of(desc, struct mcryptd_hash_request_ctx, desc);
+	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
 }
 
-static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
+static inline struct ahash_request
+		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
 {
 	return container_of((void *) ctx, struct ahash_request, __ctx);
 }
 
 static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
-			 struct shash_desc *desc)
+			 struct ahash_request *areq)
 {
 	rctx->flag = HASH_UPDATE;
 }
 
 static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state,
-							  struct job_sha1 *job);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
+			(struct sha1_mb_mgr *state, struct job_sha1 *job);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
+			(struct sha1_mb_mgr *state);
+static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
+			(struct sha1_mb_mgr *state);
 
 static inline void sha1_init_digest(uint32_t *digest)
 {
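The two cast helpers above depend on the sha1_hash_ctx state being embedded in the request's __ctx area, so container_of can walk from a pointer to the embedded member back to the enclosing object. A toy, self-contained illustration of that recovery (not kernel code; the struct and names are invented):

	/* Recover an outer object from a pointer to one of its members. */
	#include <stddef.h>

	struct outer {
		int flag;
		char ctx[64];	/* state embedded at a known offset */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct outer *outer_from_ctx(char *ctx)
	{
		return container_of(ctx, struct outer, ctx);
	}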
@ -131,7 +135,8 @@ static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
|
||||||
return i >> SHA1_LOG2_BLOCK_SIZE;
|
return i >> SHA1_LOG2_BLOCK_SIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx)
|
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
|
||||||
|
struct sha1_hash_ctx *ctx)
|
||||||
{
|
{
|
||||||
while (ctx) {
|
while (ctx) {
|
||||||
if (ctx->status & HASH_CTX_STS_COMPLETE) {
|
if (ctx->status & HASH_CTX_STS_COMPLETE) {
|
||||||
|
@ -177,8 +182,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
|
||||||
|
|
||||||
ctx->job.buffer = (uint8_t *) buffer;
|
ctx->job.buffer = (uint8_t *) buffer;
|
||||||
ctx->job.len = len;
|
ctx->job.len = len;
|
||||||
ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr,
|
ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
|
||||||
&ctx->job);
|
&ctx->job);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -191,13 +196,15 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
|
||||||
if (ctx->status & HASH_CTX_STS_LAST) {
|
if (ctx->status & HASH_CTX_STS_LAST) {
|
||||||
|
|
||||||
uint8_t *buf = ctx->partial_block_buffer;
|
uint8_t *buf = ctx->partial_block_buffer;
|
||||||
uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length);
|
uint32_t n_extra_blocks =
|
||||||
|
sha1_pad(buf, ctx->total_length);
|
||||||
|
|
||||||
ctx->status = (HASH_CTX_STS_PROCESSING |
|
ctx->status = (HASH_CTX_STS_PROCESSING |
|
||||||
HASH_CTX_STS_COMPLETE);
|
HASH_CTX_STS_COMPLETE);
|
||||||
ctx->job.buffer = buf;
|
ctx->job.buffer = buf;
|
||||||
ctx->job.len = (uint32_t) n_extra_blocks;
|
ctx->job.len = (uint32_t) n_extra_blocks;
|
||||||
ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
|
ctx = (struct sha1_hash_ctx *)
|
||||||
|
sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -208,14 +215,17 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
|
static struct sha1_hash_ctx
|
||||||
|
*sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* If get_comp_job returns NULL, there are no jobs complete.
|
* If get_comp_job returns NULL, there are no jobs complete.
|
||||||
* If get_comp_job returns a job, verify that it is safe to return to the user.
|
* If get_comp_job returns a job, verify that it is safe to return to
|
||||||
|
* the user.
|
||||||
* If it is not ready, resubmit the job to finish processing.
|
* If it is not ready, resubmit the job to finish processing.
|
||||||
* If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
|
* If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
|
||||||
* Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing.
|
* Otherwise, all jobs currently being managed by the hash_ctx_mgr
|
||||||
|
* still need processing.
|
||||||
*/
|
*/
|
||||||
struct sha1_hash_ctx *ctx;
|
struct sha1_hash_ctx *ctx;
|
||||||
|
|
||||||
|
@ -235,7 +245,10 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
|
||||||
int flags)
|
int flags)
|
||||||
{
|
{
|
||||||
if (flags & (~HASH_ENTIRE)) {
|
if (flags & (~HASH_ENTIRE)) {
|
||||||
/* User should not pass anything other than FIRST, UPDATE, or LAST */
|
/*
|
||||||
|
* User should not pass anything other than FIRST, UPDATE, or
|
||||||
|
* LAST
|
||||||
|
*/
|
||||||
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
|
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
|
||||||
return ctx;
|
return ctx;
|
||||||
}
|
}
|
||||||
|
@ -264,14 +277,20 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
|
||||||
ctx->partial_block_buffer_length = 0;
|
ctx->partial_block_buffer_length = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* If we made it here, there were no errors during this call to submit */
|
/*
|
||||||
|
* If we made it here, there were no errors during this call to
|
||||||
|
* submit
|
||||||
|
*/
|
||||||
ctx->error = HASH_CTX_ERROR_NONE;
|
ctx->error = HASH_CTX_ERROR_NONE;
|
||||||
|
|
||||||
/* Store buffer ptr info from user */
|
/* Store buffer ptr info from user */
|
||||||
ctx->incoming_buffer = buffer;
|
ctx->incoming_buffer = buffer;
|
||||||
ctx->incoming_buffer_length = len;
|
ctx->incoming_buffer_length = len;
|
||||||
|
|
||||||
/* Store the user's request flags and mark this ctx as currently being processed. */
|
/*
|
||||||
|
* Store the user's request flags and mark this ctx as currently
|
||||||
|
* being processed.
|
||||||
|
*/
|
||||||
ctx->status = (flags & HASH_LAST) ?
|
ctx->status = (flags & HASH_LAST) ?
|
||||||
(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
|
(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
|
||||||
HASH_CTX_STS_PROCESSING;
|
HASH_CTX_STS_PROCESSING;
|
||||||
|
@ -285,9 +304,13 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
|
||||||
* Or if the user's buffer contains less than a whole block,
|
* Or if the user's buffer contains less than a whole block,
|
||||||
* append as much as possible to the extra block.
|
* append as much as possible to the extra block.
|
||||||
*/
|
*/
|
||||||
if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) {
|
if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
|
||||||
/* Compute how many bytes to copy from user buffer into extra block */
|
/*
|
||||||
uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length;
|
* Compute how many bytes to copy from user buffer into
|
||||||
|
* extra block
|
||||||
|
*/
|
||||||
|
uint32_t copy_len = SHA1_BLOCK_SIZE -
|
||||||
|
ctx->partial_block_buffer_length;
|
||||||
if (len < copy_len)
|
if (len < copy_len)
|
||||||
copy_len = len;
|
copy_len = len;
|
||||||
|
|
||||||
|
@@ -297,20 +320,28 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
 			       buffer, copy_len);
 
 			ctx->partial_block_buffer_length += copy_len;
-			ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
+			ctx->incoming_buffer = (const void *)
+					((const char *)buffer + copy_len);
 			ctx->incoming_buffer_length = len - copy_len;
 		}
 
-		/* The extra block should never contain more than 1 block here */
+		/*
+		 * The extra block should never contain more than 1 block
+		 * here
+		 */
 		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
 
-		/* If the extra block buffer contains exactly 1 block, it can be hashed. */
+		/*
+		 * If the extra block buffer contains exactly 1 block, it can
+		 * be hashed.
+		 */
 		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
 			ctx->partial_block_buffer_length = 0;
 
 			ctx->job.buffer = ctx->partial_block_buffer;
 			ctx->job.len = 1;
-			ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
+			ctx = (struct sha1_hash_ctx *)
+				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
 		}
 	}
@@ -329,23 +360,24 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
 			return NULL;
 
 		/*
-		 * If flush returned a job, resubmit the job to finish processing.
+		 * If flush returned a job, resubmit the job to finish
+		 * processing.
 		 */
 		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
 
 		/*
-		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
-		 * Otherwise, all jobs currently being managed by the sha1_ctx_mgr
-		 * still need processing. Loop.
+		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
+		 * returned. Otherwise, all jobs currently being managed by the
+		 * sha1_ctx_mgr still need processing. Loop.
 		 */
 		if (ctx)
 			return ctx;
 	}
 }
 
-static int sha1_mb_init(struct shash_desc *desc)
+static int sha1_mb_init(struct ahash_request *areq)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
 	hash_ctx_init(sctx);
 	sctx->job.result_digest[0] = SHA1_H0;
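Note (illustrative sketch, not part of this patch): the flush/resubmit loop
above means a caller that wants to drain every outstanding job just keeps
calling the flush entry point until it returns NULL. Here mgr is the per-cpu
sha1_ctx_mgr and process_digest() is a hypothetical consumer.

	struct sha1_hash_ctx *ctx;

	kernel_fpu_begin();
	while ((ctx = sha1_ctx_mgr_flush(mgr)) != NULL) {
		/* every ctx returned here satisfies hash_ctx_complete(ctx) */
		process_digest(hash_ctx_digest(ctx));	/* hypothetical */
	}
	kernel_fpu_end();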
@@ -363,7 +395,7 @@ static int sha1_mb_init(struct shash_desc *desc)
 static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
 {
 	int i;
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
 	__be32 *dst = (__be32 *) rctx->out;
 
 	for (i = 0; i < 5; ++i)
@@ -394,9 +426,11 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
 			flag |= HASH_LAST;
 
 		}
-		sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
+		sha_ctx = (struct sha1_hash_ctx *)
+					ahash_request_ctx(&rctx->areq);
 		kernel_fpu_begin();
-		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
+		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
+						rctx->walk.data, nbytes, flag);
 		if (!sha_ctx) {
 			if (flush)
 				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
@@ -485,11 +519,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
 		mcryptd_arm_flusher(cstate, delay);
 }
 
-static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
-			  unsigned int len)
+static int sha1_mb_update(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -505,7 +538,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);
 
 	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
 
@@ -518,10 +551,11 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
 		rctx->flag |= HASH_DONE;
 
 	/* submit */
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	sha1_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
-	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE);
+	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+							nbytes, HASH_UPDATE);
 	kernel_fpu_end();
 
 	/* check if anything is returned */
@@ -544,11 +578,10 @@ done:
 	return ret;
 }
 
-static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
-			 unsigned int len, u8 *out)
+static int sha1_mb_finup(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -563,7 +596,7 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
 	}
 
 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);
 
 	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
 
@@ -576,15 +609,15 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
 		rctx->flag |= HASH_DONE;
 		flag = HASH_LAST;
 	}
-	rctx->out = out;
 
 	/* submit */
 	rctx->flag |= HASH_FINAL;
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	sha1_mb_add_list(rctx, cstate);
 
 	kernel_fpu_begin();
-	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag);
+	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
+								nbytes, flag);
 	kernel_fpu_end();
 
 	/* check if anything is returned */
@@ -605,10 +638,10 @@ done:
 	return ret;
 }
 
-static int sha1_mb_final(struct shash_desc *desc, u8 *out)
+static int sha1_mb_final(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
 
@@ -623,16 +656,16 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out)
 	}
 
 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);
 
-	rctx->out = out;
 	rctx->flag |= HASH_DONE | HASH_FINAL;
 
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	/* flag HASH_FINAL and 0 data size */
 	sha1_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
-	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST);
+	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
+								HASH_LAST);
 	kernel_fpu_end();
 
 	/* check if anything is returned */
@@ -654,48 +687,98 @@ done:
 	return ret;
 }
 
-static int sha1_mb_export(struct shash_desc *desc, void *out)
+static int sha1_mb_export(struct ahash_request *areq, void *out)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
 	memcpy(out, sctx, sizeof(*sctx));
 
 	return 0;
 }
 
-static int sha1_mb_import(struct shash_desc *desc, const void *in)
+static int sha1_mb_import(struct ahash_request *areq, const void *in)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
 
 	memcpy(sctx, in, sizeof(*sctx));
 
 	return 0;
 }
 
+static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
+{
+	struct mcryptd_ahash *mcryptd_tfm;
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct mcryptd_hash_ctx *mctx;
+
+	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
+						CRYPTO_ALG_INTERNAL,
+						CRYPTO_ALG_INTERNAL);
+	if (IS_ERR(mcryptd_tfm))
+		return PTR_ERR(mcryptd_tfm);
+	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
+	mctx->alg_state = &sha1_mb_alg_state;
+	ctx->mcryptd_tfm = mcryptd_tfm;
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				sizeof(struct ahash_request) +
+				crypto_ahash_reqsize(&mcryptd_tfm->base));
+
+	return 0;
+}
+
+static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				sizeof(struct ahash_request) +
+				sizeof(struct sha1_hash_ctx));
+
+	return 0;
+}
+
+static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
 
-static struct shash_alg sha1_mb_shash_alg = {
-	.digestsize	= SHA1_DIGEST_SIZE,
+static struct ahash_alg sha1_mb_areq_alg = {
 	.init		= sha1_mb_init,
 	.update		= sha1_mb_update,
 	.final		= sha1_mb_final,
 	.finup		= sha1_mb_finup,
 	.export		= sha1_mb_export,
 	.import		= sha1_mb_import,
-	.descsize	= sizeof(struct sha1_hash_ctx),
-	.statesize	= sizeof(struct sha1_hash_ctx),
-	.base		= {
-		.cra_name	 = "__sha1-mb",
-		.cra_driver_name = "__intel_sha1-mb",
-		.cra_priority	 = 100,
-		/*
-		 * use ASYNC flag as some buffers in multi-buffer
-		 * algo may not have completed before hashing thread sleep
-		 */
-		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
-				   CRYPTO_ALG_INTERNAL,
-		.cra_blocksize	 = SHA1_BLOCK_SIZE,
-		.cra_module	 = THIS_MODULE,
-		.cra_list	 = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
+	.halg = {
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct sha1_hash_ctx),
+		.base = {
+			.cra_name	 = "__sha1-mb",
+			.cra_driver_name = "__intel_sha1-mb",
+			.cra_priority	 = 100,
+			/*
+			 * use ASYNC flag as some buffers in multi-buffer
+			 * algo may not have completed before hashing thread
+			 * sleep
+			 */
+			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_INTERNAL,
+			.cra_blocksize	= SHA1_BLOCK_SIZE,
+			.cra_module	= THIS_MODULE,
+			.cra_list	= LIST_HEAD_INIT
+				(sha1_mb_areq_alg.halg.base.cra_list),
+			.cra_init	= sha1_mb_areq_init_tfm,
+			.cra_exit	= sha1_mb_areq_exit_tfm,
+			.cra_ctxsize	= sizeof(struct sha1_hash_ctx),
+		}
 	}
 };
 
@@ -780,48 +863,22 @@ static int sha1_mb_async_import(struct ahash_request *req, const void *in)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-	struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
+	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
 	struct mcryptd_hash_request_ctx *rctx;
-	struct shash_desc *desc;
+	struct ahash_request *areq;
 
 	memcpy(mcryptd_req, req, sizeof(*req));
 	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
 	rctx = ahash_request_ctx(mcryptd_req);
-	desc = &rctx->desc;
-	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	areq = &rctx->areq;
+
+	ahash_request_set_tfm(areq, child);
+	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
+					rctx->complete, req);
 
 	return crypto_ahash_import(mcryptd_req, in);
 }
 
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
-	struct mcryptd_ahash *mcryptd_tfm;
-	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct mcryptd_hash_ctx *mctx;
-
-	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
-					CRYPTO_ALG_INTERNAL,
-					CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(mcryptd_tfm))
-		return PTR_ERR(mcryptd_tfm);
-	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
-	mctx->alg_state = &sha1_mb_alg_state;
-	ctx->mcryptd_tfm = mcryptd_tfm;
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				sizeof(struct ahash_request) +
-				crypto_ahash_reqsize(&mcryptd_tfm->base));
-
-	return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
 static struct ahash_alg sha1_mb_async_alg = {
 	.init           = sha1_mb_async_init,
 	.update         = sha1_mb_async_update,
@@ -866,7 +923,8 @@ static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
 		if (time_before(cur_time, rctx->tag.expire))
 			break;
 		kernel_fpu_begin();
-		sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr);
+		sha_ctx = (struct sha1_hash_ctx *)
+					sha1_ctx_mgr_flush(cstate->mgr);
 		kernel_fpu_end();
 		if (!sha_ctx) {
 			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
@@ -927,7 +985,7 @@ static int __init sha1_mb_mod_init(void)
 	}
 	sha1_mb_alg_state.flusher = &sha1_mb_flusher;
 
-	err = crypto_register_shash(&sha1_mb_shash_alg);
+	err = crypto_register_ahash(&sha1_mb_areq_alg);
 	if (err)
 		goto err2;
 	err = crypto_register_ahash(&sha1_mb_async_alg);
@@ -937,7 +995,7 @@ static int __init sha1_mb_mod_init(void)
 
 	return 0;
 err1:
-	crypto_unregister_shash(&sha1_mb_shash_alg);
+	crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
 	for_each_possible_cpu(cpu) {
 		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
@@ -953,7 +1011,7 @@ static void __exit sha1_mb_mod_fini(void)
 	struct mcryptd_alg_cstate *cpu_state;
 
 	crypto_unregister_ahash(&sha1_mb_async_alg);
-	crypto_unregister_shash(&sha1_mb_shash_alg);
+	crypto_unregister_ahash(&sha1_mb_areq_alg);
 	for_each_possible_cpu(cpu) {
 		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
 		kfree(cpu_state->mgr);
@@ -54,7 +54,7 @@
 #ifndef _SHA_MB_CTX_INTERNAL_H
 #define _SHA_MB_CTX_INTERNAL_H
 
-#include "sha_mb_mgr.h"
+#include "sha1_mb_mgr.h"
 
 #define HASH_UPDATE	0x00
 #define HASH_FIRST	0x01
@@ -51,7 +51,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "sha_mb_mgr.h"
+#include "sha1_mb_mgr.h"
 
 void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
 {
@@ -374,3 +374,9 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
 MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-ssse3");
+MODULE_ALIAS_CRYPTO("sha1-avx");
+MODULE_ALIAS_CRYPTO("sha1-avx2");
+#ifdef CONFIG_AS_SHA1_NI
+MODULE_ALIAS_CRYPTO("sha1-ni");
+#endif
@@ -0,0 +1,11 @@
+#
+# Arch-specific CryptoAPI modules.
+#
+
+avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+				$(comma)4)$(comma)%ymm2,yes,no)
+ifeq ($(avx2_supported),yes)
+	obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
+	sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
+	     sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
+endif
File diff suppressed because it is too large
@@ -0,0 +1,136 @@
+/*
+ * Header file for multi buffer SHA256 context
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ *	Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SHA_MB_CTX_INTERNAL_H
+#define _SHA_MB_CTX_INTERNAL_H
+
+#include "sha256_mb_mgr.h"
+
+#define HASH_UPDATE	0x00
+#define HASH_FIRST	0x01
+#define HASH_LAST	0x02
+#define HASH_ENTIRE	0x03
+#define HASH_DONE	0x04
+#define HASH_FINAL	0x08
+
+#define HASH_CTX_STS_IDLE	0x00
+#define HASH_CTX_STS_PROCESSING	0x01
+#define HASH_CTX_STS_LAST	0x02
+#define HASH_CTX_STS_COMPLETE	0x04
+
+enum hash_ctx_error {
+	HASH_CTX_ERROR_NONE		  =  0,
+	HASH_CTX_ERROR_INVALID_FLAGS	  = -1,
+	HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
+	HASH_CTX_ERROR_ALREADY_COMPLETED  = -3,
+
+#ifdef HASH_CTX_DEBUG
+	HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
+#endif
+};
+
+#define hash_ctx_user_data(ctx)  ((ctx)->user_data)
+#define hash_ctx_digest(ctx)     ((ctx)->job.result_digest)
+#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
+#define hash_ctx_complete(ctx)   ((ctx)->status == HASH_CTX_STS_COMPLETE)
+#define hash_ctx_status(ctx)     ((ctx)->status)
+#define hash_ctx_error(ctx)      ((ctx)->error)
+#define hash_ctx_init(ctx) \
+	do { \
+		(ctx)->error = HASH_CTX_ERROR_NONE; \
+		(ctx)->status = HASH_CTX_STS_COMPLETE; \
+	} while (0)
+
+/* Hash Constants and Typedefs */
+#define SHA256_DIGEST_LENGTH		8
+#define SHA256_LOG2_BLOCK_SIZE		6
+
+#define SHA256_PADLENGTHFIELD_SIZE	8
+
+#ifdef SHA_MB_DEBUG
+#define assert(expr) \
+do { \
+	if (unlikely(!(expr))) { \
+		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+		#expr, __FILE__, __func__, __LINE__); \
+	} \
+} while (0)
+#else
+#define assert(expr) do {} while (0)
+#endif
+
+struct sha256_ctx_mgr {
+	struct sha256_mb_mgr mgr;
+};
+
+/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
+
+struct sha256_hash_ctx {
+	/* Must be at struct offset 0 */
+	struct job_sha256	job;
+	/* status flag */
+	int status;
+	/* error flag */
+	int error;
+
+	uint32_t	total_length;
+	const void	*incoming_buffer;
+	uint32_t	incoming_buffer_length;
+	uint8_t		partial_block_buffer[SHA256_BLOCK_SIZE * 2];
+	uint32_t	partial_block_buffer_length;
+	void		*user_data;
+};
+
+#endif
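Note (illustrative sketch, not part of this patch): the status and error
values above form a small state machine. A ctx that is marked
HASH_CTX_STS_PROCESSING must not be resubmitted, and a completed ctx only
accepts a new job that carries HASH_FIRST; this mirrors the checks in the
SHA1 submit path earlier in this diff. ctx_may_submit() is a hypothetical
helper, not part of the file.

	static enum hash_ctx_error ctx_may_submit(struct sha256_hash_ctx *ctx,
						  int flags)
	{
		if (flags & ~HASH_ENTIRE)	/* unknown flag bits */
			return HASH_CTX_ERROR_INVALID_FLAGS;
		if (hash_ctx_processing(ctx))	/* job still in flight */
			return HASH_CTX_ERROR_ALREADY_PROCESSING;
		if (hash_ctx_complete(ctx) && !(flags & HASH_FIRST))
			return HASH_CTX_ERROR_ALREADY_COMPLETED;
		return HASH_CTX_ERROR_NONE;
	}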
@@ -0,0 +1,108 @@
+/*
+ * Header file for multi buffer SHA256 algorithm manager
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ *	Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __SHA_MB_MGR_H
+#define __SHA_MB_MGR_H
+
+#include <linux/types.h>
+
+#define NUM_SHA256_DIGEST_WORDS 8
+
+enum job_sts {	STS_UNKNOWN = 0,
+		STS_BEING_PROCESSED = 1,
+		STS_COMPLETED = 2,
+		STS_INTERNAL_ERROR = 3,
+		STS_ERROR = 4
+};
+
+struct job_sha256 {
+	u8	*buffer;
+	u32	len;
+	u32	result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
+	enum	job_sts status;
+	void	*user_data;
+};
+
+/* SHA256 out-of-order scheduler */
+
+/* typedef uint32_t sha8_digest_array[8][8]; */
+
+struct sha256_args_x8 {
+	uint32_t	digest[8][8];
+	uint8_t		*data_ptr[8];
+};
+
+struct sha256_lane_data {
+	struct job_sha256 *job_in_lane;
+};
+
+struct sha256_mb_mgr {
+	struct sha256_args_x8 args;
+
+	uint32_t lens[8];
+
+	/* each byte is index (0...7) of unused lanes */
+	uint64_t unused_lanes;
+	/* byte 4 is set to FF as a flag */
+	struct sha256_lane_data ldata[8];
+};
+
+#define SHA256_MB_MGR_NUM_LANES_AVX2 8
+
+void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
+struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
+					 struct job_sha256 *job);
+struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
+struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
+
+#endif
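Note (rough usage sketch under stated assumptions, not part of this patch):
the four entry points above implement an out-of-order scheduler. submit may
return NULL (the job was parked in a lane) or a pointer to whichever job just
completed, and flush forces the remaining lanes to finish. Job lengths are in
SHA256 blocks, the input is assumed block-aligned, and the real driver
brackets these calls with kernel_fpu_begin()/kernel_fpu_end().

	struct sha256_mb_mgr mgr;
	struct job_sha256 job = {
		.buffer	= data,		/* block-aligned input (assumed) */
		.len	= nblocks,	/* length in SHA256 blocks */
		.status	= STS_UNKNOWN,
	};
	struct job_sha256 *done;

	sha256_mb_mgr_init_avx2(&mgr);
	done = sha256_mb_mgr_submit_avx2(&mgr, &job);
	while (!done)			/* drain lanes until a job completes */
		done = sha256_mb_mgr_flush_avx2(&mgr);
	/* done->result_digest[] now holds the completed digest words */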
@@ -0,0 +1,304 @@
+/*
+ * Header file for multi buffer SHA256 algorithm data structure
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ *	Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+# Macros for defining data structures
+
+# Usage example
+
+#START_FIELDS	# JOB_AES
+###	name		size	align
+#FIELD	_plaintext,	8,	8	# pointer to plaintext
+#FIELD	_ciphertext,	8,	8	# pointer to ciphertext
+#FIELD	_IV,		16,	8	# IV
+#FIELD	_keys,		8,	8	# pointer to keys
+#FIELD	_len,		4,	4	# length in bytes
+#FIELD	_status,	4,	4	# status enumeration
+#FIELD	_user_data,	8,	8	# pointer to user data
+#UNION	_union,		size1,	align1, \
+#			size2,	align2, \
+#			size3,	align3, \
+#			...
+#END_FIELDS
+#%assign _JOB_AES_size	_FIELD_OFFSET
+#%assign _JOB_AES_align	_STRUCT_ALIGN
+
+#########################################################################
+
+# Alternate "struc-like" syntax:
+#	STRUCT job_aes2
+#	RES_Q	.plaintext,	1
+#	RES_Q	.ciphertext,	1
+#	RES_DQ	.IV,		1
+#	RES_B	.nested,	_JOB_AES_SIZE, _JOB_AES_ALIGN
+#	RES_U	.union,		size1, align1, \
+#				size2, align2, \
+#				...
+#	ENDSTRUCT
+#	# Following only needed if nesting
+#	%assign job_aes2_size	_FIELD_OFFSET
+#	%assign job_aes2_align	_STRUCT_ALIGN
+#
+# RES_* macros take a name, a count and an optional alignment.
+# The count is in terms of the base size of the macro, and the
+# default alignment is the base size.
+# The macros are:
+# Macro    Base size
+# RES_B	    1
+# RES_W	    2
+# RES_D     4
+# RES_Q     8
+# RES_DQ   16
+# RES_Y    32
+# RES_Z    64
+#
+# RES_U defines a union. Its arguments are a name and two or more
+# pairs of "size, alignment"
+#
+# The two assigns are only needed if this structure is being nested
+# within another. Even if the assigns are not done, one can still use
+# STRUCT_NAME_size as the size of the structure.
+#
+# Note that for nesting, you still need to assign to STRUCT_NAME_size.
+#
+# The differences between this and using "struc" directly are that each
+# type is implicitly aligned to its natural length (although this can be
+# over-ridden with an explicit third parameter), and that the structure
+# is padded at the end to its overall alignment.
+#
+
+#########################################################################
+
+#ifndef _DATASTRUCT_ASM_
+#define _DATASTRUCT_ASM_
+
+#define SZ8			8*SHA256_DIGEST_WORD_SIZE
+#define ROUNDS			64*SZ8
+#define PTR_SZ			8
+#define SHA256_DIGEST_WORD_SIZE	4
+#define MAX_SHA256_LANES	8
+#define SHA256_DIGEST_WORDS	8
+#define SHA256_DIGEST_ROW_SIZE	(MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
+#define SHA256_DIGEST_SIZE	(SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
+#define SHA256_BLK_SZ		64
+
+# START_FIELDS
+.macro START_FIELDS
+ _FIELD_OFFSET = 0
+ _STRUCT_ALIGN = 0
+.endm
+
+# FIELD name size align
+.macro FIELD name size align
+ _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
+ \name	= _FIELD_OFFSET
+ _FIELD_OFFSET = _FIELD_OFFSET + (\size)
+.if (\align > _STRUCT_ALIGN)
+ _STRUCT_ALIGN = \align
+.endif
+.endm
+
+# END_FIELDS
+.macro END_FIELDS
+ _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
+.endm
+
+########################################################################
+
+.macro STRUCT p1
+START_FIELDS
+.struc \p1
+.endm
+
+.macro ENDSTRUCT
+ tmp = _FIELD_OFFSET
+ END_FIELDS
+ tmp = (_FIELD_OFFSET - %%tmp)
+.if (tmp > 0)
+	.lcomm	tmp
+.endif
+.endstruc
+.endm
+
+## RES_int name size align
+.macro RES_int p1 p2 p3
+ name = \p1
+ size = \p2
+ align = .\p3
+
+ _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
+.align align
+.lcomm name size
+ _FIELD_OFFSET = _FIELD_OFFSET + (size)
+.if (align > _STRUCT_ALIGN)
+ _STRUCT_ALIGN = align
+.endif
+.endm
+
+# macro RES_B name, size [, align]
+.macro RES_B _name, _size, _align=1
+RES_int _name _size _align
+.endm
+
+# macro RES_W name, size [, align]
+.macro RES_W _name, _size, _align=2
+RES_int _name 2*(_size) _align
+.endm
+
+# macro RES_D name, size [, align]
+.macro RES_D _name, _size, _align=4
+RES_int _name 4*(_size) _align
+.endm
+
+# macro RES_Q name, size [, align]
+.macro RES_Q _name, _size, _align=8
+RES_int _name 8*(_size) _align
+.endm
+
+# macro RES_DQ name, size [, align]
+.macro RES_DQ _name, _size, _align=16
+RES_int _name 16*(_size) _align
+.endm
+
+# macro RES_Y name, size [, align]
+.macro RES_Y _name, _size, _align=32
+RES_int _name 32*(_size) _align
+.endm
+
+# macro RES_Z name, size [, align]
+.macro RES_Z _name, _size, _align=64
+RES_int _name 64*(_size) _align
+.endm
+
+#endif
+
+
+########################################################################
+#### Define SHA256 Out Of Order Data Structures
+########################################################################
+
+START_FIELDS    # LANE_DATA
+###     name            size    align
+FIELD   _job_in_lane,   8,      8       # pointer to job object
+END_FIELDS
+
+ _LANE_DATA_size = _FIELD_OFFSET
+ _LANE_DATA_align = _STRUCT_ALIGN
+
+########################################################################
+
+START_FIELDS    # SHA256_ARGS_X4
+###     name            size    align
+FIELD   _digest,        4*8*8,  4       # transposed digest
+FIELD   _data_ptr,      8*8,    8       # array of pointers to data
+END_FIELDS
+
+ _SHA256_ARGS_X4_size = _FIELD_OFFSET
+ _SHA256_ARGS_X4_align = _STRUCT_ALIGN
+ _SHA256_ARGS_X8_size = _FIELD_OFFSET
+ _SHA256_ARGS_X8_align = _STRUCT_ALIGN
+
+#######################################################################
+
+START_FIELDS    # MB_MGR
+###     name            size    align
+FIELD   _args,          _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
+FIELD   _lens,          4*8,    8
+FIELD   _unused_lanes,  8,      8
+FIELD   _ldata,         _LANE_DATA_size*8, _LANE_DATA_align
+END_FIELDS
+
+ _MB_MGR_size = _FIELD_OFFSET
+ _MB_MGR_align = _STRUCT_ALIGN
+
+_args_digest = _args + _digest
+_args_data_ptr = _args + _data_ptr
+
+#######################################################################
+
+START_FIELDS    #STACK_FRAME
+###     name            size    align
+FIELD   _data,          16*SZ8, 1       # transposed digest
+FIELD   _digest,        8*SZ8,  1       # array of pointers to data
+FIELD   _ytmp,          4*SZ8,  1
+FIELD   _rsp,           8,      1
+END_FIELDS
+
+ _STACK_FRAME_size = _FIELD_OFFSET
+ _STACK_FRAME_align = _STRUCT_ALIGN
+
+#######################################################################
+
+########################################################################
+#### Define constants
+########################################################################
+
+#define STS_UNKNOWN             0
+#define STS_BEING_PROCESSED     1
+#define STS_COMPLETED           2
+
+########################################################################
+#### Define JOB_SHA256 structure
+########################################################################
+
+START_FIELDS    # JOB_SHA256
+
+###     name                            size    align
+FIELD   _buffer,                        8,      8       # pointer to buffer
+FIELD   _len,                           8,      8       # length in bytes
+FIELD   _result_digest,                 8*4,    32      # Digest (output)
+FIELD   _status,                        4,      4
+FIELD   _user_data,                     8,      8
+END_FIELDS
+
+ _JOB_SHA256_size = _FIELD_OFFSET
+ _JOB_SHA256_align = _STRUCT_ALIGN
@@ -0,0 +1,304 @@
+/*
+ * Flush routine for SHA256 multibuffer
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ *	Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/linkage.h>
+#include <asm/frame.h>
+#include "sha256_mb_mgr_datastruct.S"
+
+.extern sha256_x8_avx2
+
+#LINUX register definitions
+#define arg1	%rdi
+#define arg2	%rsi
+
+# Common register definitions
+#define state	arg1
+#define job	arg2
+#define len2	arg2
+
+# idx must be a register not clobbered by sha1_mult
+#define idx		%r8
+#define DWORD_idx	%r8d
+
+#define unused_lanes	%rbx
+#define lane_data	%rbx
+#define tmp2		%rbx
+#define tmp2_w		%ebx
+
+#define job_rax		%rax
+#define tmp1		%rax
+#define size_offset	%rax
+#define tmp		%rax
+#define start_offset	%rax
+
+#define tmp3		%arg1
+
+#define extra_blocks	%arg2
+#define p		%arg2
+
+.macro LABEL prefix n
+\prefix\n\():
+.endm
+
+.macro JNE_SKIP i
+jne     skip_\i
+.endm
+
+.altmacro
+.macro SET_OFFSET _offset
+offset = \_offset
+.endm
+.noaltmacro
+
+# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
+# arg 1 : rcx : state
+ENTRY(sha256_mb_mgr_flush_avx2)
+	FRAME_BEGIN
+	push	%rbx
+
+	# If bit (32+3) is set, then all lanes are empty
+	mov	_unused_lanes(state), unused_lanes
+	bt	$32+3, unused_lanes
+	jc	return_null
+
+	# find a lane with a non-null job
+	xor	idx, idx
+	offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+	cmovne	one(%rip), idx
+	offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+	cmovne	two(%rip), idx
+	offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+	cmovne	three(%rip), idx
+	offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+	cmovne	four(%rip), idx
+	offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+	cmovne	five(%rip), idx
+	offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+	cmovne	six(%rip), idx
+	offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+	cmovne	seven(%rip), idx
+
+	# copy idx to empty lanes
+copy_lane_data:
+	offset =  (_args + _data_ptr)
+	mov	offset(state,idx,8), tmp
+
+	I = 0
+.rep 8
+	offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
+	cmpq	$0, offset(state)
+.altmacro
+	JNE_SKIP %I
+	offset =  (_args + _data_ptr + 8*I)
+	mov	tmp, offset(state)
+	offset =  (_lens + 4*I)
+	movl	$0xFFFFFFFF, offset(state)
+LABEL skip_ %I
+	I = (I+1)
+.noaltmacro
+.endr
+
+	# Find min length
+	vmovdqa	_lens+0*16(state), %xmm0
+	vmovdqa	_lens+1*16(state), %xmm1
+
+	vpminud	%xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
+	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
+	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has {x,x,E,F}
+	vpalignr $4, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,x,E}
+	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has min val in low dword
+
+	vmovd	%xmm2, DWORD_idx
+	mov	idx, len2
+	and	$0xF, idx
+	shr	$4, len2
+	jz	len_is_0
+
+	vpand	clear_low_nibble(%rip), %xmm2, %xmm2
+	vpshufd	$0, %xmm2, %xmm2
+
+	vpsubd	%xmm2, %xmm0, %xmm0
+	vpsubd	%xmm2, %xmm1, %xmm1
+
+	vmovdqa	%xmm0, _lens+0*16(state)
+	vmovdqa	%xmm1, _lens+1*16(state)
+
+	# "state" and "args" are the same address, arg1
+	# len is arg2
+	call	sha256_x8_avx2
+	# state and idx are intact
+
+len_is_0:
+	# process completed job "idx"
+	imul	$_LANE_DATA_size, idx, lane_data
+	lea	_ldata(state, lane_data), lane_data
+
+	mov	_job_in_lane(lane_data), job_rax
+	movq	$0, _job_in_lane(lane_data)
+	movl	$STS_COMPLETED, _status(job_rax)
+	mov	_unused_lanes(state), unused_lanes
+	shl	$4, unused_lanes
+	or	idx, unused_lanes
+
+	mov	unused_lanes, _unused_lanes(state)
+	movl	$0xFFFFFFFF, _lens(state,idx,4)
+
+	vmovd	_args_digest(state, idx, 4), %xmm0
+	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
+	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
+	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
+	vmovd	_args_digest+4*32(state, idx, 4), %xmm1
+	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
+	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
+	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
+
+	vmovdqu	%xmm0, _result_digest(job_rax)
+	offset =  (_result_digest + 1*16)
+	vmovdqu	%xmm1, offset(job_rax)
+
+return:
+	pop	%rbx
+	FRAME_END
+	ret
+
+return_null:
+	xor	job_rax, job_rax
+	jmp	return
+ENDPROC(sha256_mb_mgr_flush_avx2)
+
+##############################################################################
+
+.align 16
+ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+	push	%rbx
+
+	## if bit 32+3 is set, then all lanes are empty
+	mov	_unused_lanes(state), unused_lanes
+	bt	$(32+3), unused_lanes
+	jc	.return_null
+
+	# Find min length
+	vmovdqa	_lens(state), %xmm0
+	vmovdqa	_lens+1*16(state), %xmm1
+
+	vpminud	%xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
+	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
+	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has {x,x,E,F}
+	vpalignr $4, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,x,E}
+	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has min val in low dword
+
+	vmovd	%xmm2, DWORD_idx
+	test	$~0xF, idx
+	jnz	.return_null
+
+	# process completed job "idx"
+	imul	$_LANE_DATA_size, idx, lane_data
+	lea	_ldata(state, lane_data), lane_data
+
+	mov	_job_in_lane(lane_data), job_rax
+	movq	$0, _job_in_lane(lane_data)
+	movl	$STS_COMPLETED, _status(job_rax)
+	mov	_unused_lanes(state), unused_lanes
+	shl	$4, unused_lanes
+	or	idx, unused_lanes
+	mov	unused_lanes, _unused_lanes(state)
+
+	movl	$0xFFFFFFFF, _lens(state, idx, 4)
+
+	vmovd	_args_digest(state, idx, 4), %xmm0
+	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
+	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
+	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
+	movl	_args_digest+4*32(state, idx, 4), tmp2_w
+	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
+	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
+	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
+
+	vmovdqu	%xmm0, _result_digest(job_rax)
+	movl	tmp2_w, _result_digest+1*16(job_rax)
+
+	pop	%rbx
+
+	ret
+
+.return_null:
+	xor	job_rax, job_rax
+	pop	%rbx
+	ret
+ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
+
+.data
+
+.align 16
+clear_low_nibble:
+	.octa	0x000000000000000000000000FFFFFFF0
+one:
+	.quad	1
+two:
+	.quad	2
+three:
+	.quad	3
+four:
+	.quad	4
+five:
+	.quad	5
+six:
+	.quad	6
+seven:
+	.quad	7
@@ -0,0 +1,65 @@
+/*
+ * Initialization code for multi buffer SHA256 algorithm for AVX2
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ *	Megha Dey <megha.dey@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sha256_mb_mgr.h"
+
+void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
+{
+	unsigned int j;
+
+	state->unused_lanes = 0xF76543210ULL;
+	for (j = 0; j < 8; j++) {
+		state->lens[j] = 0xFFFFFFFF;
+		state->ldata[j].job_in_lane = NULL;
+	}
+}
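Note (illustration only, not part of this patch): unused_lanes packs the
eight free lane indices into nibbles, lane 0 at the bottom, with 0xF left as
a sentinel in bits 32-35. A pop shifts the stack right by one nibble and a
push shifts it left, so bit 32+3 is set exactly when all eight lanes are
free - which is the "all lanes are empty" test in the flush code above.
pop_free_lane()/push_free_lane() are hypothetical helpers.

	static unsigned int pop_free_lane(uint64_t *unused_lanes)
	{
		unsigned int lane = *unused_lanes & 0xF;  /* next free lane */

		*unused_lanes >>= 4;
		return lane;
	}

	static void push_free_lane(uint64_t *unused_lanes, unsigned int lane)
	{
		*unused_lanes = (*unused_lanes << 4) | lane;
	}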
@ -0,0 +1,215 @@
|
||||||
|
/*
|
||||||
|
* Buffer submit code for multi buffer SHA256 algorithm
|
||||||
|
*
 * Dual BSD/GPLv2 license text identical to the first new file above.
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "sha256_mb_mgr_datastruct.S"

.extern sha256_x8_avx2

# LINUX register definitions
arg1		= %rdi
arg2		= %rsi
size_offset	= %rcx
tmp2		= %rcx
extra_blocks	= %rdx

# Common definitions
#define state	arg1
#define job	%rsi
#define len2	arg2
#define p2	arg2

# idx must be a register not clobbered by sha256_x8_avx2
idx		= %r8
DWORD_idx	= %r8d
last_len	= %r8

p		= %r11
start_offset	= %r11

unused_lanes	= %rbx
BYTE_unused_lanes = %bl

job_rax		= %rax
len		= %rax
DWORD_len	= %eax

lane		= %r12
tmp3		= %r12

tmp		= %r9
DWORD_tmp	= %r9d

lane_data	= %r10

# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
# arg 1 : rdi : state
# arg 2 : rsi : job
ENTRY(sha256_mb_mgr_submit_avx2)
	FRAME_BEGIN
	push	%rbx
	push	%r12

	mov	_unused_lanes(state), unused_lanes
	mov	unused_lanes, lane
	and	$0xF, lane
	shr	$4, unused_lanes
	imul	$_LANE_DATA_size, lane, lane_data
	movl	$STS_BEING_PROCESSED, _status(job)
	lea	_ldata(state, lane_data), lane_data
	mov	unused_lanes, _unused_lanes(state)
	movl	_len(job), DWORD_len

	mov	job, _job_in_lane(lane_data)
	shl	$4, len
	or	lane, len

	movl	DWORD_len, _lens(state, lane, 4)

	# Load digest words from result_digest
	vmovdqu	_result_digest(job), %xmm0
	vmovdqu	_result_digest+1*16(job), %xmm1
	vmovd	%xmm0, _args_digest(state, lane, 4)
	vpextrd	$1, %xmm0, _args_digest+1*32(state, lane, 4)
	vpextrd	$2, %xmm0, _args_digest+2*32(state, lane, 4)
	vpextrd	$3, %xmm0, _args_digest+3*32(state, lane, 4)
	vmovd	%xmm1, _args_digest+4*32(state, lane, 4)

	vpextrd	$1, %xmm1, _args_digest+5*32(state, lane, 4)
	vpextrd	$2, %xmm1, _args_digest+6*32(state, lane, 4)
	vpextrd	$3, %xmm1, _args_digest+7*32(state, lane, 4)

	mov	_buffer(job), p
	mov	p, _args_data_ptr(state, lane, 8)

	cmp	$0xF, unused_lanes
	jne	return_null

start_loop:
	# Find min length
	vmovdqa	_lens(state), %xmm0
	vmovdqa	_lens+1*16(state), %xmm1

	vpminud	%xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has {x,x,E,F}
	vpalignr $4, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,x,E}
	vpminud	%xmm3, %xmm2, %xmm2		# xmm2 has min val in low dword

	vmovd	%xmm2, DWORD_idx
	mov	idx, len2
	and	$0xF, idx
	shr	$4, len2
	jz	len_is_0

	vpand	clear_low_nibble(%rip), %xmm2, %xmm2
	vpshufd	$0, %xmm2, %xmm2

	vpsubd	%xmm2, %xmm0, %xmm0
	vpsubd	%xmm2, %xmm1, %xmm1

	vmovdqa	%xmm0, _lens + 0*16(state)
	vmovdqa	%xmm1, _lens + 1*16(state)

	# "state" and "args" are the same address, arg1
	# len is arg2
	call	sha256_x8_avx2

	# state and idx are intact

len_is_0:
	# process completed job "idx"
	imul	$_LANE_DATA_size, idx, lane_data
	lea	_ldata(state, lane_data), lane_data

	mov	_job_in_lane(lane_data), job_rax
	mov	_unused_lanes(state), unused_lanes
	movq	$0, _job_in_lane(lane_data)
	movl	$STS_COMPLETED, _status(job_rax)
	shl	$4, unused_lanes
	or	idx, unused_lanes
	mov	unused_lanes, _unused_lanes(state)

	movl	$0xFFFFFFFF, _lens(state,idx,4)

	vmovd	_args_digest(state, idx, 4), %xmm0
	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
	vmovd	_args_digest+4*32(state, idx, 4), %xmm1

	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1

	vmovdqu	%xmm0, _result_digest(job_rax)
	vmovdqu	%xmm1, _result_digest+1*16(job_rax)

return:
	pop	%r12
	pop	%rbx
	FRAME_END
	ret

return_null:
	xor	job_rax, job_rax
	jmp	return

ENDPROC(sha256_mb_mgr_submit_avx2)

.data

.align 16
clear_low_nibble:
	.octa	0x000000000000000000000000FFFFFFF0
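The vpminud/vpalignr ladder in start_loop above is a horizontal minimum over the eight packed lens words; because each word was stored as (length << 4) | lane, the smallest value also carries the index of the lane that will finish first. A scalar C sketch of the same reduction (hypothetical helper, for illustration only):

#include <stdint.h>

/*
 * Scalar model of the SIMD min search: each lens[i] packs
 * (length << 4) | lane, so the minimum identifies both the
 * shortest job and its lane.
 */
static uint32_t find_min_packed_len(const uint32_t lens[8],
				    unsigned int *lane)
{
	uint32_t min = lens[0];
	unsigned int i;

	for (i = 1; i < 8; i++)
		if (lens[i] < min)
			min = lens[i];
	*lane = min & 0xF;	/* low nibble: lane index */
	return min >> 4;	/* upper bits: the length as submitted */
}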

@@ -0,0 +1,593 @@
/*
 * Multi-buffer SHA256 algorithm hash compute routine
 *
 * Dual BSD/GPLv2 license text identical to the first new file above.
 */

#include <linux/linkage.h>
#include "sha256_mb_mgr_datastruct.S"

## code to compute oct SHA256 using AVX2 (eight lanes of 32-bit words)
## outer calling routine takes care of save and restore of XMM registers
## Logic designed/laid out by JDG

## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15
## Linux clobbers:    rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
## Linux preserves:   rdi rbp r8
##
## clobbers %ymm0-15

arg1 = %rdi
arg2 = %rsi
reg3 = %rcx
reg4 = %rdx

# Common definitions
STATE = arg1
INP_SIZE = arg2

IDX = %rax
ROUND = %rbx
TBL = reg3

inp0 = %r9
inp1 = %r10
inp2 = %r11
inp3 = %r12
inp4 = %r13
inp5 = %r14
inp6 = %r15
inp7 = reg4

a = %ymm0
b = %ymm1
c = %ymm2
d = %ymm3
e = %ymm4
f = %ymm5
g = %ymm6
h = %ymm7

T1 = %ymm8

a0 = %ymm12
a1 = %ymm13
a2 = %ymm14
TMP = %ymm15
TMP0 = %ymm6
TMP1 = %ymm7

TT0 = %ymm8
TT1 = %ymm9
TT2 = %ymm10
TT3 = %ymm11
TT4 = %ymm12
TT5 = %ymm13
TT6 = %ymm14
TT7 = %ymm15

# Define stack usage

# Assume stack aligned to 32 bytes before call
# Therefore FRAMESZ mod 32 must be 32-8 = 24

#define FRAMESZ	0x388

#define VMOVPS	vmovups

# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
# "transpose" data in {r0...r7} using temps {t0...t1}
# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
#
# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
#

.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
	# process top half (r0..r3) {a...d}
	vshufps	$0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
	vshufps	$0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
	vshufps	$0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
	vshufps	$0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
	vshufps	$0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
	vshufps	$0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
	vshufps	$0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
	vshufps	$0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}

	# use r2 in place of t0
	# process bottom half (r4..r7) {e...h}
	vshufps	$0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
	vshufps	$0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
	vshufps	$0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
	vshufps	$0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
	vshufps	$0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
	vshufps	$0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
	vshufps	$0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
	vshufps	$0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}

	vperm2f128	$0x13, \r1, \r5, \r6 # h6...a6
	vperm2f128	$0x02, \r1, \r5, \r2 # h2...a2
	vperm2f128	$0x13, \r3, \r7, \r5 # h5...a5
	vperm2f128	$0x02, \r3, \r7, \r1 # h1...a1
	vperm2f128	$0x13, \r0, \r4, \r7 # h7...a7
	vperm2f128	$0x02, \r0, \r4, \r3 # h3...a3
	vperm2f128	$0x13, \t0, \t1, \r4 # h4...a4
	vperm2f128	$0x02, \t0, \t1, \r0 # h0...a0

.endm

.macro ROTATE_ARGS
TMP_ = h
h = g
g = f
f = e
e = d
d = c
c = b
b = a
a = TMP_
.endm

.macro _PRORD reg imm tmp
	vpslld	$(32-\imm),\reg,\tmp
	vpsrld	$\imm,\reg, \reg
	vpor	\tmp,\reg, \reg
.endm

# PRORD_nd reg, imm, tmp, src
.macro _PRORD_nd reg imm tmp src
	vpslld	$(32-\imm), \src, \tmp
	vpsrld	$\imm, \src, \reg
	vpor	\tmp, \reg, \reg
.endm

# PRORD dst/src, amt
.macro PRORD reg imm
	_PRORD	\reg,\imm,TMP
.endm

# PRORD_nd dst, src, amt
.macro PRORD_nd reg tmp imm
	_PRORD_nd	\reg, \imm, TMP, \tmp
.endm

# arguments passed implicitly in preprocessor symbols i, a...h
.macro ROUND_00_15 _T1 i
	PRORD_nd	a0,e,5	# sig1: a0 = (e >> 5)

	vpxor	g, f, a2	# ch: a2 = f^g
	vpand	e,a2, a2	# ch: a2 = (f^g)&e
	vpxor	g, a2, a2	# a2 = ch

	PRORD_nd	a1,e,25	# sig1: a1 = (e >> 25)

	vmovdqu	\_T1,(SZ8*(\i & 0xf))(%rsp)
	vpaddd	(TBL,ROUND,1), \_T1, \_T1	# T1 = W + K
	vpxor	e,a0, a0	# sig1: a0 = e ^ (e >> 5)
	PRORD	a0, 6		# sig1: a0 = (e >> 6) ^ (e >> 11)
	vpaddd	a2, h, h	# h = h + ch
	PRORD_nd	a2,a,11	# sig0: a2 = (a >> 11)
	vpaddd	\_T1,h, h	# h = h + ch + W + K
	vpxor	a1, a0, a0	# a0 = sigma1
	PRORD_nd	a1,a,22	# sig0: a1 = (a >> 22)
	vpxor	c, a, \_T1	# maj: T1 = a^c
	add	$SZ8, ROUND	# ROUND++
	vpand	b, \_T1, \_T1	# maj: T1 = (a^c)&b
	vpaddd	a0, h, h
	vpaddd	h, d, d
	vpxor	a, a2, a2	# sig0: a2 = a ^ (a >> 11)
	PRORD	a2,2		# sig0: a2 = (a >> 2) ^ (a >> 13)
	vpxor	a1, a2, a2	# a2 = sig0
	vpand	c, a, a1	# maj: a1 = a&c
	vpor	\_T1, a1, a1	# a1 = maj
	vpaddd	a1, h, h	# h = h + ch + W + K + maj
	vpaddd	a2, h, h	# h = h + ch + W + K + maj + sigma0
	ROTATE_ARGS
.endm

# arguments passed implicitly in preprocessor symbols i, a...h
.macro ROUND_16_XX _T1 i
	vmovdqu	(SZ8*((\i-15)&0xf))(%rsp), \_T1
	vmovdqu	(SZ8*((\i-2)&0xf))(%rsp), a1
	vmovdqu	\_T1, a0
	PRORD	\_T1,11
	vmovdqu	a1, a2
	PRORD	a1,2
	vpxor	a0, \_T1, \_T1
	PRORD	\_T1, 7
	vpxor	a2, a1, a1
	PRORD	a1, 17
	vpsrld	$3, a0, a0
	vpxor	a0, \_T1, \_T1
	vpsrld	$10, a2, a2
	vpxor	a2, a1, a1
	vpaddd	(SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
	vpaddd	(SZ8*((\i-7)&0xf))(%rsp), a1, a1
	vpaddd	a1, \_T1, \_T1

	ROUND_00_15 \_T1,\i
.endm

# SHA256_ARGS:
#   UINT128 digest[8];               // transposed digests
#   UINT8  *data_ptr[8];

# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes);
# arg 1 : STATE    : pointer to array of pointers to input data
# arg 2 : INP_SIZE : size of input in blocks
	# general registers preserved in outer calling routine
	# outer calling routine saves all the XMM registers
	# save rsp, allocate 32-byte aligned for local variables
ENTRY(sha256_x8_avx2)

	# save callee-saved clobbered registers to comply with C function ABI
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	%rsp, IDX
	sub	$FRAMESZ, %rsp
	and	$~0x1F, %rsp
	mov	IDX, _rsp(%rsp)

	# Load the pre-transposed incoming digest.
	vmovdqu	0*SHA256_DIGEST_ROW_SIZE(STATE),a
	vmovdqu	1*SHA256_DIGEST_ROW_SIZE(STATE),b
	vmovdqu	2*SHA256_DIGEST_ROW_SIZE(STATE),c
	vmovdqu	3*SHA256_DIGEST_ROW_SIZE(STATE),d
	vmovdqu	4*SHA256_DIGEST_ROW_SIZE(STATE),e
	vmovdqu	5*SHA256_DIGEST_ROW_SIZE(STATE),f
	vmovdqu	6*SHA256_DIGEST_ROW_SIZE(STATE),g
	vmovdqu	7*SHA256_DIGEST_ROW_SIZE(STATE),h

	lea	K256_8(%rip),TBL

	# load the address of each of the 8 message lanes
	# getting ready to transpose input onto stack
	mov	_args_data_ptr+0*PTR_SZ(STATE),inp0
	mov	_args_data_ptr+1*PTR_SZ(STATE),inp1
	mov	_args_data_ptr+2*PTR_SZ(STATE),inp2
	mov	_args_data_ptr+3*PTR_SZ(STATE),inp3
	mov	_args_data_ptr+4*PTR_SZ(STATE),inp4
	mov	_args_data_ptr+5*PTR_SZ(STATE),inp5
	mov	_args_data_ptr+6*PTR_SZ(STATE),inp6
	mov	_args_data_ptr+7*PTR_SZ(STATE),inp7

	xor	IDX, IDX
lloop:
	xor	ROUND, ROUND

	# save old digest
	vmovdqu	a, _digest(%rsp)
	vmovdqu	b, _digest+1*SZ8(%rsp)
	vmovdqu	c, _digest+2*SZ8(%rsp)
	vmovdqu	d, _digest+3*SZ8(%rsp)
	vmovdqu	e, _digest+4*SZ8(%rsp)
	vmovdqu	f, _digest+5*SZ8(%rsp)
	vmovdqu	g, _digest+6*SZ8(%rsp)
	vmovdqu	h, _digest+7*SZ8(%rsp)
	i = 0
.rep 2
	VMOVPS	i*32(inp0, IDX), TT0
	VMOVPS	i*32(inp1, IDX), TT1
	VMOVPS	i*32(inp2, IDX), TT2
	VMOVPS	i*32(inp3, IDX), TT3
	VMOVPS	i*32(inp4, IDX), TT4
	VMOVPS	i*32(inp5, IDX), TT5
	VMOVPS	i*32(inp6, IDX), TT6
	VMOVPS	i*32(inp7, IDX), TT7
	vmovdqu	g, _ytmp(%rsp)
	vmovdqu	h, _ytmp+1*SZ8(%rsp)
	TRANSPOSE8	TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7,   TMP0, TMP1
	vmovdqu	PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
	vmovdqu	_ytmp(%rsp), g
	vpshufb	TMP1, TT0, TT0
	vpshufb	TMP1, TT1, TT1
	vpshufb	TMP1, TT2, TT2
	vpshufb	TMP1, TT3, TT3
	vpshufb	TMP1, TT4, TT4
	vpshufb	TMP1, TT5, TT5
	vpshufb	TMP1, TT6, TT6
	vpshufb	TMP1, TT7, TT7
	vmovdqu	_ytmp+1*SZ8(%rsp), h
	vmovdqu	TT4, _ytmp(%rsp)
	vmovdqu	TT5, _ytmp+1*SZ8(%rsp)
	vmovdqu	TT6, _ytmp+2*SZ8(%rsp)
	vmovdqu	TT7, _ytmp+3*SZ8(%rsp)
	ROUND_00_15	TT0,(i*8+0)
	vmovdqu	_ytmp(%rsp), TT0
	ROUND_00_15	TT1,(i*8+1)
	vmovdqu	_ytmp+1*SZ8(%rsp), TT1
	ROUND_00_15	TT2,(i*8+2)
	vmovdqu	_ytmp+2*SZ8(%rsp), TT2
	ROUND_00_15	TT3,(i*8+3)
	vmovdqu	_ytmp+3*SZ8(%rsp), TT3
	ROUND_00_15	TT0,(i*8+4)
	ROUND_00_15	TT1,(i*8+5)
	ROUND_00_15	TT2,(i*8+6)
	ROUND_00_15	TT3,(i*8+7)
	i = (i+1)
.endr
	add	$64, IDX
	i = (i*8)

	jmp	Lrounds_16_xx
.align 16
Lrounds_16_xx:
.rep 16
	ROUND_16_XX	T1, i
	i = (i+1)
.endr

	cmp	$ROUNDS,ROUND
	jb	Lrounds_16_xx

	# add old digest
	vpaddd	_digest+0*SZ8(%rsp), a, a
	vpaddd	_digest+1*SZ8(%rsp), b, b
	vpaddd	_digest+2*SZ8(%rsp), c, c
	vpaddd	_digest+3*SZ8(%rsp), d, d
	vpaddd	_digest+4*SZ8(%rsp), e, e
	vpaddd	_digest+5*SZ8(%rsp), f, f
	vpaddd	_digest+6*SZ8(%rsp), g, g
	vpaddd	_digest+7*SZ8(%rsp), h, h

	sub	$1, INP_SIZE	# unit is blocks
	jne	lloop

	# write back to memory (state object) the transposed digest
	vmovdqu	a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
	vmovdqu	b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
	vmovdqu	c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
	vmovdqu	d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
	vmovdqu	e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
	vmovdqu	f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
	vmovdqu	g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
	vmovdqu	h, 7*SHA256_DIGEST_ROW_SIZE(STATE)

	# update input pointers
	add	IDX, inp0
	mov	inp0, _args_data_ptr+0*8(STATE)
	add	IDX, inp1
	mov	inp1, _args_data_ptr+1*8(STATE)
	add	IDX, inp2
	mov	inp2, _args_data_ptr+2*8(STATE)
	add	IDX, inp3
	mov	inp3, _args_data_ptr+3*8(STATE)
	add	IDX, inp4
	mov	inp4, _args_data_ptr+4*8(STATE)
	add	IDX, inp5
	mov	inp5, _args_data_ptr+5*8(STATE)
	add	IDX, inp6
	mov	inp6, _args_data_ptr+6*8(STATE)
	add	IDX, inp7
	mov	inp7, _args_data_ptr+7*8(STATE)

	# Postamble
	mov	_rsp(%rsp), %rsp

	# restore callee-saved clobbered registers
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12

	ret
ENDPROC(sha256_x8_avx2)
.data
.align 64
K256_8:
	.octa	0x428a2f98428a2f98428a2f98428a2f98
	.octa	0x428a2f98428a2f98428a2f98428a2f98
	.octa	0x71374491713744917137449171374491
	.octa	0x71374491713744917137449171374491
	.octa	0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
	.octa	0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
	.octa	0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
	.octa	0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
	.octa	0x3956c25b3956c25b3956c25b3956c25b
	.octa	0x3956c25b3956c25b3956c25b3956c25b
	.octa	0x59f111f159f111f159f111f159f111f1
	.octa	0x59f111f159f111f159f111f159f111f1
	.octa	0x923f82a4923f82a4923f82a4923f82a4
	.octa	0x923f82a4923f82a4923f82a4923f82a4
	.octa	0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
	.octa	0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
	.octa	0xd807aa98d807aa98d807aa98d807aa98
	.octa	0xd807aa98d807aa98d807aa98d807aa98
	.octa	0x12835b0112835b0112835b0112835b01
	.octa	0x12835b0112835b0112835b0112835b01
	.octa	0x243185be243185be243185be243185be
	.octa	0x243185be243185be243185be243185be
	.octa	0x550c7dc3550c7dc3550c7dc3550c7dc3
	.octa	0x550c7dc3550c7dc3550c7dc3550c7dc3
	.octa	0x72be5d7472be5d7472be5d7472be5d74
	.octa	0x72be5d7472be5d7472be5d7472be5d74
	.octa	0x80deb1fe80deb1fe80deb1fe80deb1fe
	.octa	0x80deb1fe80deb1fe80deb1fe80deb1fe
	.octa	0x9bdc06a79bdc06a79bdc06a79bdc06a7
	.octa	0x9bdc06a79bdc06a79bdc06a79bdc06a7
	.octa	0xc19bf174c19bf174c19bf174c19bf174
	.octa	0xc19bf174c19bf174c19bf174c19bf174
	.octa	0xe49b69c1e49b69c1e49b69c1e49b69c1
	.octa	0xe49b69c1e49b69c1e49b69c1e49b69c1
	.octa	0xefbe4786efbe4786efbe4786efbe4786
	.octa	0xefbe4786efbe4786efbe4786efbe4786
	.octa	0x0fc19dc60fc19dc60fc19dc60fc19dc6
	.octa	0x0fc19dc60fc19dc60fc19dc60fc19dc6
	.octa	0x240ca1cc240ca1cc240ca1cc240ca1cc
	.octa	0x240ca1cc240ca1cc240ca1cc240ca1cc
	.octa	0x2de92c6f2de92c6f2de92c6f2de92c6f
	.octa	0x2de92c6f2de92c6f2de92c6f2de92c6f
	.octa	0x4a7484aa4a7484aa4a7484aa4a7484aa
	.octa	0x4a7484aa4a7484aa4a7484aa4a7484aa
	.octa	0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
	.octa	0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
	.octa	0x76f988da76f988da76f988da76f988da
	.octa	0x76f988da76f988da76f988da76f988da
	.octa	0x983e5152983e5152983e5152983e5152
	.octa	0x983e5152983e5152983e5152983e5152
	.octa	0xa831c66da831c66da831c66da831c66d
	.octa	0xa831c66da831c66da831c66da831c66d
	.octa	0xb00327c8b00327c8b00327c8b00327c8
	.octa	0xb00327c8b00327c8b00327c8b00327c8
	.octa	0xbf597fc7bf597fc7bf597fc7bf597fc7
	.octa	0xbf597fc7bf597fc7bf597fc7bf597fc7
	.octa	0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
	.octa	0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
	.octa	0xd5a79147d5a79147d5a79147d5a79147
	.octa	0xd5a79147d5a79147d5a79147d5a79147
	.octa	0x06ca635106ca635106ca635106ca6351
	.octa	0x06ca635106ca635106ca635106ca6351
	.octa	0x14292967142929671429296714292967
	.octa	0x14292967142929671429296714292967
	.octa	0x27b70a8527b70a8527b70a8527b70a85
	.octa	0x27b70a8527b70a8527b70a8527b70a85
	.octa	0x2e1b21382e1b21382e1b21382e1b2138
	.octa	0x2e1b21382e1b21382e1b21382e1b2138
	.octa	0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
	.octa	0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
	.octa	0x53380d1353380d1353380d1353380d13
	.octa	0x53380d1353380d1353380d1353380d13
	.octa	0x650a7354650a7354650a7354650a7354
	.octa	0x650a7354650a7354650a7354650a7354
	.octa	0x766a0abb766a0abb766a0abb766a0abb
	.octa	0x766a0abb766a0abb766a0abb766a0abb
	.octa	0x81c2c92e81c2c92e81c2c92e81c2c92e
	.octa	0x81c2c92e81c2c92e81c2c92e81c2c92e
	.octa	0x92722c8592722c8592722c8592722c85
	.octa	0x92722c8592722c8592722c8592722c85
	.octa	0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
	.octa	0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
	.octa	0xa81a664ba81a664ba81a664ba81a664b
	.octa	0xa81a664ba81a664ba81a664ba81a664b
	.octa	0xc24b8b70c24b8b70c24b8b70c24b8b70
	.octa	0xc24b8b70c24b8b70c24b8b70c24b8b70
	.octa	0xc76c51a3c76c51a3c76c51a3c76c51a3
	.octa	0xc76c51a3c76c51a3c76c51a3c76c51a3
	.octa	0xd192e819d192e819d192e819d192e819
	.octa	0xd192e819d192e819d192e819d192e819
	.octa	0xd6990624d6990624d6990624d6990624
	.octa	0xd6990624d6990624d6990624d6990624
	.octa	0xf40e3585f40e3585f40e3585f40e3585
	.octa	0xf40e3585f40e3585f40e3585f40e3585
	.octa	0x106aa070106aa070106aa070106aa070
	.octa	0x106aa070106aa070106aa070106aa070
	.octa	0x19a4c11619a4c11619a4c11619a4c116
	.octa	0x19a4c11619a4c11619a4c11619a4c116
	.octa	0x1e376c081e376c081e376c081e376c08
	.octa	0x1e376c081e376c081e376c081e376c08
	.octa	0x2748774c2748774c2748774c2748774c
	.octa	0x2748774c2748774c2748774c2748774c
	.octa	0x34b0bcb534b0bcb534b0bcb534b0bcb5
	.octa	0x34b0bcb534b0bcb534b0bcb534b0bcb5
	.octa	0x391c0cb3391c0cb3391c0cb3391c0cb3
	.octa	0x391c0cb3391c0cb3391c0cb3391c0cb3
	.octa	0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
	.octa	0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
	.octa	0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
	.octa	0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
	.octa	0x682e6ff3682e6ff3682e6ff3682e6ff3
	.octa	0x682e6ff3682e6ff3682e6ff3682e6ff3
	.octa	0x748f82ee748f82ee748f82ee748f82ee
	.octa	0x748f82ee748f82ee748f82ee748f82ee
	.octa	0x78a5636f78a5636f78a5636f78a5636f
	.octa	0x78a5636f78a5636f78a5636f78a5636f
	.octa	0x84c8781484c8781484c8781484c87814
	.octa	0x84c8781484c8781484c8781484c87814
	.octa	0x8cc702088cc702088cc702088cc70208
	.octa	0x8cc702088cc702088cc702088cc70208
	.octa	0x90befffa90befffa90befffa90befffa
	.octa	0x90befffa90befffa90befffa90befffa
	.octa	0xa4506ceba4506ceba4506ceba4506ceb
	.octa	0xa4506ceba4506ceba4506ceba4506ceb
	.octa	0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
	.octa	0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
	.octa	0xc67178f2c67178f2c67178f2c67178f2
	.octa	0xc67178f2c67178f2c67178f2c67178f2
PSHUFFLE_BYTE_FLIP_MASK:
	.octa	0x0c0d0e0f08090a0b0405060700010203
	.octa	0x0c0d0e0f08090a0b0405060700010203

.align 64
.global K256
K256:
	.int	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.int	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.int	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.int	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.int	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.int	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.int	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.int	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.int	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.int	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.int	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.int	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.int	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.int	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.int	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.int	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
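sha256_x8_avx2 above runs the same SHA256 round for eight messages at once: each ymm working variable holds one 32-bit word per lane, and K256_8 stores every round constant replicated eight times so that a single vpaddd applies W + K across all lanes. A scalar C model of that layout (illustrative only, not part of this patch):

#include <stdint.h>

/* One "ymm" as an array: the same working variable for 8 messages. */
typedef struct { uint32_t lane[8]; } v8;

/* What one K256_8 entry represents: a round constant in every lane. */
static v8 v8_splat(uint32_t k)
{
	v8 r;
	int i;

	for (i = 0; i < 8; i++)
		r.lane[i] = k;
	return r;
}

/* Scalar model of vpaddd: lane-wise 32-bit addition, e.g. W + K. */
static v8 v8_add(v8 x, v8 y)
{
	v8 r;
	int i;

	for (i = 0; i < 8; i++)
		r.lane[i] = x.lane[i] + y.lane[i];
	return r;
}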

@@ -427,4 +427,14 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS_CRYPTO("sha256");
MODULE_ALIAS_CRYPTO("sha256-ssse3");
MODULE_ALIAS_CRYPTO("sha256-avx");
MODULE_ALIAS_CRYPTO("sha256-avx2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha224-ssse3");
MODULE_ALIAS_CRYPTO("sha224-avx");
MODULE_ALIAS_CRYPTO("sha224-avx2");
#ifdef CONFIG_AS_SHA256_NI
MODULE_ALIAS_CRYPTO("sha256-ni");
MODULE_ALIAS_CRYPTO("sha224-ni");
#endif

@@ -0,0 +1,11 @@
#
# Arch-specific CryptoAPI modules.
#

avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
			$(comma)4)$(comma)%ymm2,yes,no)
ifeq ($(avx2_supported),yes)
	obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
	sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
		sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
endif
[File diff suppressed because it is too large]

@@ -0,0 +1,130 @@
/*
 * Header file for multi buffer SHA512 context
 *
 * Dual BSD/GPLv2 license text identical to the first new file above.
 */

#ifndef _SHA_MB_CTX_INTERNAL_H
#define _SHA_MB_CTX_INTERNAL_H

#include "sha512_mb_mgr.h"

#define HASH_UPDATE	0x00
#define HASH_FIRST	0x01
#define HASH_LAST	0x02
#define HASH_ENTIRE	0x03
#define HASH_DONE	0x04
#define HASH_FINAL	0x08

#define HASH_CTX_STS_IDLE	0x00
#define HASH_CTX_STS_PROCESSING	0x01
#define HASH_CTX_STS_LAST	0x02
#define HASH_CTX_STS_COMPLETE	0x04

enum hash_ctx_error {
	HASH_CTX_ERROR_NONE = 0,
	HASH_CTX_ERROR_INVALID_FLAGS = -1,
	HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
	HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
};

#define hash_ctx_user_data(ctx)  ((ctx)->user_data)
#define hash_ctx_digest(ctx)     ((ctx)->job.result_digest)
#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
#define hash_ctx_complete(ctx)   ((ctx)->status == HASH_CTX_STS_COMPLETE)
#define hash_ctx_status(ctx)     ((ctx)->status)
#define hash_ctx_error(ctx)      ((ctx)->error)
#define hash_ctx_init(ctx) \
	do { \
		(ctx)->error = HASH_CTX_ERROR_NONE; \
		(ctx)->status = HASH_CTX_STS_COMPLETE; \
	} while (0)

/* Hash Constants and Typedefs */
#define SHA512_DIGEST_LENGTH		8
#define SHA512_LOG2_BLOCK_SIZE		7

#define SHA512_PADLENGTHFIELD_SIZE	16

#ifdef SHA_MB_DEBUG
#define assert(expr) \
do { \
	if (unlikely(!(expr))) { \
		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
		#expr, __FILE__, __func__, __LINE__); \
	} \
} while (0)
#else
#define assert(expr) do {} while (0)
#endif

struct sha512_ctx_mgr {
	struct sha512_mb_mgr mgr;
};

/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */

struct sha512_hash_ctx {
	/* Must be at struct offset 0 */
	struct job_sha512 job;
	/* status flag */
	int status;
	/* error flag */
	int error;

	uint32_t total_length;
	const void *incoming_buffer;
	uint32_t incoming_buffer_length;
	uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
	uint32_t partial_block_buffer_length;
	void *user_data;
};

#endif
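The flag values above compose: a single-shot hash is HASH_FIRST | HASH_LAST == HASH_ENTIRE (0x01 | 0x02 == 0x03), and HASH_CTX_STS_PROCESSING is the bit the accessor macros test while a context is still owned by the manager. A hedged sketch of how such a gate could look, assuming the header above (hypothetical helper, not part of this patch):

/* Hypothetical gate: refuse to queue more data into a context that
 * the manager has not yet returned. */
static int sha512_ctx_can_submit(const struct sha512_hash_ctx *ctx)
{
	if (hash_ctx_processing(ctx))
		return HASH_CTX_ERROR_ALREADY_PROCESSING;
	return HASH_CTX_ERROR_NONE;
}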

@@ -0,0 +1,104 @@
/*
 * Header file for multi buffer SHA512 algorithm manager
 *
 * Dual BSD/GPLv2 license text identical to the first new file above.
 */

#ifndef __SHA_MB_MGR_H
#define __SHA_MB_MGR_H

#include <linux/types.h>

#define NUM_SHA512_DIGEST_WORDS	8

enum job_sts {
	STS_UNKNOWN = 0,
	STS_BEING_PROCESSED = 1,
	STS_COMPLETED = 2,
	STS_INTERNAL_ERROR = 3,
	STS_ERROR = 4
};

struct job_sha512 {
	u8	*buffer;
	u64	len;
	u64	result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
	enum	job_sts status;
	void	*user_data;
};

struct sha512_args_x4 {
	uint64_t	digest[8][4];
	uint8_t		*data_ptr[4];
};

struct sha512_lane_data {
	struct job_sha512 *job_in_lane;
};

struct sha512_mb_mgr {
	struct sha512_args_x4 args;

	uint64_t lens[4];

	/* each byte is index (0...3) of an unused lane */
	uint64_t unused_lanes;
	/* byte 4 is set to FF as a flag */
	struct sha512_lane_data ldata[4];
};

#define SHA512_MB_MGR_NUM_LANES_AVX2 4

void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
					     struct job_sha512 *job);
struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);

#endif
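Taken together, the prototypes above define the manager contract: submit may opportunistically return an already completed job (or NULL), and flush drains whatever is still in flight. A sketch of the intended calling pattern, assuming the structures above (error handling omitted; consume() is a hypothetical placeholder):

static void consume(struct job_sha512 *job)
{
	(void)job;	/* job->result_digest and job->status are valid here */
}

static void hash_all(struct sha512_mb_mgr *mgr,
		     struct job_sha512 *jobs, int njobs)
{
	struct job_sha512 *done;
	int i;

	sha512_mb_mgr_init_avx2(mgr);
	for (i = 0; i < njobs; i++) {
		done = sha512_mb_mgr_submit_avx2(mgr, &jobs[i]);
		if (done)
			consume(done);	/* a lane finished opportunistically */
	}
	while ((done = sha512_mb_mgr_flush_avx2(mgr)) != NULL)
		consume(done);		/* drain the remaining lanes */
}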

@@ -0,0 +1,281 @@
/*
 * Header file for multi buffer SHA512 algorithm data structure
 *
 * Dual BSD/GPLv2 license text identical to the first new file above.
 */

# Macros for defining data structures

# Usage example

#START_FIELDS	# JOB_AES
###	name		size	align
#FIELD	_plaintext,	8,	8	# pointer to plaintext
#FIELD	_ciphertext,	8,	8	# pointer to ciphertext
#FIELD	_IV,		16,	8	# IV
#FIELD	_keys,		8,	8	# pointer to keys
#FIELD	_len,		4,	4	# length in bytes
#FIELD	_status,	4,	4	# status enumeration
#FIELD	_user_data,	8,	8	# pointer to user data
#UNION	_union,		size1,	align1, \
#			size2,	align2, \
#			size3,	align3, \
#			...
#END_FIELDS
#%assign _JOB_AES_size	_FIELD_OFFSET
#%assign _JOB_AES_align	_STRUCT_ALIGN

#########################################################################

# Alternate "struc-like" syntax:
#	STRUCT job_aes2
#	RES_Q	.plaintext,	1
#	RES_Q	.ciphertext,	1
#	RES_DQ	.IV,		1
#	RES_B	.nested,	_JOB_AES_SIZE, _JOB_AES_ALIGN
#	RES_U	.union,		size1, align1, \
#				size2, align2, \
#				...
#	ENDSTRUCT
#	# Following only needed if nesting
#	%assign job_aes2_size	_FIELD_OFFSET
#	%assign job_aes2_align	_STRUCT_ALIGN
#
# RES_* macros take a name, a count and an optional alignment.
# The count is in terms of the base size of the macro, and the
# default alignment is the base size.
# The macros are:
# Macro    Base size
# RES_B	    1
# RES_W	    2
# RES_D     4
# RES_Q     8
# RES_DQ   16
# RES_Y    32
# RES_Z    64
#
# RES_U defines a union. Its arguments are a name and two or more
# pairs of "size, alignment"
#
# The two assigns are only needed if this structure is being nested
# within another. Even if the assigns are not done, one can still use
# STRUCT_NAME_size as the size of the structure.
#
# Note that for nesting, you still need to assign to STRUCT_NAME_size.
#
# The differences between this and using "struc" directly are that each
# type is implicitly aligned to its natural length (although this can be
# over-ridden with an explicit third parameter), and that the structure
# is padded at the end to its overall alignment.
#

#########################################################################

#ifndef _DATASTRUCT_ASM_
#define _DATASTRUCT_ASM_

#define PTR_SZ                  8
#define SHA512_DIGEST_WORD_SIZE 8
#define SHA512_MB_MGR_NUM_LANES_AVX2 4
#define NUM_SHA512_DIGEST_WORDS 8
#define SZ4                     4*SHA512_DIGEST_WORD_SIZE
#define ROUNDS                  80*SZ4
#define SHA512_DIGEST_ROW_SIZE  (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)

# START_FIELDS
.macro START_FIELDS
 _FIELD_OFFSET = 0
 _STRUCT_ALIGN = 0
.endm

# FIELD name size align
.macro FIELD name size align
 _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
 \name	= _FIELD_OFFSET
 _FIELD_OFFSET = _FIELD_OFFSET + (\size)
.if (\align > _STRUCT_ALIGN)
 _STRUCT_ALIGN = \align
.endif
.endm

# END_FIELDS
.macro END_FIELDS
 _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
.endm

.macro STRUCT p1
START_FIELDS
.struc \p1
.endm

.macro ENDSTRUCT
 tmp = _FIELD_OFFSET
 END_FIELDS
 tmp = (_FIELD_OFFSET - ##tmp)
.if (tmp > 0)
	.lcomm	tmp
.endif
.endstruc
.endm

## RES_int name size align
.macro RES_int p1 p2 p3
 name = \p1
 size = \p2
 align = .\p3

 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
.align align
.lcomm	name size
 _FIELD_OFFSET = _FIELD_OFFSET + (size)
.if (align > _STRUCT_ALIGN)
 _STRUCT_ALIGN = align
.endif
.endm

# macro RES_B name, size [, align]
.macro RES_B _name, _size, _align=1
RES_int _name _size _align
.endm

# macro RES_W name, size [, align]
.macro RES_W _name, _size, _align=2
RES_int _name 2*(_size) _align
.endm

# macro RES_D name, size [, align]
.macro RES_D _name, _size, _align=4
RES_int _name 4*(_size) _align
.endm

# macro RES_Q name, size [, align]
.macro RES_Q _name, _size, _align=8
RES_int _name 8*(_size) _align
.endm

# macro RES_DQ name, size [, align]
.macro RES_DQ _name, _size, _align=16
RES_int _name 16*(_size) _align
.endm

# macro RES_Y name, size [, align]
.macro RES_Y _name, _size, _align=32
RES_int _name 32*(_size) _align
.endm

# macro RES_Z name, size [, align]
.macro RES_Z _name, _size, _align=64
RES_int _name 64*(_size) _align
.endm

#endif

###################################################################
### Define SHA512 Out Of Order Data Structures
###################################################################

START_FIELDS    # LANE_DATA
###     name            size    align
FIELD   _job_in_lane,   8,      8       # pointer to job object
END_FIELDS

 _LANE_DATA_size = _FIELD_OFFSET
 _LANE_DATA_align = _STRUCT_ALIGN

####################################################################

START_FIELDS    # SHA512_ARGS_X4
###     name            size    align
FIELD   _digest,        8*8*4,  4       # transposed digest
FIELD   _data_ptr,      8*4,    8       # array of pointers to data
END_FIELDS

 _SHA512_ARGS_X4_size = _FIELD_OFFSET
 _SHA512_ARGS_X4_align = _STRUCT_ALIGN

#####################################################################

START_FIELDS    # MB_MGR
###     name            size    align
FIELD   _args,          _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
FIELD   _lens,          8*4,    8
FIELD   _unused_lanes,  8,      8
FIELD   _ldata,         _LANE_DATA_size*4, _LANE_DATA_align
END_FIELDS

 _MB_MGR_size = _FIELD_OFFSET
 _MB_MGR_align = _STRUCT_ALIGN

_args_digest = _args + _digest
_args_data_ptr = _args + _data_ptr

#######################################################################

#######################################################################
#### Define constants
#######################################################################

#define STS_UNKNOWN             0
#define STS_BEING_PROCESSED     1
#define STS_COMPLETED           2

#######################################################################
#### Define JOB_SHA512 structure
#######################################################################

START_FIELDS    # JOB_SHA512
###     name                            size    align
FIELD   _buffer,                        8,      8       # pointer to buffer
FIELD   _len,                           8,      8       # length in bytes
FIELD   _result_digest,                 8*8,    32      # Digest (output)
FIELD   _status,                        4,      4
FIELD   _user_data,                     8,      8
END_FIELDS

 _JOB_SHA512_size = _FIELD_OFFSET
 _JOB_SHA512_align = _STRUCT_ALIGN
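The FIELD/END_FIELDS machinery above is plain offset arithmetic: round the running offset up to each field's alignment, record the offset under the field's name, and track the largest alignment seen so END_FIELDS can pad the struct to it. The same computation in C (illustrative helper, not part of this patch):

#include <stddef.h>

static size_t field_offset;	/* mirrors _FIELD_OFFSET */
static size_t struct_align;	/* mirrors _STRUCT_ALIGN */

/* What one FIELD invocation computes: the field's aligned offset. */
static size_t field(size_t size, size_t align)
{
	size_t off;

	field_offset = (field_offset + align - 1) & ~(align - 1);
	off = field_offset;		/* this is what \name is set to */
	field_offset += size;
	if (align > struct_align)
		struct_align = align;	/* END_FIELDS pads to this */
	return off;
}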

@@ -0,0 +1,291 @@
/*
 * Flush routine for SHA512 multibuffer
 *
 * Dual BSD/GPLv2 license text identical to the first new file above.
 */
|
||||||
|
|
||||||
|
#include <linux/linkage.h>
|
||||||
|
#include <asm/frame.h>
|
||||||
|
#include "sha512_mb_mgr_datastruct.S"
|
||||||
|
|
||||||
|
.extern sha512_x4_avx2
|
||||||
|
|
||||||
|
# LINUX register definitions
|
||||||
|
#define arg1 %rdi
|
||||||
|
#define arg2 %rsi
|
||||||
|
|
||||||
|
# idx needs to be other than arg1, arg2, rbx, r12
|
||||||
|
#define idx %rdx
|
||||||
|
|
||||||
|
# Common definitions
|
||||||
|
#define state arg1
|
||||||
|
#define job arg2
|
||||||
|
#define len2 arg2
|
||||||
|
|
||||||
|
#define unused_lanes %rbx
|
||||||
|
#define lane_data %rbx
|
||||||
|
#define tmp2 %rbx
|
||||||
|
|
||||||
|
#define job_rax %rax
|
||||||
|
#define tmp1 %rax
|
||||||
|
#define size_offset %rax
|
||||||
|
#define tmp %rax
|
||||||
|
#define start_offset %rax
|
||||||
|
|
||||||
|
#define tmp3 arg1
|
||||||
|
|
||||||
|
#define extra_blocks arg2
|
||||||
|
#define p arg2
|
||||||
|
|
||||||
|
#define tmp4 %r8
|
||||||
|
#define lens0 %r8
|
||||||
|
|
||||||
|
#define lens1 %r9
|
||||||
|
#define lens2 %r10
|
||||||
|
#define lens3 %r11
|
||||||
|
|
||||||
|
.macro LABEL prefix n
|
||||||
|
\prefix\n\():
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro JNE_SKIP i
|
||||||
|
jne skip_\i
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.altmacro
|
||||||
|
.macro SET_OFFSET _offset
|
||||||
|
offset = \_offset
|
||||||
|
.endm
|
||||||
|
.noaltmacro
|
||||||
|
|
||||||
|
# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
# arg 1 : rdi : state
ENTRY(sha512_mb_mgr_flush_avx2)
	FRAME_BEGIN
	push	%rbx

	# If bit (32+7) is set, then all lanes are empty
	mov	_unused_lanes(state), unused_lanes
	bt	$32+7, unused_lanes
	jc	return_null

	# find a lane with a non-null job
	xor	idx, idx
	offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
	cmpq	$0, offset(state)
	cmovne	one(%rip), idx
	offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
	cmpq	$0, offset(state)
	cmovne	two(%rip), idx
	offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
	cmpq	$0, offset(state)
	cmovne	three(%rip), idx

	# copy idx to empty lanes
copy_lane_data:
	offset = (_args + _data_ptr)
	mov	offset(state,idx,8), tmp

	I = 0
.rep 4
	offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
	cmpq	$0, offset(state)
.altmacro
	JNE_SKIP %I
	offset = (_args + _data_ptr + 8*I)
	mov	tmp, offset(state)
	offset = (_lens + 8*I +4)
	movl	$0xFFFFFFFF, offset(state)
LABEL skip_ %I
	I = (I+1)
.noaltmacro
.endr
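
	# Every lane now carries a valid data pointer: empty lanes were given
	# a copy of a live lane's pointer and a length of 0xFFFFFFFF blocks,
	# so sha512_x4_avx2 can safely hash all four lanes while the fake
	# lanes never win the min-length search and never complete.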

	# Find min length
	mov	_lens + 0*8(state),lens0
	mov	lens0,idx
	mov	_lens + 1*8(state),lens1
	cmp	idx,lens1
	cmovb	lens1,idx
	mov	_lens + 2*8(state),lens2
	cmp	idx,lens2
	cmovb	lens2,idx
	mov	_lens + 3*8(state),lens3
	cmp	idx,lens3
	cmovb	lens3,idx
	mov	idx,len2
	and	$0xF,idx
	and	$~0xFF,len2
	jz	len_is_0
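	# idx now holds the winning lane id (low nibble of the packed entry);
	# len2 keeps the packed value with the low byte masked off, so a zero
	# result means the shortest lane has no blocks left and job "idx" can
	# be completed without calling into the hash routine.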

	sub	len2, lens0
	sub	len2, lens1
	sub	len2, lens2
	sub	len2, lens3
	shr	$32,len2
	mov	lens0, _lens + 0*8(state)
	mov	lens1, _lens + 1*8(state)
	mov	lens2, _lens + 2*8(state)
	mov	lens3, _lens + 3*8(state)

	# "state" and "args" are the same address, arg1
	# len is arg2
	call	sha512_x4_avx2
	# state and idx are intact

len_is_0:
	# process completed job "idx"
	imul	$_LANE_DATA_size, idx, lane_data
	lea	_ldata(state, lane_data), lane_data

	mov	_job_in_lane(lane_data), job_rax
	movq	$0, _job_in_lane(lane_data)
	movl	$STS_COMPLETED, _status(job_rax)
	mov	_unused_lanes(state), unused_lanes
	shl	$8, unused_lanes
	or	idx, unused_lanes
	mov	unused_lanes, _unused_lanes(state)

	movl	$0xFFFFFFFF, _lens+4(state, idx, 8)

	vmovq	_args_digest+0*32(state, idx, 8), %xmm0
	vpinsrq	$1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
	vmovq	_args_digest+2*32(state, idx, 8), %xmm1
	vpinsrq	$1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
	vmovq	_args_digest+4*32(state, idx, 8), %xmm2
	vpinsrq	$1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
	vmovq	_args_digest+6*32(state, idx, 8), %xmm3
	vpinsrq	$1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3

	vmovdqu	%xmm0, _result_digest(job_rax)
	vmovdqu	%xmm1, _result_digest+1*16(job_rax)
	vmovdqu	%xmm2, _result_digest+2*16(job_rax)
	vmovdqu	%xmm3, _result_digest+3*16(job_rax)

return:
	pop	%rbx
	FRAME_END
	ret

return_null:
	xor	job_rax, job_rax
	jmp	return
ENDPROC(sha512_mb_mgr_flush_avx2)
.align 16

ENTRY(sha512_mb_mgr_get_comp_job_avx2)
	push	%rbx

	mov	_unused_lanes(state), unused_lanes
	bt	$(32+7), unused_lanes
	jc	.return_null

	# Find min length
	mov	_lens(state),lens0
	mov	lens0,idx
	mov	_lens+1*8(state),lens1
	cmp	idx,lens1
	cmovb	lens1,idx
	mov	_lens+2*8(state),lens2
	cmp	idx,lens2
	cmovb	lens2,idx
	mov	_lens+3*8(state),lens3
	cmp	idx,lens3
	cmovb	lens3,idx
	test	$~0xF,idx
	jnz	.return_null
	and	$0xF,idx

	# process completed job "idx"
	imul	$_LANE_DATA_size, idx, lane_data
	lea	_ldata(state, lane_data), lane_data

	mov	_job_in_lane(lane_data), job_rax
	movq	$0, _job_in_lane(lane_data)
	movl	$STS_COMPLETED, _status(job_rax)
	mov	_unused_lanes(state), unused_lanes
	shl	$8, unused_lanes
	or	idx, unused_lanes
	mov	unused_lanes, _unused_lanes(state)

	movl	$0xFFFFFFFF, _lens+4(state, idx, 8)

	vmovq	_args_digest(state, idx, 8), %xmm0
	vpinsrq	$1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
	vmovq	_args_digest+2*32(state, idx, 8), %xmm1
	vpinsrq	$1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
	vmovq	_args_digest+4*32(state, idx, 8), %xmm2
	vpinsrq	$1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
	vmovq	_args_digest+6*32(state, idx, 8), %xmm3
	vpinsrq	$1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3

	vmovdqu	%xmm0, _result_digest+0*16(job_rax)
	vmovdqu	%xmm1, _result_digest+1*16(job_rax)
	vmovdqu	%xmm2, _result_digest+2*16(job_rax)
	vmovdqu	%xmm3, _result_digest+3*16(job_rax)

	pop	%rbx

	ret

.return_null:
	xor	job_rax, job_rax
	pop	%rbx
	ret
ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
.data

.align 16
one:
.quad	1
two:
.quad	2
three:
.quad	3
@ -0,0 +1,67 @@
/*
 * Initialization code for multi buffer SHA512 algorithm for AVX2
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "sha512_mb_mgr.h"

void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
{
	unsigned int j;

	state->lens[0] = 0;
	state->lens[1] = 1;
	state->lens[2] = 2;
	state->lens[3] = 3;
	state->unused_lanes = 0xFF03020100;
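	/*
	 * unused_lanes is a stack of free lane ids, one per byte, with an
	 * 0xFF sentinel on top: submit pops the low byte and flush pushes
	 * freed ids back. The values seeded into lens[] above land in the
	 * low dword of each entry, which the assembly keeps as the lane
	 * index underneath the 32-bit block count.
	 */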
	for (j = 0; j < 4; j++)
		state->ldata[j].job_in_lane = NULL;
}
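
/*
 * Rough usage sketch (editorial, not part of the patch; the real glue
 * lives in the sha512_mb driver built on mcryptd): initialize once,
 * submit jobs as they arrive, then flush to drain lanes that stay
 * unfilled. Submit and flush both return a completed job or NULL:
 *
 *	sha512_mb_mgr_init_avx2(state);
 *	job = sha512_mb_mgr_submit_avx2(state, new_job);
 *	while ((job = sha512_mb_mgr_flush_avx2(state)))
 *		handle_completed(job);
 *
 * where handle_completed() stands in for the caller's completion logic.
 */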
@ -0,0 +1,222 @@
/*
 * Buffer submit code for multi buffer SHA512 algorithm
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "sha512_mb_mgr_datastruct.S"

.extern sha512_x4_avx2

#define arg1	%rdi
#define arg2	%rsi

#define idx		%rdx
#define last_len	%rdx

#define size_offset	%rcx
#define tmp2		%rcx

# Common definitions
#define state	arg1
#define job	arg2
#define len2	arg2
#define p2	arg2

#define p		%r11
#define start_offset	%r11

#define unused_lanes	%rbx

#define job_rax		%rax
#define len		%rax

#define lane		%r12
#define tmp3		%r12
#define lens3		%r12

#define extra_blocks	%r8
#define lens0		%r8

#define tmp		%r9
#define lens1		%r9

#define lane_data	%r10
#define lens2		%r10

#define DWORD_len	%eax

# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
# arg 1 : rdi : state
# arg 2 : rsi : job
ENTRY(sha512_mb_mgr_submit_avx2)
	FRAME_BEGIN
	push	%rbx
	push	%r12

	mov	_unused_lanes(state), unused_lanes
	movzb	%bl,lane
	shr	$8, unused_lanes
	imul	$_LANE_DATA_size, lane,lane_data
	movl	$STS_BEING_PROCESSED, _status(job)
	lea	_ldata(state, lane_data), lane_data
	mov	unused_lanes, _unused_lanes(state)
	movl	_len(job), DWORD_len

	mov	job, _job_in_lane(lane_data)
	movl	DWORD_len,_lens+4(state , lane, 8)

	# Load digest words from result_digest
	vmovdqu	_result_digest+0*16(job), %xmm0
	vmovdqu	_result_digest+1*16(job), %xmm1
	vmovdqu	_result_digest+2*16(job), %xmm2
	vmovdqu	_result_digest+3*16(job), %xmm3

	vmovq	%xmm0, _args_digest(state, lane, 8)
	vpextrq	$1, %xmm0, _args_digest+1*32(state , lane, 8)
	vmovq	%xmm1, _args_digest+2*32(state , lane, 8)
	vpextrq	$1, %xmm1, _args_digest+3*32(state , lane, 8)
	vmovq	%xmm2, _args_digest+4*32(state , lane, 8)
	vpextrq	$1, %xmm2, _args_digest+5*32(state , lane, 8)
	vmovq	%xmm3, _args_digest+6*32(state , lane, 8)
	vpextrq	$1, %xmm3, _args_digest+7*32(state , lane, 8)
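
	# The in-flight digest is stored row-interleaved: qword i of all four
	# lanes shares one 32-byte row, hence the 32-byte stride per digest
	# word with an 8-byte lane offset inside each row.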

	mov	_buffer(job), p
	mov	p, _args_data_ptr(state, lane, 8)

	cmp	$0xFF, unused_lanes
	jne	return_null

start_loop:

	# Find min length
	mov	_lens+0*8(state),lens0
	mov	lens0,idx
	mov	_lens+1*8(state),lens1
	cmp	idx,lens1
	cmovb	lens1, idx
	mov	_lens+2*8(state),lens2
	cmp	idx,lens2
	cmovb	lens2,idx
	mov	_lens+3*8(state),lens3
	cmp	idx,lens3
	cmovb	lens3,idx
	mov	idx,len2
	and	$0xF,idx
	and	$~0xFF,len2
	jz	len_is_0

	sub	len2,lens0
	sub	len2,lens1
	sub	len2,lens2
	sub	len2,lens3
	shr	$32,len2
	mov	lens0, _lens + 0*8(state)
	mov	lens1, _lens + 1*8(state)
	mov	lens2, _lens + 2*8(state)
	mov	lens3, _lens + 3*8(state)

	# "state" and "args" are the same address, arg1
	# len is arg2
	call	sha512_x4_avx2
	# state and idx are intact

len_is_0:

	# process completed job "idx"
	imul	$_LANE_DATA_size, idx, lane_data
	lea	_ldata(state, lane_data), lane_data

	mov	_job_in_lane(lane_data), job_rax
	mov	_unused_lanes(state), unused_lanes
	movq	$0, _job_in_lane(lane_data)
	movl	$STS_COMPLETED, _status(job_rax)
	shl	$8, unused_lanes
	or	idx, unused_lanes
	mov	unused_lanes, _unused_lanes(state)

	movl	$0xFFFFFFFF,_lens+4(state,idx,8)
	vmovq	_args_digest+0*32(state , idx, 8), %xmm0
	vpinsrq	$1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
	vmovq	_args_digest+2*32(state , idx, 8), %xmm1
	vpinsrq	$1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
	vmovq	_args_digest+4*32(state , idx, 8), %xmm2
	vpinsrq	$1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
	vmovq	_args_digest+6*32(state , idx, 8), %xmm3
	vpinsrq	$1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3

	vmovdqu	%xmm0, _result_digest + 0*16(job_rax)
	vmovdqu	%xmm1, _result_digest + 1*16(job_rax)
	vmovdqu	%xmm2, _result_digest + 2*16(job_rax)
	vmovdqu	%xmm3, _result_digest + 3*16(job_rax)

return:
	pop	%r12
	pop	%rbx
	FRAME_END
	ret

return_null:
	xor	job_rax, job_rax
	jmp	return
ENDPROC(sha512_mb_mgr_submit_avx2)
.data

.align 16
H0:	.int	0x6a09e667
H1:	.int	0xbb67ae85
H2:	.int	0x3c6ef372
H3:	.int	0xa54ff53a
H4:	.int	0x510e527f
H5:	.int	0x9b05688c
H6:	.int	0x1f83d9ab
H7:	.int	0x5be0cd19
@ -0,0 +1,529 @@
/*
 * Multi-buffer SHA512 algorithm hash compute routine
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

# code to compute quad SHA512 using AVX2
# use YMMs to tackle the larger digest size
# outer calling routine takes care of save and restore of XMM registers
# Logic designed/laid out by JDG

# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
# Stack must be aligned to 32 bytes before call
# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12
# Linux preserves: rcx rdx rdi rbp r13 r14 r15
# clobbers ymm0-15

#include <linux/linkage.h>
#include "sha512_mb_mgr_datastruct.S"

arg1 = %rdi
arg2 = %rsi

# Common definitions
STATE = arg1
INP_SIZE = arg2

IDX = %rax
ROUND = %rbx
TBL = %r8

inp0 = %r9
inp1 = %r10
inp2 = %r11
inp3 = %r12

a = %ymm0
b = %ymm1
c = %ymm2
d = %ymm3
e = %ymm4
f = %ymm5
g = %ymm6
h = %ymm7

a0 = %ymm8
a1 = %ymm9
a2 = %ymm10

TT0 = %ymm14
TT1 = %ymm13
TT2 = %ymm12
TT3 = %ymm11
TT4 = %ymm10
TT5 = %ymm9

T1 = %ymm14
TMP = %ymm15

# Define stack usage
STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24

#define	VMOVPD	vmovupd
_digest = SZ4*16

# transpose r0, r1, r2, r3, t0, t1
# "transpose" data in {r0..r3} using temps {t0..t3}
# Input looks like: {r0 r1 r2 r3}
# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
#
# output looks like: {t0 r1 r0 r3}
# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}

.macro TRANSPOSE r0 r1 r2 r3 t0 t1
	vshufps	$0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
	vshufps	$0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
	vshufps	$0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
	vshufps	$0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}

	vperm2f128	$0x20, \r2, \r0, \r1 # h6...a6
	vperm2f128	$0x31, \r2, \r0, \r3 # h2...a2
	vperm2f128	$0x31, \t1, \t0, \r0 # h5...a5
	vperm2f128	$0x20, \t1, \t0, \t0 # h1...a1
.endm
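
# Note: the element names above are 32-bit halves; each consecutive pair
# (a1 a0, a3 a2, ...) is one 64-bit SHA-512 word, so the dword-granular
# vshufps/vperm2f128 sequence, which never splits a pair, effects a 4x4
# qword transpose across the four message lanes.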

.macro ROTATE_ARGS
TMP_ = h
h = g
g = f
f = e
e = d
d = c
c = b
b = a
a = TMP_
.endm

# PRORQ reg, imm, tmp
# packed-rotate-right-double
# does a rotate by doing two shifts and an or
.macro _PRORQ reg imm tmp
	vpsllq	$(64-\imm),\reg,\tmp
	vpsrlq	$\imm,\reg, \reg
	vpor	\tmp,\reg, \reg
.endm

# non-destructive
# PRORQ_nd reg, imm, tmp, src
.macro _PRORQ_nd reg imm tmp src
	vpsllq	$(64-\imm), \src, \tmp
	vpsrlq	$\imm, \src, \reg
	vpor	\tmp, \reg, \reg
.endm

# PRORQ dst/src, amt
.macro PRORQ reg imm
	_PRORQ	\reg, \imm, TMP
.endm

# PRORQ_nd dst, src, amt
.macro PRORQ_nd reg tmp imm
	_PRORQ_nd	\reg, \imm, TMP, \tmp
.endm

#; arguments passed implicitly in preprocessor symbols i, a...h
.macro ROUND_00_15 _T1 i
	PRORQ_nd	a0, e, (18-14)	# sig1: a0 = (e >> 4)

	vpxor	g, f, a2	# ch: a2 = f^g
	vpand	e,a2, a2	# ch: a2 = (f^g)&e
	vpxor	g, a2, a2	# a2 = ch

	PRORQ_nd	a1,e,41		# sig1: a1 = (e >> 25)

	offset = SZ4*(\i & 0xf)
	vmovdqu	\_T1,offset(%rsp)
	vpaddq	(TBL,ROUND,1), \_T1, \_T1	# T1 = W + K
	vpxor	e,a0, a0	# sig1: a0 = e ^ (e >> 5)
	PRORQ	a0, 14		# sig1: a0 = (e >> 6) ^ (e >> 11)
	vpaddq	a2, h, h	# h = h + ch
	PRORQ_nd	a2,a,6	# sig0: a2 = (a >> 11)
	vpaddq	\_T1,h, h	# h = h + ch + W + K
	vpxor	a1, a0, a0	# a0 = sigma1
	vmovdqu	a,\_T1
	PRORQ_nd	a1,a,39	# sig0: a1 = (a >> 22)
	vpxor	c, \_T1, \_T1	# maj: T1 = a^c
	add	$SZ4, ROUND	# ROUND++
	vpand	b, \_T1, \_T1	# maj: T1 = (a^c)&b
	vpaddq	a0, h, h
	vpaddq	h, d, d
	vpxor	a, a2, a2	# sig0: a2 = a ^ (a >> 11)
	PRORQ	a2,28		# sig0: a2 = (a >> 2) ^ (a >> 13)
	vpxor	a1, a2, a2	# a2 = sig0
	vpand	c, a, a1	# maj: a1 = a&c
	vpor	\_T1, a1, a1	# a1 = maj
	vpaddq	a1, h, h	# h = h + ch + W + K + maj
	vpaddq	a2, h, h	# h = h + ch + W + K + maj + sigma0
	ROTATE_ARGS
.endm
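
# For reference, each ROUND_00_15 invocation computes the standard SHA-512
# round for all four lanes at once:
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
#	d += T1;  h = T1 + Sigma0(a) + Maj(a,b,c)
# with Sigma1(e) = ROTR(e,14) ^ ROTR(e,18) ^ ROTR(e,41) and
# Sigma0(a) = ROTR(a,28) ^ ROTR(a,34) ^ ROTR(a,39), assembled from the
# PRORQ rotate amounts (18-14), 41, 14 and 6, 39, 28 used above.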


#; arguments passed implicitly in preprocessor symbols i, a...h
.macro ROUND_16_XX _T1 i
	vmovdqu	SZ4*((\i-15)&0xf)(%rsp), \_T1
	vmovdqu	SZ4*((\i-2)&0xf)(%rsp), a1
	vmovdqu	\_T1, a0
	PRORQ	\_T1,7
	vmovdqu	a1, a2
	PRORQ	a1,42
	vpxor	a0, \_T1, \_T1
	PRORQ	\_T1, 1
	vpxor	a2, a1, a1
	PRORQ	a1, 19
	vpsrlq	$7, a0, a0
	vpxor	a0, \_T1, \_T1
	vpsrlq	$6, a2, a2
	vpxor	a2, a1, a1
	vpaddq	SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
	vpaddq	SZ4*((\i-7)&0xf)(%rsp), a1, a1
	vpaddq	a1, \_T1, \_T1

	ROUND_00_15	\_T1,\i
.endm
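
# ROUND_16_XX extends the message schedule before reusing ROUND_00_15:
#	W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
# where sigma0(x) = ROTR(x,1) ^ ROTR(x,8) ^ SHR(x,7) and
#	sigma1(x) = ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6),
# the W[] ring buffer living in the 16 stack slots indexed by (i & 0xf).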


# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
# arg 1 : STATE    : pointer to input data
# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
ENTRY(sha512_x4_avx2)
	# general registers preserved in outer calling routine
	# outer calling routine saves all the XMM registers
	# save callee-saved clobbered registers to comply with C function ABI
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	sub	$STACK_SPACE1, %rsp

	# Load the pre-transposed incoming digest.
	vmovdqu	0*SHA512_DIGEST_ROW_SIZE(STATE),a
	vmovdqu	1*SHA512_DIGEST_ROW_SIZE(STATE),b
	vmovdqu	2*SHA512_DIGEST_ROW_SIZE(STATE),c
	vmovdqu	3*SHA512_DIGEST_ROW_SIZE(STATE),d
	vmovdqu	4*SHA512_DIGEST_ROW_SIZE(STATE),e
	vmovdqu	5*SHA512_DIGEST_ROW_SIZE(STATE),f
	vmovdqu	6*SHA512_DIGEST_ROW_SIZE(STATE),g
	vmovdqu	7*SHA512_DIGEST_ROW_SIZE(STATE),h

	lea	K512_4(%rip),TBL

	# load the address of each of the 4 message lanes
	# getting ready to transpose input onto stack
	mov	_data_ptr+0*PTR_SZ(STATE),inp0
	mov	_data_ptr+1*PTR_SZ(STATE),inp1
	mov	_data_ptr+2*PTR_SZ(STATE),inp2
	mov	_data_ptr+3*PTR_SZ(STATE),inp3

	xor	IDX, IDX
lloop:
	xor	ROUND, ROUND

	# save old digest
	vmovdqu	a, _digest(%rsp)
	vmovdqu	b, _digest+1*SZ4(%rsp)
	vmovdqu	c, _digest+2*SZ4(%rsp)
	vmovdqu	d, _digest+3*SZ4(%rsp)
	vmovdqu	e, _digest+4*SZ4(%rsp)
	vmovdqu	f, _digest+5*SZ4(%rsp)
	vmovdqu	g, _digest+6*SZ4(%rsp)
	vmovdqu	h, _digest+7*SZ4(%rsp)
	i = 0
.rep 4
	vmovdqu	PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
	VMOVPD	i*32(inp0, IDX), TT2
	VMOVPD	i*32(inp1, IDX), TT1
	VMOVPD	i*32(inp2, IDX), TT4
	VMOVPD	i*32(inp3, IDX), TT3
	TRANSPOSE	TT2, TT1, TT4, TT3, TT0, TT5
	vpshufb	TMP, TT0, TT0
	vpshufb	TMP, TT1, TT1
	vpshufb	TMP, TT2, TT2
	vpshufb	TMP, TT3, TT3
	ROUND_00_15	TT0,(i*4+0)
	ROUND_00_15	TT1,(i*4+1)
	ROUND_00_15	TT2,(i*4+2)
	ROUND_00_15	TT3,(i*4+3)
	i = (i+1)
.endr
	add	$128, IDX

	i = (i*4)

	jmp	Lrounds_16_xx
.align 16
Lrounds_16_xx:
.rep 16
	ROUND_16_XX	T1, i
	i = (i+1)
.endr
	cmp	$0xa00,ROUND
	jb	Lrounds_16_xx

	# add old digest
	vpaddq	_digest(%rsp), a, a
	vpaddq	_digest+1*SZ4(%rsp), b, b
	vpaddq	_digest+2*SZ4(%rsp), c, c
	vpaddq	_digest+3*SZ4(%rsp), d, d
	vpaddq	_digest+4*SZ4(%rsp), e, e
	vpaddq	_digest+5*SZ4(%rsp), f, f
	vpaddq	_digest+6*SZ4(%rsp), g, g
	vpaddq	_digest+7*SZ4(%rsp), h, h

	sub	$1, INP_SIZE # unit is blocks
	jne	lloop

	# write back to memory (state object) the transposed digest
	vmovdqu	a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
	vmovdqu	h, 7*SHA512_DIGEST_ROW_SIZE(STATE)

	# update input data pointers
	add	IDX, inp0
	mov	inp0, _data_ptr+0*PTR_SZ(STATE)
	add	IDX, inp1
	mov	inp1, _data_ptr+1*PTR_SZ(STATE)
	add	IDX, inp2
	mov	inp2, _data_ptr+2*PTR_SZ(STATE)
	add	IDX, inp3
	mov	inp3, _data_ptr+3*PTR_SZ(STATE)

	#;;;;;;;;;;;;;;;
	#; Postamble
	add	$STACK_SPACE1, %rsp
	# restore callee-saved clobbered registers

	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12

	# outer calling routine restores XMM and other GP registers
	ret
ENDPROC(sha512_x4_avx2)

.data
.align 64
K512_4:
	.octa 0x428a2f98d728ae22428a2f98d728ae22,\
		0x428a2f98d728ae22428a2f98d728ae22
	.octa 0x7137449123ef65cd7137449123ef65cd,\
		0x7137449123ef65cd7137449123ef65cd
	.octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
		0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
	.octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
		0xe9b5dba58189dbbce9b5dba58189dbbc
	.octa 0x3956c25bf348b5383956c25bf348b538,\
		0x3956c25bf348b5383956c25bf348b538
	.octa 0x59f111f1b605d01959f111f1b605d019,\
		0x59f111f1b605d01959f111f1b605d019
	.octa 0x923f82a4af194f9b923f82a4af194f9b,\
		0x923f82a4af194f9b923f82a4af194f9b
	.octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
		0xab1c5ed5da6d8118ab1c5ed5da6d8118
	.octa 0xd807aa98a3030242d807aa98a3030242,\
		0xd807aa98a3030242d807aa98a3030242
	.octa 0x12835b0145706fbe12835b0145706fbe,\
		0x12835b0145706fbe12835b0145706fbe
	.octa 0x243185be4ee4b28c243185be4ee4b28c,\
		0x243185be4ee4b28c243185be4ee4b28c
	.octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
		0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
	.octa 0x72be5d74f27b896f72be5d74f27b896f,\
		0x72be5d74f27b896f72be5d74f27b896f
	.octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
		0x80deb1fe3b1696b180deb1fe3b1696b1
	.octa 0x9bdc06a725c712359bdc06a725c71235,\
		0x9bdc06a725c712359bdc06a725c71235
	.octa 0xc19bf174cf692694c19bf174cf692694,\
		0xc19bf174cf692694c19bf174cf692694
	.octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
		0xe49b69c19ef14ad2e49b69c19ef14ad2
	.octa 0xefbe4786384f25e3efbe4786384f25e3,\
		0xefbe4786384f25e3efbe4786384f25e3
	.octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
		0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
	.octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
		0x240ca1cc77ac9c65240ca1cc77ac9c65
	.octa 0x2de92c6f592b02752de92c6f592b0275,\
		0x2de92c6f592b02752de92c6f592b0275
	.octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
		0x4a7484aa6ea6e4834a7484aa6ea6e483
	.octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
		0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
	.octa 0x76f988da831153b576f988da831153b5,\
		0x76f988da831153b576f988da831153b5
	.octa 0x983e5152ee66dfab983e5152ee66dfab,\
		0x983e5152ee66dfab983e5152ee66dfab
	.octa 0xa831c66d2db43210a831c66d2db43210,\
		0xa831c66d2db43210a831c66d2db43210
	.octa 0xb00327c898fb213fb00327c898fb213f,\
		0xb00327c898fb213fb00327c898fb213f
	.octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
		0xbf597fc7beef0ee4bf597fc7beef0ee4
	.octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
		0xc6e00bf33da88fc2c6e00bf33da88fc2
	.octa 0xd5a79147930aa725d5a79147930aa725,\
		0xd5a79147930aa725d5a79147930aa725
	.octa 0x06ca6351e003826f06ca6351e003826f,\
		0x06ca6351e003826f06ca6351e003826f
	.octa 0x142929670a0e6e70142929670a0e6e70,\
		0x142929670a0e6e70142929670a0e6e70
	.octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
		0x27b70a8546d22ffc27b70a8546d22ffc
	.octa 0x2e1b21385c26c9262e1b21385c26c926,\
		0x2e1b21385c26c9262e1b21385c26c926
	.octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
		0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
	.octa 0x53380d139d95b3df53380d139d95b3df,\
		0x53380d139d95b3df53380d139d95b3df
	.octa 0x650a73548baf63de650a73548baf63de,\
		0x650a73548baf63de650a73548baf63de
	.octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
		0x766a0abb3c77b2a8766a0abb3c77b2a8
	.octa 0x81c2c92e47edaee681c2c92e47edaee6,\
		0x81c2c92e47edaee681c2c92e47edaee6
	.octa 0x92722c851482353b92722c851482353b,\
		0x92722c851482353b92722c851482353b
	.octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
		0xa2bfe8a14cf10364a2bfe8a14cf10364
	.octa 0xa81a664bbc423001a81a664bbc423001,\
		0xa81a664bbc423001a81a664bbc423001
	.octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
		0xc24b8b70d0f89791c24b8b70d0f89791
	.octa 0xc76c51a30654be30c76c51a30654be30,\
		0xc76c51a30654be30c76c51a30654be30
	.octa 0xd192e819d6ef5218d192e819d6ef5218,\
		0xd192e819d6ef5218d192e819d6ef5218
	.octa 0xd69906245565a910d69906245565a910,\
		0xd69906245565a910d69906245565a910
	.octa 0xf40e35855771202af40e35855771202a,\
		0xf40e35855771202af40e35855771202a
	.octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
		0x106aa07032bbd1b8106aa07032bbd1b8
	.octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
		0x19a4c116b8d2d0c819a4c116b8d2d0c8
	.octa 0x1e376c085141ab531e376c085141ab53,\
		0x1e376c085141ab531e376c085141ab53
	.octa 0x2748774cdf8eeb992748774cdf8eeb99,\
		0x2748774cdf8eeb992748774cdf8eeb99
	.octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
		0x34b0bcb5e19b48a834b0bcb5e19b48a8
	.octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
		0x391c0cb3c5c95a63391c0cb3c5c95a63
	.octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
		0x4ed8aa4ae3418acb4ed8aa4ae3418acb
	.octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
		0x5b9cca4f7763e3735b9cca4f7763e373
	.octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
		0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
	.octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
		0x748f82ee5defb2fc748f82ee5defb2fc
	.octa 0x78a5636f43172f6078a5636f43172f60,\
		0x78a5636f43172f6078a5636f43172f60
	.octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
		0x84c87814a1f0ab7284c87814a1f0ab72
	.octa 0x8cc702081a6439ec8cc702081a6439ec,\
		0x8cc702081a6439ec8cc702081a6439ec
	.octa 0x90befffa23631e2890befffa23631e28,\
		0x90befffa23631e2890befffa23631e28
	.octa 0xa4506cebde82bde9a4506cebde82bde9,\
		0xa4506cebde82bde9a4506cebde82bde9
	.octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
		0xbef9a3f7b2c67915bef9a3f7b2c67915
	.octa 0xc67178f2e372532bc67178f2e372532b,\
		0xc67178f2e372532bc67178f2e372532b
	.octa 0xca273eceea26619cca273eceea26619c,\
		0xca273eceea26619cca273eceea26619c
	.octa 0xd186b8c721c0c207d186b8c721c0c207,\
		0xd186b8c721c0c207d186b8c721c0c207
	.octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
		0xeada7dd6cde0eb1eeada7dd6cde0eb1e
	.octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
		0xf57d4f7fee6ed178f57d4f7fee6ed178
	.octa 0x06f067aa72176fba06f067aa72176fba,\
		0x06f067aa72176fba06f067aa72176fba
	.octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
		0x0a637dc5a2c898a60a637dc5a2c898a6
	.octa 0x113f9804bef90dae113f9804bef90dae,\
		0x113f9804bef90dae113f9804bef90dae
	.octa 0x1b710b35131c471b1b710b35131c471b,\
		0x1b710b35131c471b1b710b35131c471b
	.octa 0x28db77f523047d8428db77f523047d84,\
		0x28db77f523047d8428db77f523047d84
	.octa 0x32caab7b40c7249332caab7b40c72493,\
		0x32caab7b40c7249332caab7b40c72493
	.octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
		0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
	.octa 0x431d67c49c100d4c431d67c49c100d4c,\
		0x431d67c49c100d4c431d67c49c100d4c
	.octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
		0x4cc5d4becb3e42b64cc5d4becb3e42b6
	.octa 0x597f299cfc657e2a597f299cfc657e2a,\
		0x597f299cfc657e2a597f299cfc657e2a
	.octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
		0x5fcb6fab3ad6faec5fcb6fab3ad6faec
	.octa 0x6c44198c4a4758176c44198c4a475817,\
		0x6c44198c4a4758176c44198c4a475817

PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
			 .octa 0x18191a1b1c1d1e1f1011121314151617
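# vpshufb with this mask reverses the bytes of each 64-bit word, turning
# the little-endian message loads into the big-endian words SHA-512 expects.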
@ -346,4 +346,10 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
 MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha512-ssse3");
+MODULE_ALIAS_CRYPTO("sha512-avx");
+MODULE_ALIAS_CRYPTO("sha512-avx2");
 MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha384-ssse3");
+MODULE_ALIAS_CRYPTO("sha384-avx");
+MODULE_ALIAS_CRYPTO("sha384-avx2");
@ -93,6 +93,15 @@ config CRYPTO_AKCIPHER
 	select CRYPTO_AKCIPHER2
 	select CRYPTO_ALGAPI
 
+config CRYPTO_KPP2
+	tristate
+	select CRYPTO_ALGAPI2
+
+config CRYPTO_KPP
+	tristate
+	select CRYPTO_ALGAPI
+	select CRYPTO_KPP2
+
 config CRYPTO_RSA
 	tristate "RSA algorithm"
 	select CRYPTO_AKCIPHER
@ -102,6 +111,19 @@ config CRYPTO_RSA
 	help
 	  Generic implementation of the RSA public key algorithm.
 
+config CRYPTO_DH
+	tristate "Diffie-Hellman algorithm"
+	select CRYPTO_KPP
+	select MPILIB
+	help
+	  Generic implementation of the Diffie-Hellman algorithm.
+
+config CRYPTO_ECDH
+	tristate "ECDH algorithm"
+	select CRYPTO_KPP
+	help
+	  Generic implementation of the ECDH algorithm.
+
 config CRYPTO_MANAGER
 	tristate "Cryptographic algorithm manager"
 	select CRYPTO_MANAGER2
@ -115,6 +137,7 @@ config CRYPTO_MANAGER2
 	select CRYPTO_HASH2
 	select CRYPTO_BLKCIPHER2
 	select CRYPTO_AKCIPHER2
+	select CRYPTO_KPP2
 
 config CRYPTO_USER
 	tristate "Userspace cryptographic algorithm configuration"
@ -414,6 +437,17 @@ config CRYPTO_CRC32C_INTEL
 	  gain performance compared with software implementation.
 	  Module will be crc32c-intel.
 
+config CRYPTO_CRC32C_VPMSUM
+	tristate "CRC32c CRC algorithm (powerpc64)"
+	depends on PPC64
+	select CRYPTO_HASH
+	select CRC32
+	help
+	  CRC32c algorithm implemented using vector polynomial multiply-sum
+	  (vpmsum) instructions, introduced in POWER8. Enable on POWER8
+	  and newer processors for improved performance.
+
+
 config CRYPTO_CRC32C_SPARC64
 	tristate "CRC32c CRC algorithm (SPARC64)"
 	depends on SPARC64
@ -681,6 +715,38 @@ config CRYPTO_SHA1_MB
 	  lanes remain unfilled, a flush operation will be initiated to
 	  process the crypto jobs, adding a slight latency.
 
+config CRYPTO_SHA256_MB
+	tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
+	depends on X86 && 64BIT
+	select CRYPTO_SHA256
+	select CRYPTO_HASH
+	select CRYPTO_MCRYPTD
+	help
+	  SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+	  using multi-buffer technique.  This algorithm computes on
+	  multiple data lanes concurrently with SIMD instructions for
+	  better throughput.  It should not be enabled by default but
+	  used when there is a significant amount of work to keep the
+	  data lanes filled to get the performance benefit.  If the data
+	  lanes remain unfilled, a flush operation will be initiated to
+	  process the crypto jobs, adding a slight latency.
+
+config CRYPTO_SHA512_MB
+	tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
+	depends on X86 && 64BIT
+	select CRYPTO_SHA512
+	select CRYPTO_HASH
+	select CRYPTO_MCRYPTD
+	help
+	  SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+	  using multi-buffer technique.  This algorithm computes on
+	  multiple data lanes concurrently with SIMD instructions for
+	  better throughput.  It should not be enabled by default but
+	  used when there is a significant amount of work to keep the
+	  data lanes filled to get the performance benefit.  If the data
+	  lanes remain unfilled, a flush operation will be initiated to
+	  process the crypto jobs, adding a slight latency.
+
 config CRYPTO_SHA256
 	tristate "SHA224 and SHA256 digest algorithm"
 	select CRYPTO_HASH
@ -750,6 +816,16 @@ config CRYPTO_SHA512_SPARC64
 	  SHA-512 secure hash standard (DFIPS 180-2) implemented
 	  using sparc64 crypto instructions, when available.
 
+config CRYPTO_SHA3
+	tristate "SHA3 digest algorithm"
+	select CRYPTO_HASH
+	help
+	  SHA-3 secure hash standard (DFIPS 202). It's based on the
+	  cryptographic sponge function family called Keccak.
+
+	  References:
+	  http://keccak.noekeon.org/
+
 config CRYPTO_TGR192
 	tristate "Tiger digest algorithms"
 	select CRYPTO_HASH
@ -1567,6 +1643,7 @@ config CRYPTO_DRBG_HASH
 config CRYPTO_DRBG_CTR
 	bool "Enable CTR DRBG"
 	select CRYPTO_AES
+	depends on CRYPTO_CTR
 	help
 	  Enable the CTR DRBG variant as defined in NIST SP800-90A.
 
@ -20,8 +20,6 @@ crypto_blkcipher-y := ablkcipher.o
 crypto_blkcipher-y += blkcipher.o
 crypto_blkcipher-y += skcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
-obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
-obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
 
@ -30,6 +28,15 @@ crypto_hash-y += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
+obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
+
+dh_generic-y := dh.o
+dh_generic-y += dh_helper.o
+obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
+ecdh_generic-y := ecc.o
+ecdh_generic-y += ecdh.o
+ecdh_generic-y += ecdh_helper.o
+obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
 $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
@ -61,6 +68,7 @@ obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
 obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
+obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
@ -71,7 +71,8 @@ int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 
@ -90,7 +91,8 @@ int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 
@ -14,11 +14,8 @@
  */
 
 #include <crypto/internal/skcipher.h>
-#include <linux/cpumask.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/cryptouser.h>
@ -349,16 +346,6 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
 	return alg->cra_ctxsize;
 }
 
-int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	return crypto_ablkcipher_encrypt(&req->creq);
-}
-
-int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
-{
-	return crypto_ablkcipher_decrypt(&req->creq);
-}
-
 static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
 				      u32 mask)
 {
@ -371,10 +358,6 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
 	crt->setkey = setkey;
 	crt->encrypt = alg->encrypt;
 	crt->decrypt = alg->decrypt;
-	if (!alg->ivsize) {
-		crt->givencrypt = skcipher_null_givencrypt;
-		crt->givdecrypt = skcipher_null_givdecrypt;
-	}
 	crt->base = __crypto_ablkcipher_cast(tfm);
 	crt->ivsize = alg->ivsize;
 
@ -436,11 +419,6 @@ const struct crypto_type crypto_ablkcipher_type = {
 };
 EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
 
-static int no_givdecrypt(struct skcipher_givcrypt_request *req)
-{
-	return -ENOSYS;
-}
-
 static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
 				     u32 mask)
 {
@ -454,8 +432,6 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
 		alg->setkey : setkey;
 	crt->encrypt = alg->encrypt;
 	crt->decrypt = alg->decrypt;
-	crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
-	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
 	crt->base = __crypto_ablkcipher_cast(tfm);
 	crt->ivsize = alg->ivsize;
 
@ -516,202 +492,3 @@ const struct crypto_type crypto_givcipher_type = {
 	.report = crypto_givcipher_report,
 };
 EXPORT_SYMBOL_GPL(crypto_givcipher_type);
-
-const char *crypto_default_geniv(const struct crypto_alg *alg)
-{
-	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-					 alg->cra_ablkcipher.ivsize) !=
-	    alg->cra_blocksize)
-		return "chainiv";
-
-	return "eseqiv";
-}
-
-static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
-{
-	struct rtattr *tb[3];
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_type data;
-	} ptype;
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_alg data;
-	} palg;
-	struct crypto_template *tmpl;
-	struct crypto_instance *inst;
-	struct crypto_alg *larval;
-	const char *geniv;
-	int err;
-
-	larval = crypto_larval_lookup(alg->cra_driver_name,
-				      (type & ~CRYPTO_ALG_TYPE_MASK) |
-				      CRYPTO_ALG_TYPE_GIVCIPHER,
-				      mask | CRYPTO_ALG_TYPE_MASK);
-	err = PTR_ERR(larval);
-	if (IS_ERR(larval))
-		goto out;
-
-	err = -EAGAIN;
-	if (!crypto_is_larval(larval))
-		goto drop_larval;
-
-	ptype.attr.rta_len = sizeof(ptype);
-	ptype.attr.rta_type = CRYPTOA_TYPE;
-	ptype.data.type = type | CRYPTO_ALG_GENIV;
-	/* GENIV tells the template that we're making a default geniv. */
-	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
-	tb[0] = &ptype.attr;
-
-	palg.attr.rta_len = sizeof(palg);
-	palg.attr.rta_type = CRYPTOA_ALG;
-	/* Must use the exact name to locate ourselves. */
-	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
-	tb[1] = &palg.attr;
-
-	tb[2] = NULL;
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER)
-		geniv = alg->cra_blkcipher.geniv;
-	else
-		geniv = alg->cra_ablkcipher.geniv;
-
-	if (!geniv)
-		geniv = crypto_default_geniv(alg);
-
-	tmpl = crypto_lookup_template(geniv);
-	err = -ENOENT;
-	if (!tmpl)
-		goto kill_larval;
-
-	if (tmpl->create) {
-		err = tmpl->create(tmpl, tb);
-		if (err)
-			goto put_tmpl;
-		goto ok;
-	}
-
-	inst = tmpl->alloc(tb);
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto put_tmpl;
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err) {
-		tmpl->free(inst);
-		goto put_tmpl;
-	}
-
-ok:
-	/* Redo the lookup to use the instance we just registered. */
-	err = -EAGAIN;
-
-put_tmpl:
-	crypto_tmpl_put(tmpl);
-kill_larval:
-	crypto_larval_kill(larval);
-drop_larval:
-	crypto_mod_put(larval);
-out:
-	crypto_mod_put(alg);
-	return err;
-}
-
-struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
-{
-	struct crypto_alg *alg;
-
-	alg = crypto_alg_mod_lookup(name, type, mask);
-	if (IS_ERR(alg))
-		return alg;
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_GIVCIPHER)
-		return alg;
-
-	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-					  alg->cra_ablkcipher.ivsize))
-		return alg;
-
-	crypto_mod_put(alg);
-	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
-				    mask & ~CRYPTO_ALG_TESTED);
-	if (IS_ERR(alg))
-		return alg;
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_GIVCIPHER) {
-		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
-			crypto_mod_put(alg);
-			alg = ERR_PTR(-ENOENT);
-		}
-		return alg;
-	}
-
-	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-					     alg->cra_ablkcipher.ivsize));
-
-	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
-}
-EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
-
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
|
|
||||||
u32 type, u32 mask)
|
|
||||||
{
|
|
||||||
struct crypto_alg *alg;
|
|
||||||
int err;
|
|
||||||
|
|
||||||
type = crypto_skcipher_type(type);
|
|
||||||
mask = crypto_skcipher_mask(mask);
|
|
||||||
|
|
||||||
alg = crypto_lookup_skcipher(name, type, mask);
|
|
||||||
if (IS_ERR(alg))
|
|
||||||
return PTR_ERR(alg);
|
|
||||||
|
|
||||||
err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
|
|
||||||
crypto_mod_put(alg);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
|
|
||||||
|
|
||||||
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
|
|
||||||
u32 type, u32 mask)
|
|
||||||
{
|
|
||||||
struct crypto_tfm *tfm;
|
|
||||||
int err;
|
|
||||||
|
|
||||||
type = crypto_skcipher_type(type);
|
|
||||||
mask = crypto_skcipher_mask(mask);
|
|
||||||
|
|
||||||
for (;;) {
|
|
||||||
struct crypto_alg *alg;
|
|
||||||
|
|
||||||
alg = crypto_lookup_skcipher(alg_name, type, mask);
|
|
||||||
if (IS_ERR(alg)) {
|
|
||||||
err = PTR_ERR(alg);
|
|
||||||
goto err;
|
|
||||||
}
|
|
||||||
|
|
||||||
tfm = __crypto_alloc_tfm(alg, type, mask);
|
|
||||||
if (!IS_ERR(tfm))
|
|
||||||
return __crypto_ablkcipher_cast(tfm);
|
|
||||||
|
|
||||||
crypto_mod_put(alg);
|
|
||||||
err = PTR_ERR(tfm);
|
|
||||||
|
|
||||||
err:
|
|
||||||
if (err != -EAGAIN)
|
|
||||||
break;
|
|
||||||
if (fatal_signal_pending(current)) {
|
|
||||||
err = -EINTR;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ERR_PTR(err);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
|
|
||||||
|
|
|
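With crypto_lookup_skcipher() and the givcipher lookup machinery removed, callers obtain symmetric-cipher handles through the skcipher front end instead. A minimal sketch of that usage, assuming a caller that can sleep and wants synchronous behaviour (masking CRYPTO_ALG_ASYNC restricts the lookup to synchronous implementations); the function, buffer, and key names are illustrative only:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_ctr_encrypt(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	/* Ask for a synchronous ctr(aes) so encrypt below cannot
	 * return -EINPROGRESS. */
	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe on-stack state */
	}
out:
	crypto_free_skcipher(tfm);
	return err;
}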
crypto/aead.c:

@@ -294,9 +294,9 @@ int aead_init_geniv(struct crypto_aead *aead)
 	if (err)
 		goto out;

-	ctx->null = crypto_get_default_null_skcipher();
-	err = PTR_ERR(ctx->null);
-	if (IS_ERR(ctx->null))
+	ctx->sknull = crypto_get_default_null_skcipher2();
+	err = PTR_ERR(ctx->sknull);
+	if (IS_ERR(ctx->sknull))
 		goto out;

 	child = crypto_spawn_aead(aead_instance_ctx(inst));
@@ -314,7 +314,7 @@ out:
 	return err;

 drop_null:
-	crypto_put_default_null_skcipher();
+	crypto_put_default_null_skcipher2();
 	goto out;
 }
 EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -324,7 +324,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
 	struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);

 	crypto_free_aead(ctx->child);
-	crypto_put_default_null_skcipher();
+	crypto_put_default_null_skcipher2();
 }
 EXPORT_SYMBOL_GPL(aead_exit_geniv);

@@ -346,9 +346,13 @@ static int aead_prepare_alg(struct aead_alg *alg)
 {
 	struct crypto_alg *base = &alg->base;

-	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
+	if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
+	    PAGE_SIZE / 8)
 		return -EINVAL;

+	if (!alg->chunksize)
+		alg->chunksize = base->cra_blocksize;
+
 	base->cra_type = &crypto_aead_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;

crypto/ahash.c:

@@ -461,10 +461,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)

 static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
 {
-	if (alg->cra_type == &crypto_ahash_type)
-		return alg->cra_ctxsize;
+	if (alg->cra_type != &crypto_ahash_type)
+		return sizeof(struct crypto_shash *);

-	return sizeof(struct crypto_shash *);
+	return crypto_alg_extsize(alg);
 }

 #ifdef CONFIG_NET

crypto/algapi.c:

@@ -811,6 +811,21 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
 }
 EXPORT_SYMBOL_GPL(crypto_attr_u32);

+int crypto_inst_setname(struct crypto_instance *inst, const char *name,
+			struct crypto_alg *alg)
+{
+	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
+		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
+
+	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
+		     name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_inst_setname);
+
 void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
 			     unsigned int head)
 {
@@ -825,13 +840,8 @@ void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,

 	inst = (void *)(p + head);

-	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
-		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
-		goto err_free_inst;
-
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
-		     name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+	err = crypto_inst_setname(inst, name, alg);
+	if (err)
 		goto err_free_inst;

 	return p;
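The new crypto_inst_setname() helper factors out the cra_name/cra_driver_name construction that templates previously open-coded, as the crypto_alloc_instance2() hunk above shows. A minimal sketch of how a template would call it; the wrapper name is hypothetical, and inst and alg stand for the values a template's create callback already has in hand:

#include <crypto/algapi.h>

/* Hypothetical wrapper: builds "tmpl-name(alg-name)" into both cra_name
 * and cra_driver_name, returning -ENAMETOOLONG if either string would
 * overflow CRYPTO_MAX_ALG_NAME. */
static int example_setname(struct crypto_instance *inst,
			   struct crypto_template *tmpl,
			   struct crypto_alg *alg)
{
	return crypto_inst_setname(inst, tmpl->name, alg);
}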
crypto/authenc.c (116 changed lines):

@@ -32,8 +32,8 @@ struct authenc_instance_ctx {

 struct crypto_authenc_ctx {
 	struct crypto_ahash *auth;
-	struct crypto_ablkcipher *enc;
-	struct crypto_blkcipher *null;
+	struct crypto_skcipher *enc;
+	struct crypto_skcipher *null;
 };

 struct authenc_request_ctx {
@@ -83,7 +83,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
 {
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
 	struct crypto_ahash *auth = ctx->auth;
-	struct crypto_ablkcipher *enc = ctx->enc;
+	struct crypto_skcipher *enc = ctx->enc;
 	struct crypto_authenc_keys keys;
 	int err = -EINVAL;

@@ -100,11 +100,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
 	if (err)
 		goto out;

-	crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
-	crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
+	crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
 				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
-	crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
+	err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
+	crypto_aead_set_flags(authenc, crypto_skcipher_get_flags(enc) &
 			      CRYPTO_TFM_RES_MASK);

 out:
@@ -184,12 +184,15 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct blkcipher_desc desc = {
-		.tfm = ctx->null,
-	};
+	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);

-	return crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-					req->assoclen);
+	skcipher_request_set_tfm(skreq, ctx->null);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      NULL, NULL);
+	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+				   NULL);
+
+	return crypto_skcipher_encrypt(skreq);
 }

 static int crypto_authenc_encrypt(struct aead_request *req)
@@ -199,14 +202,13 @@ static int crypto_authenc_encrypt(struct aead_request *req)
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
 	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_ablkcipher *enc = ctx->enc;
+	struct crypto_skcipher *enc = ctx->enc;
 	unsigned int cryptlen = req->cryptlen;
-	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
-						    ictx->reqoff);
+	struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+						  ictx->reqoff);
 	struct scatterlist *src, *dst;
 	int err;

-	sg_init_table(areq_ctx->src, 2);
 	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
 	dst = src;

@@ -215,16 +217,15 @@ static int crypto_authenc_encrypt(struct aead_request *req)
 		if (err)
 			return err;

-		sg_init_table(areq_ctx->dst, 2);
 		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
 	}

-	ablkcipher_request_set_tfm(abreq, enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					crypto_authenc_encrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
+	skcipher_request_set_tfm(skreq, enc);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      crypto_authenc_encrypt_done, req);
+	skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);

-	err = crypto_ablkcipher_encrypt(abreq);
+	err = crypto_skcipher_encrypt(skreq);
 	if (err)
 		return err;

@@ -240,8 +241,8 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
 	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
-	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
-						    ictx->reqoff);
+	struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+						  ictx->reqoff);
 	unsigned int authsize = crypto_aead_authsize(authenc);
 	u8 *ihash = ahreq->result + authsize;
 	struct scatterlist *src, *dst;
@@ -251,22 +252,19 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
 	if (crypto_memneq(ihash, ahreq->result, authsize))
 		return -EBADMSG;

-	sg_init_table(areq_ctx->src, 2);
 	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
 	dst = src;

-	if (req->src != req->dst) {
-		sg_init_table(areq_ctx->dst, 2);
+	if (req->src != req->dst)
 		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
-	}

-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, src, dst,
-				     req->cryptlen - authsize, req->iv);
+	skcipher_request_set_tfm(skreq, ctx->enc);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(skreq, src, dst,
+				   req->cryptlen - authsize, req->iv);

-	return crypto_ablkcipher_decrypt(abreq);
+	return crypto_skcipher_decrypt(skreq);
 }

 static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -318,20 +316,20 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
-	struct crypto_ablkcipher *enc;
-	struct crypto_blkcipher *null;
+	struct crypto_skcipher *enc;
+	struct crypto_skcipher *null;
 	int err;

 	auth = crypto_spawn_ahash(&ictx->auth);
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);

-	enc = crypto_spawn_skcipher(&ictx->enc);
+	enc = crypto_spawn_skcipher2(&ictx->enc);
 	err = PTR_ERR(enc);
 	if (IS_ERR(enc))
 		goto err_free_ahash;

-	null = crypto_get_default_null_skcipher();
+	null = crypto_get_default_null_skcipher2();
 	err = PTR_ERR(null);
 	if (IS_ERR(null))
 		goto err_free_skcipher;
@@ -347,13 +345,13 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 			    max_t(unsigned int,
 				  crypto_ahash_reqsize(auth) +
 				  sizeof(struct ahash_request),
-				  sizeof(struct ablkcipher_request) +
-				  crypto_ablkcipher_reqsize(enc)));
+				  sizeof(struct skcipher_request) +
+				  crypto_skcipher_reqsize(enc)));

 	return 0;

 err_free_skcipher:
-	crypto_free_ablkcipher(enc);
+	crypto_free_skcipher(enc);
 err_free_ahash:
 	crypto_free_ahash(auth);
 	return err;
@@ -364,8 +362,8 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);

 	crypto_free_ahash(ctx->auth);
-	crypto_free_ablkcipher(ctx->enc);
-	crypto_put_default_null_skcipher();
+	crypto_free_skcipher(ctx->enc);
+	crypto_put_default_null_skcipher2();
 }

 static void crypto_authenc_free(struct aead_instance *inst)
@@ -384,7 +382,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
 	struct aead_instance *inst;
 	struct hash_alg_common *auth;
 	struct crypto_alg *auth_base;
-	struct crypto_alg *enc;
+	struct skcipher_alg *enc;
 	struct authenc_instance_ctx *ctx;
 	const char *enc_name;
 	int err;
@@ -397,7 +395,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
 		return -EINVAL;

 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
-			      CRYPTO_ALG_TYPE_AHASH_MASK);
+			      CRYPTO_ALG_TYPE_AHASH_MASK |
+			      crypto_requires_sync(algt->type, algt->mask));
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);

@@ -421,37 +420,40 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
 		goto err_free_inst;

 	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
-				   crypto_requires_sync(algt->type,
-							algt->mask));
+	err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
+				    crypto_requires_sync(algt->type,
+							 algt->mask));
 	if (err)
 		goto err_drop_auth;

-	enc = crypto_skcipher_spawn_alg(&ctx->enc);
+	enc = crypto_spawn_skcipher_alg(&ctx->enc);

 	ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
 			    auth_base->cra_alignmask + 1);

 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
+		     "authenc(%s,%s)", auth_base->cra_name,
+		     enc->base.cra_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;

 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "authenc(%s,%s)", auth_base->cra_driver_name,
-		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+		     enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;

-	inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.base.cra_priority = enc->cra_priority * 10 +
+	inst->alg.base.cra_flags = (auth_base->cra_flags |
+				    enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
 				      auth_base->cra_priority;
-	inst->alg.base.cra_blocksize = enc->cra_blocksize;
+	inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
 	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
-				       enc->cra_alignmask;
+				       enc->base.cra_alignmask;
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);

-	inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+	inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
 	inst->alg.maxauthsize = auth->digestsize;

 	inst->alg.init = crypto_authenc_init_tfm;
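The authenc hunks keep the converted skcipher_request in the same per-request tail area that previously held the ablkcipher_request: the AEAD request context is sized for the larger of the hash and cipher sub-requests, and the sub-request lives reqoff bytes into the tail. A condensed sketch of that layout, reusing the identifiers from the diff (auth, enc, ctx->reqoff, and areq_ctx->tail as in authenc); this is an illustration of the sizing rule, not additional code from the commit:

	/* Reserve room behind the request context for whichever
	 * sub-request is bigger; both are carved out of the same tail. */
	crypto_aead_set_reqsize(
		tfm,
		sizeof(struct authenc_request_ctx) + ctx->reqoff +
		max_t(unsigned int,
		      crypto_ahash_reqsize(auth) +
		      sizeof(struct ahash_request),
		      sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(enc)));

	/* Later, once per request, the cipher sub-request is found at: */
	struct skcipher_request *skreq =
		(void *)(areq_ctx->tail + ctx->reqoff);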
crypto/authenc_esn.c:

@@ -35,8 +35,8 @@ struct authenc_esn_instance_ctx {
 struct crypto_authenc_esn_ctx {
 	unsigned int reqoff;
 	struct crypto_ahash *auth;
-	struct crypto_ablkcipher *enc;
-	struct crypto_blkcipher *null;
+	struct crypto_skcipher *enc;
+	struct crypto_skcipher *null;
 };

 struct authenc_esn_request_ctx {
@@ -65,7 +65,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
 {
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct crypto_ahash *auth = ctx->auth;
-	struct crypto_ablkcipher *enc = ctx->enc;
+	struct crypto_skcipher *enc = ctx->enc;
 	struct crypto_authenc_keys keys;
 	int err = -EINVAL;

@@ -82,11 +82,11 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
 	if (err)
 		goto out;

-	crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
-	crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
+	crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
 					 CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
-	crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
+	err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
+	crypto_aead_set_flags(authenc_esn, crypto_skcipher_get_flags(enc) &
 					   CRYPTO_TFM_RES_MASK);

 out:
@@ -182,11 +182,14 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct blkcipher_desc desc = {
-		.tfm = ctx->null,
-	};
+	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);

-	return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len);
+	skcipher_request_set_tfm(skreq, ctx->null);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      NULL, NULL);
+	skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
+
+	return crypto_skcipher_encrypt(skreq);
 }

 static int crypto_authenc_esn_encrypt(struct aead_request *req)
@@ -194,9 +197,9 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-						    + ctx->reqoff);
-	struct crypto_ablkcipher *enc = ctx->enc;
+	struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+						  ctx->reqoff);
+	struct crypto_skcipher *enc = ctx->enc;
 	unsigned int assoclen = req->assoclen;
 	unsigned int cryptlen = req->cryptlen;
 	struct scatterlist *src, *dst;
@@ -215,12 +218,12 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
 		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
 	}

-	ablkcipher_request_set_tfm(abreq, enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					crypto_authenc_esn_encrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
+	skcipher_request_set_tfm(skreq, enc);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+				      crypto_authenc_esn_encrypt_done, req);
+	skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);

-	err = crypto_ablkcipher_encrypt(abreq);
+	err = crypto_skcipher_encrypt(skreq);
 	if (err)
 		return err;

@@ -234,8 +237,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
 	unsigned int authsize = crypto_aead_authsize(authenc_esn);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-						    + ctx->reqoff);
+	struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+						  ctx->reqoff);
 	struct crypto_ahash *auth = ctx->auth;
 	u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
 			      crypto_ahash_alignmask(auth) + 1);
@@ -256,12 +259,12 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
 	sg_init_table(areq_ctx->dst, 2);
 	dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);

-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, flags,
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv);
+	skcipher_request_set_tfm(skreq, ctx->enc);
+	skcipher_request_set_callback(skreq, flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);

-	return crypto_ablkcipher_decrypt(abreq);
+	return crypto_skcipher_decrypt(skreq);
 }

 static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
@@ -331,20 +334,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 	struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
-	struct crypto_ablkcipher *enc;
-	struct crypto_blkcipher *null;
+	struct crypto_skcipher *enc;
+	struct crypto_skcipher *null;
 	int err;

 	auth = crypto_spawn_ahash(&ictx->auth);
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);

-	enc = crypto_spawn_skcipher(&ictx->enc);
+	enc = crypto_spawn_skcipher2(&ictx->enc);
 	err = PTR_ERR(enc);
 	if (IS_ERR(enc))
 		goto err_free_ahash;

-	null = crypto_get_default_null_skcipher();
+	null = crypto_get_default_null_skcipher2();
 	err = PTR_ERR(null);
 	if (IS_ERR(null))
 		goto err_free_skcipher;
@@ -361,15 +364,15 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 			    sizeof(struct authenc_esn_request_ctx) +
 			    ctx->reqoff +
 			    max_t(unsigned int,
 				  crypto_ahash_reqsize(auth) +
 				  sizeof(struct ahash_request),
-				  sizeof(struct skcipher_givcrypt_request) +
-				  crypto_ablkcipher_reqsize(enc)));
+				  sizeof(struct skcipher_request) +
+				  crypto_skcipher_reqsize(enc)));

 	return 0;

 err_free_skcipher:
-	crypto_free_ablkcipher(enc);
+	crypto_free_skcipher(enc);
 err_free_ahash:
 	crypto_free_ahash(auth);
 	return err;
@@ -380,8 +383,8 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);

 	crypto_free_ahash(ctx->auth);
-	crypto_free_ablkcipher(ctx->enc);
-	crypto_put_default_null_skcipher();
+	crypto_free_skcipher(ctx->enc);
+	crypto_put_default_null_skcipher2();
 }

 static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -400,7 +403,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
 	struct aead_instance *inst;
 	struct hash_alg_common *auth;
 	struct crypto_alg *auth_base;
-	struct crypto_alg *enc;
+	struct skcipher_alg *enc;
 	struct authenc_esn_instance_ctx *ctx;
 	const char *enc_name;
 	int err;
@@ -413,7 +416,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
 		return -EINVAL;

 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
-			      CRYPTO_ALG_TYPE_AHASH_MASK);
+			      CRYPTO_ALG_TYPE_AHASH_MASK |
+			      crypto_requires_sync(algt->type, algt->mask));
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);

@@ -437,34 +441,36 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
 		goto err_free_inst;

 	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
-				   crypto_requires_sync(algt->type,
-							algt->mask));
+	err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
+				    crypto_requires_sync(algt->type,
+							 algt->mask));
 	if (err)
 		goto err_drop_auth;

-	enc = crypto_skcipher_spawn_alg(&ctx->enc);
+	enc = crypto_spawn_skcipher_alg(&ctx->enc);

 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "authencesn(%s,%s)", auth_base->cra_name,
-		     enc->cra_name) >= CRYPTO_MAX_ALG_NAME)
+		     enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;

 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "authencesn(%s,%s)", auth_base->cra_driver_name,
-		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+		     enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;

-	inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.base.cra_priority = enc->cra_priority * 10 +
+	inst->alg.base.cra_flags = (auth_base->cra_flags |
+				    enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
 				      auth_base->cra_priority;
-	inst->alg.base.cra_blocksize = enc->cra_blocksize;
+	inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
 	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
-				       enc->cra_alignmask;
+				       enc->base.cra_alignmask;
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);

-	inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+	inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
 	inst->alg.maxauthsize = auth->digestsize;

 	inst->alg.init = crypto_authenc_esn_init_tfm;
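The conversion preserves both async completion patterns the old ablkcipher code used: either a private done handler that finishes the parent AEAD request, or forwarding req->base.complete and req->base.data so the sub-request completes the parent directly, as the decrypt_tail hunks above do. A minimal sketch of the first pattern, with hypothetical function names; skreq and req are a cipher sub-request and its parent AEAD request as in the diff:

static void example_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;

	/* any post-processing of the ciphertext would happen here */
	aead_request_complete(req, err);
}

	/* wiring it up on the sub-request: */
	skcipher_request_set_callback(skreq, aead_request_flags(req),
				      example_encrypt_done, req);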
crypto/blkcipher.c:

@@ -21,7 +21,6 @@
 #include <linux/hardirq.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/string.h>
@@ -466,10 +465,6 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
 	crt->setkey = async_setkey;
 	crt->encrypt = async_encrypt;
 	crt->decrypt = async_decrypt;
-	if (!alg->ivsize) {
-		crt->givencrypt = skcipher_null_givencrypt;
-		crt->givdecrypt = skcipher_null_givdecrypt;
-	}
 	crt->base = __crypto_ablkcipher_cast(tfm);
 	crt->ivsize = alg->ivsize;

@@ -560,185 +555,5 @@ const struct crypto_type crypto_blkcipher_type = {
 };
 EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

-static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
-				 const char *name, u32 type, u32 mask)
-{
-	struct crypto_alg *alg;
-	int err;
-
-	type = crypto_skcipher_type(type);
-	mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
-
-	alg = crypto_alg_mod_lookup(name, type, mask);
-	if (IS_ERR(alg))
-		return PTR_ERR(alg);
-
-	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
-	crypto_mod_put(alg);
-	return err;
-}
-
-struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
-					     struct rtattr **tb, u32 type,
-					     u32 mask)
-{
-	struct {
-		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
-			      unsigned int keylen);
-		int (*encrypt)(struct ablkcipher_request *req);
-		int (*decrypt)(struct ablkcipher_request *req);
-
-		unsigned int min_keysize;
-		unsigned int max_keysize;
-		unsigned int ivsize;
-
-		const char *geniv;
-	} balg;
-	const char *name;
-	struct crypto_skcipher_spawn *spawn;
-	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
-	int err;
-
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return ERR_CAST(algt);
-
-	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
-	    algt->mask)
-		return ERR_PTR(-EINVAL);
-
-	name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(name))
-		return ERR_CAST(name);
-
-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
-		return ERR_PTR(-ENOMEM);
-
-	spawn = crypto_instance_ctx(inst);
-
-	/* Ignore async algorithms if necessary. */
-	mask |= crypto_requires_sync(algt->type, algt->mask);
-
-	crypto_set_skcipher_spawn(spawn, inst);
-	err = crypto_grab_nivcipher(spawn, name, type, mask);
-	if (err)
-		goto err_free_inst;
-
-	alg = crypto_skcipher_spawn_alg(spawn);
-
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER) {
-		balg.ivsize = alg->cra_blkcipher.ivsize;
-		balg.min_keysize = alg->cra_blkcipher.min_keysize;
-		balg.max_keysize = alg->cra_blkcipher.max_keysize;
-
-		balg.setkey = async_setkey;
-		balg.encrypt = async_encrypt;
-		balg.decrypt = async_decrypt;
-
-		balg.geniv = alg->cra_blkcipher.geniv;
-	} else {
-		balg.ivsize = alg->cra_ablkcipher.ivsize;
-		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
-		balg.max_keysize = alg->cra_ablkcipher.max_keysize;
-
-		balg.setkey = alg->cra_ablkcipher.setkey;
-		balg.encrypt = alg->cra_ablkcipher.encrypt;
-		balg.decrypt = alg->cra_ablkcipher.decrypt;
-
-		balg.geniv = alg->cra_ablkcipher.geniv;
-	}
-
-	err = -EINVAL;
-	if (!balg.ivsize)
-		goto err_drop_alg;
-
-	/*
-	 * This is only true if we're constructing an algorithm with its
-	 * default IV generator.  For the default generator we elide the
-	 * template name and double-check the IV generator.
-	 */
-	if (algt->mask & CRYPTO_ALG_GENIV) {
-		if (!balg.geniv)
-			balg.geniv = crypto_default_geniv(alg);
-		err = -EAGAIN;
-		if (strcmp(tmpl->name, balg.geniv))
-			goto err_drop_alg;
-
-		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
-		       CRYPTO_MAX_ALG_NAME);
-	} else {
-		err = -ENAMETOOLONG;
-		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "%s(%s)", tmpl->name, alg->cra_name) >=
-		    CRYPTO_MAX_ALG_NAME)
-			goto err_drop_alg;
-		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
-		    CRYPTO_MAX_ALG_NAME)
-			goto err_drop_alg;
-	}
-
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
-	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_givcipher_type;
-
-	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
-	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
-	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
-	inst->alg.cra_ablkcipher.geniv = balg.geniv;
-
-	inst->alg.cra_ablkcipher.setkey = balg.setkey;
-	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
-	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
-
-out:
-	return inst;
-
-err_drop_alg:
-	crypto_drop_skcipher(spawn);
-err_free_inst:
-	kfree(inst);
-	inst = ERR_PTR(err);
-	goto out;
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
-
-void skcipher_geniv_free(struct crypto_instance *inst)
-{
-	crypto_drop_skcipher(crypto_instance_ctx(inst));
-	kfree(inst);
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_free);
-
-int skcipher_geniv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_ablkcipher *cipher;
-
-	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	tfm->crt_ablkcipher.base = cipher;
-	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_init);
-
-void skcipher_geniv_exit(struct crypto_tfm *tfm)
-{
-	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
-}
-EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
-
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic block chaining cipher type");
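With the geniv glue gone from blkcipher.c, remaining crypto_blkcipher users convert by the same recipe seen throughout this series. One detail worth calling out: crypto_blkcipher_encrypt() took (dst, src) while skcipher_request_set_crypt() takes (src, dst), which is why the copy hunks above swap the argument order. A hedged before/after sketch, assuming blk_tfm and sk_tfm are handles the caller already holds and that sk_tfm was allocated with CRYPTO_ALG_ASYNC masked out so the call stays synchronous:

	/* old style: synchronous blkcipher */
	struct blkcipher_desc desc = { .tfm = blk_tfm };

	err = crypto_blkcipher_encrypt(&desc, dst, src, len);

	/* new style: on-stack skcipher request */
	SKCIPHER_REQUEST_ON_STACK(req, sk_tfm);

	skcipher_request_set_tfm(req, sk_tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);	/* src first */
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);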
crypto/ccm.c (72 changed lines):

@@ -28,7 +28,7 @@ struct ccm_instance_ctx {

 struct crypto_ccm_ctx {
 	struct crypto_cipher *cipher;
-	struct crypto_ablkcipher *ctr;
+	struct crypto_skcipher *ctr;
 };

 struct crypto_rfc4309_ctx {
@@ -50,7 +50,7 @@ struct crypto_ccm_req_priv_ctx {
 	u32 flags;
 	struct scatterlist src[3];
 	struct scatterlist dst[3];
-	struct ablkcipher_request abreq;
+	struct skcipher_request skreq;
 };

 static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
@@ -83,15 +83,15 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
 			     unsigned int keylen)
 {
 	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
-	struct crypto_ablkcipher *ctr = ctx->ctr;
+	struct crypto_skcipher *ctr = ctx->ctr;
 	struct crypto_cipher *tfm = ctx->cipher;
 	int err = 0;

-	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
-	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
+	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
 				    CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(ctr, key, keylen);
-	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
+	err = crypto_skcipher_setkey(ctr, key, keylen);
+	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
 			      CRYPTO_TFM_RES_MASK);
 	if (err)
 		goto out;
@@ -347,7 +347,7 @@ static int crypto_ccm_encrypt(struct aead_request *req)
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
 	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
-	struct ablkcipher_request *abreq = &pctx->abreq;
+	struct skcipher_request *skreq = &pctx->skreq;
 	struct scatterlist *dst;
 	unsigned int cryptlen = req->cryptlen;
 	u8 *odata = pctx->odata;
@@ -366,11 +366,11 @@ static int crypto_ccm_encrypt(struct aead_request *req)
 	if (req->src != req->dst)
 		dst = pctx->dst;

-	ablkcipher_request_set_tfm(abreq, ctx->ctr);
-	ablkcipher_request_set_callback(abreq, pctx->flags,
-					crypto_ccm_encrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
-	err = crypto_ablkcipher_encrypt(abreq);
+	skcipher_request_set_tfm(skreq, ctx->ctr);
+	skcipher_request_set_callback(skreq, pctx->flags,
+				      crypto_ccm_encrypt_done, req);
+	skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
+	err = crypto_skcipher_encrypt(skreq);
 	if (err)
 		return err;

@@ -407,7 +407,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
 	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
-	struct ablkcipher_request *abreq = &pctx->abreq;
+	struct skcipher_request *skreq = &pctx->skreq;
 	struct scatterlist *dst;
 	unsigned int authsize = crypto_aead_authsize(aead);
 	unsigned int cryptlen = req->cryptlen;
@@ -429,11 +429,11 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 	if (req->src != req->dst)
 		dst = pctx->dst;

-	ablkcipher_request_set_tfm(abreq, ctx->ctr);
-	ablkcipher_request_set_callback(abreq, pctx->flags,
-					crypto_ccm_decrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
-	err = crypto_ablkcipher_decrypt(abreq);
+	skcipher_request_set_tfm(skreq, ctx->ctr);
+	skcipher_request_set_callback(skreq, pctx->flags,
+				      crypto_ccm_decrypt_done, req);
+	skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
+	err = crypto_skcipher_decrypt(skreq);
 	if (err)
 		return err;

@@ -454,7 +454,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 	struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_cipher *cipher;
-	struct crypto_ablkcipher *ctr;
+	struct crypto_skcipher *ctr;
 	unsigned long align;
 	int err;

@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);

-	ctr = crypto_spawn_skcipher(&ictx->ctr);
+	ctr = crypto_spawn_skcipher2(&ictx->ctr);
 	err = PTR_ERR(ctr);
 	if (IS_ERR(ctr))
 		goto err_free_cipher;
@@ -475,7 +475,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 	crypto_aead_set_reqsize(
 		tfm,
 		align + sizeof(struct crypto_ccm_req_priv_ctx) +
-		crypto_ablkcipher_reqsize(ctr));
+		crypto_skcipher_reqsize(ctr));

 	return 0;

@@ -489,7 +489,7 @@ static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
 	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);

 	crypto_free_cipher(ctx->cipher);
-	crypto_free_ablkcipher(ctx->ctr);
+	crypto_free_skcipher(ctx->ctr);
 }

 static void crypto_ccm_free(struct aead_instance *inst)
@@ -509,7 +509,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 {
 	struct crypto_attr_type *algt;
 	struct aead_instance *inst;
-	struct crypto_alg *ctr;
+	struct skcipher_alg *ctr;
 	struct crypto_alg *cipher;
 	struct ccm_instance_ctx *ictx;
 	int err;
@@ -544,39 +544,40 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 		goto err_free_inst;

 	crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
-				   crypto_requires_sync(algt->type,
-							algt->mask));
+	err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
+				    crypto_requires_sync(algt->type,
+							 algt->mask));
 	if (err)
 		goto err_drop_cipher;

-	ctr = crypto_skcipher_spawn_alg(&ictx->ctr);
+	ctr = crypto_spawn_skcipher_alg(&ictx->ctr);

 	/* Not a stream cipher? */
 	err = -EINVAL;
-	if (ctr->cra_blocksize != 1)
+	if (ctr->base.cra_blocksize != 1)
 		goto err_drop_ctr;

 	/* We want the real thing! */
-	if (ctr->cra_ablkcipher.ivsize != 16)
+	if (crypto_skcipher_alg_ivsize(ctr) != 16)
 		goto err_drop_ctr;

 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-		     "ccm_base(%s,%s)", ctr->cra_driver_name,
+		     "ccm_base(%s,%s)", ctr->base.cra_driver_name,
 		     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;

 	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);

-	inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (cipher->cra_priority +
-				       ctr->cra_priority) / 2;
+				       ctr->base.cra_priority) / 2;
 	inst->alg.base.cra_blocksize = 1;
 	inst->alg.base.cra_alignmask = cipher->cra_alignmask |
-				       ctr->cra_alignmask |
+				       ctr->base.cra_alignmask |
 				       (__alignof__(u32) - 1);
 	inst->alg.ivsize = 16;
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
 	inst->alg.maxauthsize = 16;
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
 	inst->alg.init = crypto_ccm_init_tfm;
@@ -863,6 +864,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;

 	inst->alg.ivsize = 8;
+	inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
 	inst->alg.maxauthsize = 16;

 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
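The chunksize that ccm and rfc4309 propagate above originates in the underlying algorithm's skcipher_alg: stream ciphers declare the granularity at which their keystream is generated, so wrapping templates can report it upward. A sketch of how a driver might declare it under the new low-level skcipher interface; the algorithm name and sizes here are entirely made up, and only the fields relevant to chunksize are shown:

static struct skcipher_alg example_alg = {
	.base = {
		.cra_name	= "example-stream",
		.cra_blocksize	= 1,	/* stream cipher */
	},
	.min_keysize	= 32,
	.max_keysize	= 32,
	.ivsize		= 16,
	.chunksize	= 64,	/* keystream produced 64 bytes at a time */
	/* .setkey, .encrypt and .decrypt callbacks would go here */
};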
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -31,7 +31,7 @@ struct chachapoly_instance_ctx {
 };
 
 struct chachapoly_ctx {
-	struct crypto_ablkcipher *chacha;
+	struct crypto_skcipher *chacha;
 	struct crypto_ahash *poly;
 	/* key bytes we use for the ChaCha20 IV */
 	unsigned int saltlen;
@@ -53,7 +53,7 @@ struct poly_req {
 struct chacha_req {
 	u8 iv[CHACHA20_IV_SIZE];
 	struct scatterlist src[1];
-	struct ablkcipher_request req; /* must be last member */
+	struct skcipher_request req; /* must be last member */
 };
 
 struct chachapoly_req_ctx {
@@ -144,12 +144,12 @@ static int chacha_decrypt(struct aead_request *req)
 		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
 	}
 
-	ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
-					chacha_decrypt_done, req);
-	ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-	ablkcipher_request_set_crypt(&creq->req, src, dst,
-				     rctx->cryptlen, creq->iv);
-	err = crypto_ablkcipher_decrypt(&creq->req);
+	skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+				      chacha_decrypt_done, req);
+	skcipher_request_set_tfm(&creq->req, ctx->chacha);
+	skcipher_request_set_crypt(&creq->req, src, dst,
+				   rctx->cryptlen, creq->iv);
+	err = crypto_skcipher_decrypt(&creq->req);
 	if (err)
 		return err;
 
@@ -393,13 +393,13 @@ static int poly_genkey(struct aead_request *req)
 
 	chacha_iv(creq->iv, req, 0);
 
-	ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
-					poly_genkey_done, req);
-	ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-	ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src,
-				     POLY1305_KEY_SIZE, creq->iv);
+	skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+				      poly_genkey_done, req);
+	skcipher_request_set_tfm(&creq->req, ctx->chacha);
+	skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
+				   POLY1305_KEY_SIZE, creq->iv);
 
-	err = crypto_ablkcipher_decrypt(&creq->req);
+	err = crypto_skcipher_decrypt(&creq->req);
 	if (err)
 		return err;
 
@@ -433,12 +433,12 @@ static int chacha_encrypt(struct aead_request *req)
 		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
 	}
 
-	ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
-					chacha_encrypt_done, req);
-	ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-	ablkcipher_request_set_crypt(&creq->req, src, dst,
-				     req->cryptlen, creq->iv);
-	err = crypto_ablkcipher_encrypt(&creq->req);
+	skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+				      chacha_encrypt_done, req);
+	skcipher_request_set_tfm(&creq->req, ctx->chacha);
+	skcipher_request_set_crypt(&creq->req, src, dst,
+				   req->cryptlen, creq->iv);
+	err = crypto_skcipher_encrypt(&creq->req);
 	if (err)
 		return err;
 
@@ -500,13 +500,13 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
 	keylen -= ctx->saltlen;
 	memcpy(ctx->salt, key + keylen, ctx->saltlen);
 
-	crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
-	crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
-				    CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
+				  CRYPTO_TFM_REQ_MASK);
 
-	err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen);
-	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) &
+	err = crypto_skcipher_setkey(ctx->chacha, key, keylen);
+	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) &
 			      CRYPTO_TFM_RES_MASK);
 	return err;
 }
@@ -524,7 +524,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
 	struct aead_instance *inst = aead_alg_instance(tfm);
 	struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
-	struct crypto_ablkcipher *chacha;
+	struct crypto_skcipher *chacha;
 	struct crypto_ahash *poly;
 	unsigned long align;
 
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
 	if (IS_ERR(poly))
 		return PTR_ERR(poly);
 
-	chacha = crypto_spawn_skcipher(&ictx->chacha);
+	chacha = crypto_spawn_skcipher2(&ictx->chacha);
 	if (IS_ERR(chacha)) {
 		crypto_free_ahash(poly);
 		return PTR_ERR(chacha);
@@ -548,8 +548,8 @@ static int chachapoly_init(struct crypto_aead *tfm)
 		tfm,
 		align + offsetof(struct chachapoly_req_ctx, u) +
 		max(offsetof(struct chacha_req, req) +
-		    sizeof(struct ablkcipher_request) +
-		    crypto_ablkcipher_reqsize(chacha),
+		    sizeof(struct skcipher_request) +
+		    crypto_skcipher_reqsize(chacha),
 		    offsetof(struct poly_req, req) +
 		    sizeof(struct ahash_request) +
 		    crypto_ahash_reqsize(poly)));
@@ -562,7 +562,7 @@ static void chachapoly_exit(struct crypto_aead *tfm)
 	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_ahash(ctx->poly);
-	crypto_free_ablkcipher(ctx->chacha);
+	crypto_free_skcipher(ctx->chacha);
 }
 
 static void chachapoly_free(struct aead_instance *inst)
@@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 {
 	struct crypto_attr_type *algt;
 	struct aead_instance *inst;
-	struct crypto_alg *chacha;
+	struct skcipher_alg *chacha;
 	struct crypto_alg *poly;
 	struct hash_alg_common *poly_hash;
 	struct chachapoly_instance_ctx *ctx;
@@ -605,7 +605,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 
 	poly = crypto_find_alg(poly_name, &crypto_ahash_type,
 			       CRYPTO_ALG_TYPE_HASH,
-			       CRYPTO_ALG_TYPE_AHASH_MASK);
+			       CRYPTO_ALG_TYPE_AHASH_MASK |
+			       crypto_requires_sync(algt->type,
+						    algt->mask));
 	if (IS_ERR(poly))
 		return PTR_ERR(poly);
 
@@ -623,20 +625,20 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
+	err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
 	if (err)
 		goto err_drop_poly;
 
-	chacha = crypto_skcipher_spawn_alg(&ctx->chacha);
+	chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
 
 	err = -EINVAL;
 	/* Need 16-byte IV size, including Initial Block Counter value */
-	if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE)
+	if (crypto_skcipher_alg_ivsize(chacha) != CHACHA20_IV_SIZE)
 		goto out_drop_chacha;
 	/* Not a stream cipher? */
-	if (chacha->cra_blocksize != 1)
+	if (chacha->base.cra_blocksize != 1)
 		goto out_drop_chacha;
 
 	err = -ENAMETOOLONG;
@@ -645,20 +647,21 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 		     poly_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-		     "%s(%s,%s)", name, chacha->cra_driver_name,
+		     "%s(%s,%s)", name, chacha->base.cra_driver_name,
 		     poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 
-	inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) &
+	inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) &
 				   CRYPTO_ALG_ASYNC;
-	inst->alg.base.cra_priority = (chacha->cra_priority +
+	inst->alg.base.cra_priority = (chacha->base.cra_priority +
 				       poly->cra_priority) / 2;
 	inst->alg.base.cra_blocksize = 1;
-	inst->alg.base.cra_alignmask = chacha->cra_alignmask |
+	inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
 				       poly->cra_alignmask;
 	inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
 				     ctx->saltlen;
 	inst->alg.ivsize = ivsize;
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
 	inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
 	inst->alg.init = chachapoly_init;
 	inst->alg.exit = chachapoly_exit;
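
The reqsize computed in chachapoly_init() above covers one per-request area sized for whichever sub-request (ChaCha20 or Poly1305) is larger, which is why struct chacha_req keeps its request as the last member: the child transform's request context is laid out directly behind it in the same allocation. Schematically (field names illustrative, not from the patch):

	struct example_req {
		u8 iv[CHACHA20_IV_SIZE];	/* parent state first */
		struct scatterlist src[1];
		struct skcipher_request req;	/* must be last member */
		/* crypto_skcipher_reqsize(child) bytes of child context follow */
	};
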
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
--- a/crypto/chainiv.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * chainiv: Chain IV Generator
- *
- * Generate IVs simply be using the last block of the previous encryption.
- * This is mainly useful for CBC with a synchronous algorithm.
- *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
-#include <crypto/crypto_wq.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-
-enum {
-	CHAINIV_STATE_INUSE = 0,
-};
-
-struct chainiv_ctx {
-	spinlock_t lock;
-	char iv[];
-};
-
-struct async_chainiv_ctx {
-	unsigned long state;
-
-	spinlock_t lock;
-	int err;
-
-	struct crypto_queue queue;
-	struct work_struct postponed;
-
-	char iv[];
-};
-
-static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	unsigned int ivsize;
-	int err;
-
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
-					~CRYPTO_TFM_REQ_MAY_SLEEP,
-					req->creq.base.complete,
-					req->creq.base.data);
-	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-				     req->creq.nbytes, req->creq.info);
-
-	spin_lock_bh(&ctx->lock);
-
-	ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	memcpy(req->giv, ctx->iv, ivsize);
-	memcpy(subreq->info, ctx->iv, ivsize);
-
-	err = crypto_ablkcipher_encrypt(subreq);
-	if (err)
-		goto unlock;
-
-	memcpy(ctx->iv, subreq->info, ivsize);
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	return err;
-}
-
-static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	int err = 0;
-
-	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
-
-	if (iv) {
-		err = crypto_rng_get_bytes(crypto_default_rng, iv,
-					   crypto_ablkcipher_ivsize(geniv));
-		crypto_put_default_rng();
-	}
-
-	return err ?: skcipher_geniv_init(tfm);
-}
-
-static int chainiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-	char *iv;
-
-	spin_lock_init(&ctx->lock);
-
-	iv = NULL;
-	if (!crypto_get_default_rng()) {
-		crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
-		iv = ctx->iv;
-	}
-
-	return chainiv_init_common(tfm, iv);
-}
-
-static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
-{
-	int queued;
-	int err = ctx->err;
-
-	if (!ctx->queue.qlen) {
-		smp_mb__before_atomic();
-		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-
-		if (!ctx->queue.qlen ||
-		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-			goto out;
-	}
-
-	queued = queue_work(kcrypto_wq, &ctx->postponed);
-	BUG_ON(!queued);
-
-out:
-	return err;
-}
-
-static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err;
-
-	spin_lock_bh(&ctx->lock);
-	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
-	spin_unlock_bh(&ctx->lock);
-
-	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-		return err;
-
-	ctx->err = err;
-	return async_chainiv_schedule_work(ctx);
-}
-
-static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	memcpy(req->giv, ctx->iv, ivsize);
-	memcpy(subreq->info, ctx->iv, ivsize);
-
-	ctx->err = crypto_ablkcipher_encrypt(subreq);
-	if (ctx->err)
-		goto out;
-
-	memcpy(ctx->iv, subreq->info, ivsize);
-
-out:
-	return async_chainiv_schedule_work(ctx);
-}
-
-static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
-					req->creq.base.complete,
-					req->creq.base.data);
-	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-				     req->creq.nbytes, req->creq.info);
-
-	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-		goto postpone;
-
-	if (ctx->queue.qlen) {
-		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-		goto postpone;
-	}
-
-	return async_chainiv_givencrypt_tail(req);
-
-postpone:
-	return async_chainiv_postpone_request(req);
-}
-
-static void async_chainiv_do_postponed(struct work_struct *work)
-{
-	struct async_chainiv_ctx *ctx = container_of(work,
-						     struct async_chainiv_ctx,
-						     postponed);
-	struct skcipher_givcrypt_request *req;
-	struct ablkcipher_request *subreq;
-	int err;
-
-	/* Only handle one request at a time to avoid hogging keventd. */
-	spin_lock_bh(&ctx->lock);
-	req = skcipher_dequeue_givcrypt(&ctx->queue);
-	spin_unlock_bh(&ctx->lock);
-
-	if (!req) {
-		async_chainiv_schedule_work(ctx);
-		return;
-	}
-
-	subreq = skcipher_givcrypt_reqctx(req);
-	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = async_chainiv_givencrypt_tail(req);
-
-	local_bh_disable();
-	skcipher_givcrypt_complete(req, err);
-	local_bh_enable();
-}
-
-static int async_chainiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-	char *iv;
-
-	spin_lock_init(&ctx->lock);
-
-	crypto_init_queue(&ctx->queue, 100);
-	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
-
-	iv = NULL;
-	if (!crypto_get_default_rng()) {
-		crypto_ablkcipher_crt(geniv)->givencrypt =
-			async_chainiv_givencrypt;
-		iv = ctx->iv;
-	}
-
-	return chainiv_init_common(tfm, iv);
-}
-
-static void async_chainiv_exit(struct crypto_tfm *tfm)
-{
-	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
-
-	skcipher_geniv_exit(tfm);
-}
-
-static struct crypto_template chainiv_tmpl;
-
-static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
-{
-	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
-
-	algt = crypto_get_attr_type(tb);
-	if (IS_ERR(algt))
-		return ERR_CAST(algt);
-
-	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
-	if (IS_ERR(inst))
-		goto out;
-
-	inst->alg.cra_init = chainiv_init;
-	inst->alg.cra_exit = skcipher_geniv_exit;
-
-	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
-
-	if (!crypto_requires_sync(algt->type, algt->mask)) {
-		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
-
-		inst->alg.cra_init = async_chainiv_init;
-		inst->alg.cra_exit = async_chainiv_exit;
-
-		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
-	}
-
-	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-
-out:
-	return inst;
-}
-
-static struct crypto_template chainiv_tmpl = {
-	.name = "chainiv",
-	.alloc = chainiv_alloc,
-	.free = skcipher_geniv_free,
-	.module = THIS_MODULE,
-};
-
-static int __init chainiv_module_init(void)
-{
-	return crypto_register_template(&chainiv_tmpl);
-}
-
-static void chainiv_module_exit(void)
-{
-	crypto_unregister_template(&chainiv_tmpl);
-}
-
-module_init(chainiv_module_init);
-module_exit(chainiv_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Chain IV Generator");
-MODULE_ALIAS_CRYPTO("chainiv");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -22,6 +22,7 @@
 #include <crypto/internal/aead.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
+#include <linux/atomic.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -31,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-#define CRYPTD_MAX_CPU_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 1000
 
 struct cryptd_cpu_queue {
 	struct crypto_queue queue;
@@ -58,6 +59,7 @@ struct aead_instance_ctx {
 };
 
 struct cryptd_blkcipher_ctx {
+	atomic_t refcnt;
 	struct crypto_blkcipher *child;
 };
 
@@ -66,6 +68,7 @@ struct cryptd_blkcipher_request_ctx {
 };
 
 struct cryptd_hash_ctx {
+	atomic_t refcnt;
 	struct crypto_shash *child;
 };
 
@@ -75,6 +78,7 @@ struct cryptd_hash_request_ctx {
 };
 
 struct cryptd_aead_ctx {
+	atomic_t refcnt;
 	struct crypto_aead *child;
 };
 
@@ -118,11 +122,29 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 {
 	int cpu, err;
 	struct cryptd_cpu_queue *cpu_queue;
+	struct crypto_tfm *tfm;
+	atomic_t *refcnt;
+	bool may_backlog;
 
 	cpu = get_cpu();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+	refcnt = crypto_tfm_ctx(request->tfm);
+	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+	if (err == -EBUSY && !may_backlog)
+		goto out_put_cpu;
+
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+
+	if (!atomic_read(refcnt))
+		goto out_put_cpu;
+
+	tfm = request->tfm;
+	atomic_inc(refcnt);
+
+out_put_cpu:
 	put_cpu();
 
 	return err;
@@ -206,7 +228,10 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 						 unsigned int len))
 {
 	struct cryptd_blkcipher_request_ctx *rctx;
+	struct cryptd_blkcipher_ctx *ctx;
+	struct crypto_ablkcipher *tfm;
 	struct blkcipher_desc desc;
+	int refcnt;
 
 	rctx = ablkcipher_request_ctx(req);
 
@@ -222,9 +247,16 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 	req->base.complete = rctx->complete;
 
 out:
+	tfm = crypto_ablkcipher_reqtfm(req);
+	ctx = crypto_ablkcipher_ctx(tfm);
+	refcnt = atomic_read(&ctx->refcnt);
+
 	local_bh_disable();
 	rctx->complete(&req->base, err);
 	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ablkcipher(tfm);
 }
 
 static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
@@ -456,6 +488,21 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
 	return cryptd_enqueue_request(queue, &req->base);
 }
 
+static void cryptd_hash_complete(struct ahash_request *req, int err)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+	int refcnt = atomic_read(&ctx->refcnt);
+
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ahash(tfm);
+}
+
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 {
 	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
@@ -475,9 +522,7 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -500,9 +545,7 @@ static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -523,9 +566,7 @@ static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -546,9 +587,7 @@ static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -575,9 +614,7 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
 	req->base.complete = rctx->complete;
 
 out:
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
+	cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -688,7 +725,10 @@ static void cryptd_aead_crypt(struct aead_request *req,
 			int (*crypt)(struct aead_request *req))
 {
 	struct cryptd_aead_request_ctx *rctx;
+	struct cryptd_aead_ctx *ctx;
 	crypto_completion_t compl;
+	struct crypto_aead *tfm;
+	int refcnt;
 
 	rctx = aead_request_ctx(req);
 	compl = rctx->complete;
@@ -697,10 +737,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
 		goto out;
 	aead_request_set_tfm(req, child);
 	err = crypt( req );
+
 out:
+	tfm = crypto_aead_reqtfm(req);
+	ctx = crypto_aead_ctx(tfm);
+	refcnt = atomic_read(&ctx->refcnt);
+
 	local_bh_disable();
 	compl(&req->base, err);
 	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_aead(tfm);
 }
 
 static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
@@ -883,6 +931,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 						  u32 type, u32 mask)
 {
 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct cryptd_blkcipher_ctx *ctx;
 	struct crypto_tfm *tfm;
 
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -899,6 +948,9 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 		return ERR_PTR(-EINVAL);
 	}
 
+	ctx = crypto_tfm_ctx(tfm);
+	atomic_set(&ctx->refcnt, 1);
+
 	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
@@ -910,9 +962,20 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
+bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
+{
+	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+	return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
+
 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-	crypto_free_ablkcipher(&tfm->base);
+	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+	if (atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ablkcipher(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
@@ -920,6 +983,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 					u32 type, u32 mask)
 {
 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct cryptd_hash_ctx *ctx;
 	struct crypto_ahash *tfm;
 
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -933,6 +997,9 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 		return ERR_PTR(-EINVAL);
 	}
 
+	ctx = crypto_ahash_ctx(tfm);
+	atomic_set(&ctx->refcnt, 1);
+
 	return __cryptd_ahash_cast(tfm);
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
@@ -952,9 +1019,20 @@ struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(cryptd_shash_desc);
 
+bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
+{
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+	return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
+
 void cryptd_free_ahash(struct cryptd_ahash *tfm)
 {
-	crypto_free_ahash(&tfm->base);
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+	if (atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_ahash(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
 
@@ -962,6 +1040,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
 				      u32 type, u32 mask)
 {
 	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct cryptd_aead_ctx *ctx;
 	struct crypto_aead *tfm;
 
 	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -974,6 +1053,10 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
 		crypto_free_aead(tfm);
 		return ERR_PTR(-EINVAL);
 	}
+
+	ctx = crypto_aead_ctx(tfm);
+	atomic_set(&ctx->refcnt, 1);
+
 	return __cryptd_aead_cast(tfm);
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
@@ -986,9 +1069,20 @@ struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_aead_child);
 
+bool cryptd_aead_queued(struct cryptd_aead *tfm)
+{
+	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+	return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_queued);
+
 void cryptd_free_aead(struct cryptd_aead *tfm)
 {
-	crypto_free_aead(&tfm->base);
+	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+	if (atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_aead(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_aead);
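
The new refcnt fields make cryptd_free_*() safe to call while requests are still queued: cryptd_alloc_*() starts the counter at one, cryptd_enqueue_request() takes an extra reference per request once the counter is live, and the completion paths drop that reference again, so the transform is only destroyed after the queue drains. The lifetime rule, reduced to a standalone userspace sketch (illustrative only, not a kernel API):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj { atomic_int refcnt; };	/* set to 1 at allocation */

	static void obj_get(struct obj *o)	/* enqueue path */
	{
		atomic_fetch_add(&o->refcnt, 1);
	}

	static void obj_put(struct obj *o)	/* completion path and free() */
	{
		if (atomic_fetch_sub(&o->refcnt, 1) == 1)
			free(o);		/* last reference really frees */
	}
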
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -26,7 +26,7 @@
 #include <linux/string.h>
 
 static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
-static struct crypto_blkcipher *crypto_default_null_skcipher;
+static struct crypto_skcipher *crypto_default_null_skcipher;
 static int crypto_default_null_skcipher_refcnt;
 
 static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -153,15 +153,16 @@ MODULE_ALIAS_CRYPTO("compress_null");
 MODULE_ALIAS_CRYPTO("digest_null");
 MODULE_ALIAS_CRYPTO("cipher_null");
 
-struct crypto_blkcipher *crypto_get_default_null_skcipher(void)
+struct crypto_skcipher *crypto_get_default_null_skcipher(void)
 {
-	struct crypto_blkcipher *tfm;
+	struct crypto_skcipher *tfm;
 
 	mutex_lock(&crypto_default_null_skcipher_lock);
 	tfm = crypto_default_null_skcipher;
 
 	if (!tfm) {
-		tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0);
+		tfm = crypto_alloc_skcipher("ecb(cipher_null)",
+					    0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(tfm))
 			goto unlock;
 
@@ -181,7 +182,7 @@ void crypto_put_default_null_skcipher(void)
 {
 	mutex_lock(&crypto_default_null_skcipher_lock);
 	if (!--crypto_default_null_skcipher_refcnt) {
-		crypto_free_blkcipher(crypto_default_null_skcipher);
+		crypto_free_skcipher(crypto_default_null_skcipher);
 		crypto_default_null_skcipher = NULL;
 	}
 	mutex_unlock(&crypto_default_null_skcipher_lock);
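
Because the default null tfm is now allocated with CRYPTO_ALG_ASYNC masked out, it is guaranteed synchronous and callers may keep the request on the stack. A rough usage sketch under that assumption (not taken from this patch; ERR_PTR handling elided):

	struct crypto_skcipher *null = crypto_get_default_null_skcipher();
	SKCIPHER_REQUEST_ON_STACK(req, null);
	int err;

	skcipher_request_set_tfm(req, null);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, NULL);
	err = crypto_skcipher_encrypt(req);	/* copies src to dst */

	crypto_put_default_null_skcipher();
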
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -28,6 +28,7 @@
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/rng.h>
 #include <crypto/akcipher.h>
+#include <crypto/kpp.h>
 
 #include "internal.h"
 
@@ -126,6 +127,21 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_report_kpp rkpp;
+
+	strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+	if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
+		    sizeof(struct crypto_report_kpp), &rkpp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
 static int crypto_report_one(struct crypto_alg *alg,
 			     struct crypto_user_alg *ualg, struct sk_buff *skb)
 {
@@ -176,6 +192,10 @@ static int crypto_report_one(struct crypto_alg *alg,
 			goto nla_put_failure;
 
 		break;
+	case CRYPTO_ALG_TYPE_KPP:
+		if (crypto_report_kpp(skb, alg))
+			goto nla_put_failure;
+		break;
 	}
 
 out:
@@ -358,32 +378,6 @@ drop_alg:
 	return err;
 }
 
-static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
-						   u32 mask)
-{
-	int err;
-	struct crypto_alg *alg;
-
-	type = crypto_skcipher_type(type);
-	mask = crypto_skcipher_mask(mask);
-
-	for (;;) {
-		alg = crypto_lookup_skcipher(name, type, mask);
-		if (!IS_ERR(alg))
-			return alg;
-
-		err = PTR_ERR(alg);
-		if (err != -EAGAIN)
-			break;
-		if (fatal_signal_pending(current)) {
-			err = -EINTR;
-			break;
-		}
-	}
-
-	return ERR_PTR(err);
-}
-
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  struct nlattr **attrs)
 {
@@ -416,16 +410,7 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	else
 		name = p->cru_name;
 
-	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_GIVCIPHER:
-	case CRYPTO_ALG_TYPE_BLKCIPHER:
-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
-		break;
-	default:
-		alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
-	}
-
+	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
 	if (IS_ERR(alg))
 		return PTR_ERR(alg);
 
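
For reference, the uapi structure that crypto_report_kpp() fills is tiny; at this point it carries only the type string (my reading of include/uapi/linux/cryptouser.h in this series; treat the exact layout as an assumption):

	struct crypto_report_kpp {
		char type[CRYPTO_MAX_NAME];	/* "kpp" */
	};
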
diff --git a/crypto/ctr.c b/crypto/ctr.c
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -26,13 +26,13 @@ struct crypto_ctr_ctx {
 };
 
 struct crypto_rfc3686_ctx {
-	struct crypto_ablkcipher *child;
+	struct crypto_skcipher *child;
 	u8 nonce[CTR_RFC3686_NONCE_SIZE];
 };
 
 struct crypto_rfc3686_req_ctx {
 	u8 iv[CTR_RFC3686_BLOCK_SIZE];
-	struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR;
+	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
 };
 
 static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
@@ -249,11 +249,11 @@ static struct crypto_template crypto_ctr_tmpl = {
 	.module = THIS_MODULE,
 };
 
-static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent,
+static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
 				 const u8 *key, unsigned int keylen)
 {
-	struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent);
-	struct crypto_ablkcipher *child = ctx->child;
+	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child = ctx->child;
 	int err;
 
 	/* the nonce is stored in bytes at end of key */
@@ -265,173 +265,178 @@ static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent,
 
 	keylen -= CTR_RFC3686_NONCE_SIZE;
 
-	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
-				    CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(child, key, keylen);
-	crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) &
-				    CRYPTO_TFM_RES_MASK);
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen);
+	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
 
 	return err;
 }
 
-static int crypto_rfc3686_crypt(struct ablkcipher_request *req)
+static int crypto_rfc3686_crypt(struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	struct crypto_ablkcipher *child = ctx->child;
-	unsigned long align = crypto_ablkcipher_alignmask(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = ctx->child;
+	unsigned long align = crypto_skcipher_alignmask(tfm);
 	struct crypto_rfc3686_req_ctx *rctx =
-		(void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1);
-	struct ablkcipher_request *subreq = &rctx->subreq;
+		(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
+	struct skcipher_request *subreq = &rctx->subreq;
 	u8 *iv = rctx->iv;
 
 	/* set up counter block */
 	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
-	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE);
+	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
 
 	/* initialize counter portion of counter block */
 	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
 		cpu_to_be32(1);
 
-	ablkcipher_request_set_tfm(subreq, child);
-	ablkcipher_request_set_callback(subreq, req->base.flags,
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
-				     iv);
+	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_callback(subreq, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+				   req->cryptlen, iv);
 
-	return crypto_ablkcipher_encrypt(subreq);
+	return crypto_skcipher_encrypt(subreq);
 }
 
-static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
+static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_ablkcipher *cipher;
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *cipher;
 	unsigned long align;
+	unsigned int reqsize;
 
-	cipher = crypto_spawn_skcipher(spawn);
+	cipher = crypto_spawn_skcipher2(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
 	ctx->child = cipher;
 
-	align = crypto_tfm_alg_alignmask(tfm);
+	align = crypto_skcipher_alignmask(tfm);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
-	tfm->crt_ablkcipher.reqsize = align +
-		sizeof(struct crypto_rfc3686_req_ctx) +
-		crypto_ablkcipher_reqsize(cipher);
+	reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
+		  crypto_skcipher_reqsize(cipher);
+	crypto_skcipher_set_reqsize(tfm, reqsize);
 
 	return 0;
 }
 
-static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_ablkcipher(ctx->child);
+	crypto_free_skcipher(ctx->child);
 }
 
-static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
+static void crypto_rfc3686_free(struct skcipher_instance *inst)
+{
+	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+
+	crypto_drop_skcipher(spawn);
+	kfree(inst);
+}
+
+static int crypto_rfc3686_create(struct crypto_template *tmpl,
+				 struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
+	struct skcipher_instance *inst;
+	struct skcipher_alg *alg;
 	struct crypto_skcipher_spawn *spawn;
 	const char *cipher_name;
 	int err;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
-	if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask)
-		return ERR_PTR(-EINVAL);
+	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+		return -EINVAL;
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
-		return ERR_CAST(cipher_name);
+		return PTR_ERR(cipher_name);
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
 	if (!inst)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
-	spawn = crypto_instance_ctx(inst);
+	spawn = skcipher_instance_ctx(inst);
 
-	crypto_set_skcipher_spawn(spawn, inst);
-	err = crypto_grab_skcipher(spawn, cipher_name, 0,
+	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+	err = crypto_grab_skcipher2(spawn, cipher_name, 0,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
 	if (err)
 		goto err_free_inst;
 
-	alg = crypto_skcipher_spawn_alg(spawn);
+	alg = crypto_spawn_skcipher_alg(spawn);
 
 	/* We only support 16-byte blocks. */
 	err = -EINVAL;
-	if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
+	if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
 		goto err_drop_spawn;
 
 	/* Not a stream cipher? */
-	if (alg->cra_blocksize != 1)
+	if (alg->base.cra_blocksize != 1)
 		goto err_drop_spawn;
 
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)",
-		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_spawn;
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-		     "rfc3686(%s)", alg->cra_driver_name) >=
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc3686(%s)", alg->base.cra_driver_name) >=
 			CRYPTO_MAX_ALG_NAME)
 		goto err_drop_spawn;
 
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
+	inst->alg.base.cra_priority = alg->base.cra_priority;
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-			      (alg->cra_flags & CRYPTO_ALG_ASYNC);
-	inst->alg.cra_type = &crypto_ablkcipher_type;
+	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 
-	inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE;
-	inst->alg.cra_ablkcipher.min_keysize =
-		alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE;
-	inst->alg.cra_ablkcipher.max_keysize =
-		alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE;
+	inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
+	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
+				CTR_RFC3686_NONCE_SIZE;
+	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
+				CTR_RFC3686_NONCE_SIZE;
 
-	inst->alg.cra_ablkcipher.geniv = "seqiv";
+	inst->alg.setkey = crypto_rfc3686_setkey;
+	inst->alg.encrypt = crypto_rfc3686_crypt;
+	inst->alg.decrypt = crypto_rfc3686_crypt;
 
-	inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey;
-	inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt;
-	inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
+	inst->alg.init = crypto_rfc3686_init_tfm;
+	inst->alg.exit = crypto_rfc3686_exit_tfm;
 
-	inst->alg.cra_init = crypto_rfc3686_init_tfm;
-	inst->alg.cra_exit = crypto_rfc3686_exit_tfm;
+	inst->free = crypto_rfc3686_free;
 
-	return inst;
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_spawn;
+
+out:
+	return err;
 
 err_drop_spawn:
 	crypto_drop_skcipher(spawn);
 err_free_inst:
 	kfree(inst);
-	return ERR_PTR(err);
-}
-
-static void crypto_rfc3686_free(struct crypto_instance *inst)
-{
-	struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(spawn);
-	kfree(inst);
+	goto out;
 }
 
 static struct crypto_template crypto_rfc3686_tmpl = {
 	.name = "rfc3686",
-	.alloc = crypto_rfc3686_alloc,
-	.free = crypto_rfc3686_free,
+	.create = crypto_rfc3686_create,
 	.module = THIS_MODULE,
 };
 
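
After this conversion the template registers a native skcipher_alg, so users drive rfc3686 through the standard skcipher request flow. An illustrative caller (not part of the patch; error handling trimmed):

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	tfm = crypto_alloc_skcipher("rfc3686(ctr(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	/* key material is the AES key followed by the 4-byte nonce */
	err = crypto_skcipher_setkey(tfm, key, keylen);

	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	if (!err)
		err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
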
509
crypto/cts.c
509
crypto/cts.c
|
@@ -40,7 +40,7 @@
 * rfc3962 includes errata information in its Appendix A.
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -51,289 +51,364 @@
#include <linux/slab.h>

struct crypto_cts_ctx {
    struct crypto_skcipher *child;
};

struct crypto_cts_reqctx {
    struct scatterlist sg[2];
    unsigned offset;
    struct skcipher_request subreq;
};

static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req)
{
    struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct crypto_skcipher *child = ctx->child;

    return PTR_ALIGN((u8 *)(rctx + 1) + crypto_skcipher_reqsize(child),
                     crypto_skcipher_alignmask(tfm) + 1);
}

static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
                             unsigned int keylen)
{
    struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent);
    struct crypto_skcipher *child = ctx->child;
    int err;

    crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
    crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                     CRYPTO_TFM_REQ_MASK);
    err = crypto_skcipher_setkey(child, key, keylen);
    crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                                      CRYPTO_TFM_RES_MASK);
    return err;
}

static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;

    if (err == -EINPROGRESS)
        return;

    skcipher_request_complete(req, err);
}

static int cts_cbc_encrypt(struct skcipher_request *req)
{
    struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct skcipher_request *subreq = &rctx->subreq;
    int bsize = crypto_skcipher_blocksize(tfm);
    u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
    struct scatterlist *sg;
    unsigned int offset;
    int lastn;

    offset = rctx->offset;
    lastn = req->cryptlen - offset;

    sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
    scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);

    memset(d, 0, bsize);
    scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);

    scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
    memzero_explicit(d, sizeof(d));

    skcipher_request_set_callback(subreq, req->base.flags &
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  cts_cbc_crypt_done, req);
    skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv);
    return crypto_skcipher_encrypt(subreq);
}

static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;

    if (err)
        goto out;

    err = cts_cbc_encrypt(req);
    if (err == -EINPROGRESS ||
        (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
        return;

out:
    skcipher_request_complete(req, err);
}

static int crypto_cts_encrypt(struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
    struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct skcipher_request *subreq = &rctx->subreq;
    int bsize = crypto_skcipher_blocksize(tfm);
    unsigned int nbytes = req->cryptlen;
    int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
    unsigned int offset;

    skcipher_request_set_tfm(subreq, ctx->child);

    if (cbc_blocks <= 0) {
        skcipher_request_set_callback(subreq, req->base.flags,
                                      req->base.complete,
                                      req->base.data);
        skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
                                   req->iv);
        return crypto_skcipher_encrypt(subreq);
    }

    offset = cbc_blocks * bsize;
    rctx->offset = offset;

    skcipher_request_set_callback(subreq, req->base.flags,
                                  crypto_cts_encrypt_done, req);
    skcipher_request_set_crypt(subreq, req->src, req->dst,
                               offset, req->iv);

    return crypto_skcipher_encrypt(subreq) ?:
           cts_cbc_encrypt(req);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
    struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct skcipher_request *subreq = &rctx->subreq;
    int bsize = crypto_skcipher_blocksize(tfm);
    u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
    struct scatterlist *sg;
    unsigned int offset;
    u8 *space;
    int lastn;

    offset = rctx->offset;
    lastn = req->cryptlen - offset;

    sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);

    /* 1. Decrypt Cn-1 (s) to create Dn */
    scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
    space = crypto_cts_reqctx_space(req);
    crypto_xor(d + bsize, space, bsize);
    /* 2. Pad Cn with zeros at the end to create C of length BB */
    memset(d, 0, bsize);
    scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
    /* 3. Exclusive-or Dn with C to create Xn */
    /* 4. Select the first Ln bytes of Xn to create Pn */
    crypto_xor(d + bsize, d, lastn);

    /* 5. Append the tail (BB - Ln) bytes of Xn to Cn to create En */
    memcpy(d + lastn, d + bsize + lastn, bsize - lastn);
    /* 6. Decrypt En to create Pn-1 */

    scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
    memzero_explicit(d, sizeof(d));

    skcipher_request_set_callback(subreq, req->base.flags &
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  cts_cbc_crypt_done, req);

    skcipher_request_set_crypt(subreq, sg, sg, bsize, space);
    return crypto_skcipher_decrypt(subreq);
}

static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;

    if (err)
        goto out;

    err = cts_cbc_decrypt(req);
    if (err == -EINPROGRESS ||
        (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
        return;

out:
    skcipher_request_complete(req, err);
}

static int crypto_cts_decrypt(struct skcipher_request *req)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
    struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct skcipher_request *subreq = &rctx->subreq;
    int bsize = crypto_skcipher_blocksize(tfm);
    unsigned int nbytes = req->cryptlen;
    int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
    unsigned int offset;
    u8 *space;

    skcipher_request_set_tfm(subreq, ctx->child);

    if (cbc_blocks <= 0) {
        skcipher_request_set_callback(subreq, req->base.flags,
                                      req->base.complete,
                                      req->base.data);
        skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
                                   req->iv);
        return crypto_skcipher_decrypt(subreq);
    }

    skcipher_request_set_callback(subreq, req->base.flags,
                                  crypto_cts_decrypt_done, req);

    space = crypto_cts_reqctx_space(req);

    offset = cbc_blocks * bsize;
    rctx->offset = offset;

    if (cbc_blocks <= 1)
        memcpy(space, req->iv, bsize);
    else
        scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
                                 bsize, 0);

    skcipher_request_set_crypt(subreq, req->src, req->dst,
                               offset, req->iv);

    return crypto_skcipher_decrypt(subreq) ?:
           cts_cbc_decrypt(req);
}

static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
{
    struct skcipher_instance *inst = skcipher_alg_instance(tfm);
    struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
    struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct crypto_skcipher *cipher;
    unsigned reqsize;
    unsigned bsize;
    unsigned align;

    cipher = crypto_spawn_skcipher2(spawn);
    if (IS_ERR(cipher))
        return PTR_ERR(cipher);

    ctx->child = cipher;

    align = crypto_skcipher_alignmask(tfm);
    bsize = crypto_skcipher_blocksize(cipher);
    reqsize = ALIGN(sizeof(struct crypto_cts_reqctx) +
                    crypto_skcipher_reqsize(cipher),
                    crypto_tfm_ctx_alignment()) +
              (align & ~(crypto_tfm_ctx_alignment() - 1)) + bsize;

    crypto_skcipher_set_reqsize(tfm, reqsize);

    return 0;
}

static void crypto_cts_exit_tfm(struct crypto_skcipher *tfm)
{
    struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);

    crypto_free_skcipher(ctx->child);
}

static void crypto_cts_free(struct skcipher_instance *inst)
{
    crypto_drop_skcipher(skcipher_instance_ctx(inst));
    kfree(inst);
}

static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
    struct crypto_skcipher_spawn *spawn;
    struct skcipher_instance *inst;
    struct crypto_attr_type *algt;
    struct skcipher_alg *alg;
    const char *cipher_name;
    int err;

    algt = crypto_get_attr_type(tb);
    if (IS_ERR(algt))
        return PTR_ERR(algt);

    if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
        return -EINVAL;

    cipher_name = crypto_attr_alg_name(tb[1]);
    if (IS_ERR(cipher_name))
        return PTR_ERR(cipher_name);

    inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
    if (!inst)
        return -ENOMEM;

    spawn = skcipher_instance_ctx(inst);

    crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
    err = crypto_grab_skcipher2(spawn, cipher_name, 0,
                                crypto_requires_sync(algt->type,
                                                     algt->mask));
    if (err)
        goto err_free_inst;

    alg = crypto_spawn_skcipher_alg(spawn);

    err = -EINVAL;
    if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
        goto err_drop_spawn;

    if (strncmp(alg->base.cra_name, "cbc(", 4))
        goto err_drop_spawn;

    err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
                              &alg->base);
    if (err)
        goto err_drop_spawn;

    inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
    inst->alg.base.cra_priority = alg->base.cra_priority;
    inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
    inst->alg.base.cra_alignmask = alg->base.cra_alignmask;

    /* We access the data as u32s when xoring. */
    inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;

    inst->alg.ivsize = alg->base.cra_blocksize;
    inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
    inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
    inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

    inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);

    inst->alg.init = crypto_cts_init_tfm;
    inst->alg.exit = crypto_cts_exit_tfm;

    inst->alg.setkey = crypto_cts_setkey;
    inst->alg.encrypt = crypto_cts_encrypt;
    inst->alg.decrypt = crypto_cts_decrypt;

    inst->free = crypto_cts_free;

    err = skcipher_register_instance(tmpl, inst);
    if (err)
        goto err_drop_spawn;

out:
    return err;

err_drop_spawn:
    crypto_drop_skcipher(spawn);
err_free_inst:
    kfree(inst);
    goto out;
}

static struct crypto_template crypto_cts_tmpl = {
    .name = "cts",
    .create = crypto_cts_create,
    .module = THIS_MODULE,
};
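The converted cts code splits each request at offset = cbc_blocks * bsize and handles the final two blocks with ciphertext stealing. A minimal sketch of that arithmetic for a sample request length (stand-alone C; the concrete values are chosen for illustration only):

/* sketch only: reproduces the cbc_blocks/offset/lastn split from cts.c */
#include <stdio.h>

int main(void)
{
    unsigned int bsize = 16;    /* AES block size */
    unsigned int cryptlen = 53; /* example request length */
    int cbc_blocks = (cryptlen + bsize - 1) / bsize - 1; /* 3 */
    unsigned int offset = cbc_blocks * bsize;            /* 48 */
    int lastn = cryptlen - offset;                       /* 5 */

    /* first `offset` bytes go through plain CBC; the cts pass then
     * rewrites the last full block plus the lastn stolen tail bytes */
    printf("plain CBC bytes: %u, stolen tail: %d\n", offset, lastn);
    return 0;
}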
crypto/dh.c
@@ -0,0 +1,189 @@
/* Diffie-Hellman Key Agreement Method [RFC2631]
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/dh.h>
#include <linux/mpi.h>

struct dh_ctx {
    MPI p;
    MPI g;
    MPI xa;
};

static inline void dh_clear_params(struct dh_ctx *ctx)
{
    mpi_free(ctx->p);
    mpi_free(ctx->g);
    ctx->p = NULL;
    ctx->g = NULL;
}

static void dh_free_ctx(struct dh_ctx *ctx)
{
    dh_clear_params(ctx);
    mpi_free(ctx->xa);
    ctx->xa = NULL;
}

/*
 * If base is g we compute the public key
 *    ya = g^xa mod p; [RFC2631 sec 2.1.1]
 * else if base is the counterpart public key we compute the shared secret
 *    ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
 */
static int _compute_val(const struct dh_ctx *ctx, MPI base, MPI val)
{
    /* val = base^xa mod p */
    return mpi_powm(val, base, ctx->xa, ctx->p);
}

static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm)
{
    return kpp_tfm_ctx(tfm);
}

static int dh_check_params_length(unsigned int p_len)
{
    return (p_len < 1536) ? -EINVAL : 0;
}

static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
{
    if (unlikely(!params->p || !params->g))
        return -EINVAL;

    if (dh_check_params_length(params->p_size << 3))
        return -EINVAL;

    ctx->p = mpi_read_raw_data(params->p, params->p_size);
    if (!ctx->p)
        return -EINVAL;

    ctx->g = mpi_read_raw_data(params->g, params->g_size);
    if (!ctx->g) {
        mpi_free(ctx->p);
        return -EINVAL;
    }

    return 0;
}

static int dh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
{
    struct dh_ctx *ctx = dh_get_ctx(tfm);
    struct dh params;

    if (crypto_dh_decode_key(buf, len, &params) < 0)
        return -EINVAL;

    if (dh_set_params(ctx, &params) < 0)
        return -EINVAL;

    ctx->xa = mpi_read_raw_data(params.key, params.key_size);
    if (!ctx->xa) {
        dh_clear_params(ctx);
        return -EINVAL;
    }

    return 0;
}

static int dh_compute_value(struct kpp_request *req)
{
    struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
    struct dh_ctx *ctx = dh_get_ctx(tfm);
    MPI base, val = mpi_alloc(0);
    int ret = 0;
    int sign;

    if (!val)
        return -ENOMEM;

    if (unlikely(!ctx->xa)) {
        ret = -EINVAL;
        goto err_free_val;
    }

    if (req->src) {
        base = mpi_read_raw_from_sgl(req->src, req->src_len);
        if (!base) {
            ret = -EINVAL;
            goto err_free_val;
        }
    } else {
        base = ctx->g;
    }

    ret = _compute_val(ctx, base, val);
    if (ret)
        goto err_free_base;

    ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
    if (ret)
        goto err_free_base;

    if (sign < 0)
        ret = -EBADMSG;
err_free_base:
    if (req->src)
        mpi_free(base);
err_free_val:
    mpi_free(val);
    return ret;
}

static int dh_max_size(struct crypto_kpp *tfm)
{
    struct dh_ctx *ctx = dh_get_ctx(tfm);

    return mpi_get_size(ctx->p);
}

static void dh_exit_tfm(struct crypto_kpp *tfm)
{
    struct dh_ctx *ctx = dh_get_ctx(tfm);

    dh_free_ctx(ctx);
}

static struct kpp_alg dh = {
    .set_secret = dh_set_secret,
    .generate_public_key = dh_compute_value,
    .compute_shared_secret = dh_compute_value,
    .max_size = dh_max_size,
    .exit = dh_exit_tfm,
    .base = {
        .cra_name = "dh",
        .cra_driver_name = "dh-generic",
        .cra_priority = 100,
        .cra_module = THIS_MODULE,
        .cra_ctxsize = sizeof(struct dh_ctx),
    },
};

static int dh_init(void)
{
    return crypto_register_kpp(&dh);
}

static void dh_exit(void)
{
    crypto_unregister_kpp(&dh);
}

module_init(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DH generic algorithm");
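For reference, _compute_val() above is plain modular exponentiation; mpi_powm() performs it on arbitrary-precision integers. A toy square-and-multiply version over 64-bit integers, run through the classic exchange on tiny parameters (illustration only, not cryptographically meaningful; real parameters are at least 1536 bits per dh_check_params_length):

/* sketch only: toy modexp mirroring the math of _compute_val() */
#include <stdio.h>
#include <stdint.h>

static uint64_t modexp(uint64_t base, uint64_t exp, uint64_t mod)
{
    uint64_t result = 1 % mod;

    base %= mod;
    while (exp) {
        if (exp & 1)
            result = (result * base) % mod;
        base = (base * base) % mod;
        exp >>= 1;
    }
    return result;
}

int main(void)
{
    uint64_t p = 23, g = 5, xa = 6, xb = 15;
    uint64_t ya = modexp(g, xa, p); /* A's public key: 8 */
    uint64_t yb = modexp(g, xb, p); /* B's public key: 19 */

    /* both sides derive the same shared secret ZZ = 2 */
    printf("ZZ = %llu == %llu\n",
           (unsigned long long)modexp(yb, xa, p),
           (unsigned long long)modexp(ya, xb, p));
    return 0;
}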
crypto/dh_helper.c
@@ -0,0 +1,95 @@
/*
 * Copyright (c) 2016, Intel Corporation
 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/string.h>
#include <crypto/dh.h>
#include <crypto/kpp.h>

#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))

static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
{
    memcpy(dst, src, size);
    return dst + size;
}

static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
{
    memcpy(dst, src, size);
    return src + size;
}

static inline int dh_data_size(const struct dh *p)
{
    return p->key_size + p->p_size + p->g_size;
}

int crypto_dh_key_len(const struct dh *p)
{
    return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
}
EXPORT_SYMBOL_GPL(crypto_dh_key_len);

int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
{
    u8 *ptr = buf;
    struct kpp_secret secret = {
        .type = CRYPTO_KPP_SECRET_TYPE_DH,
        .len = len
    };

    if (unlikely(!buf))
        return -EINVAL;

    if (len != crypto_dh_key_len(params))
        return -EINVAL;

    ptr = dh_pack_data(ptr, &secret, sizeof(secret));
    ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
    ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
    ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
    ptr = dh_pack_data(ptr, params->key, params->key_size);
    ptr = dh_pack_data(ptr, params->p, params->p_size);
    dh_pack_data(ptr, params->g, params->g_size);

    return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_encode_key);

int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
{
    const u8 *ptr = buf;
    struct kpp_secret secret;

    if (unlikely(!buf || len < DH_KPP_SECRET_MIN_SIZE))
        return -EINVAL;

    ptr = dh_unpack_data(&secret, ptr, sizeof(secret));
    if (secret.type != CRYPTO_KPP_SECRET_TYPE_DH)
        return -EINVAL;

    ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
    ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
    ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
    if (secret.len != crypto_dh_key_len(params))
        return -EINVAL;

    /* Don't allocate memory. Set pointers to data within
     * the given buffer
     */
    params->key = (void *)ptr;
    params->p = (void *)(ptr + params->key_size);
    params->g = (void *)(ptr + params->key_size + params->p_size);

    return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
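crypto_dh_encode_key() above packs the secret as a kpp_secret header, then the three size fields, then key || p || g. A stand-alone sketch of the resulting length calculation (the local kpp_secret layout here is an assumption mirroring what include/crypto/kpp.h declares, not something shown in this diff):

/* sketch only: models the blob length of crypto_dh_key_len() */
#include <stdio.h>

struct kpp_secret {                /* assumed layout, for illustration */
    unsigned short type;
    unsigned short len;
};

int main(void)
{
    unsigned int key_size = 32, p_size = 256, g_size = 1;
    unsigned long len = sizeof(struct kpp_secret) + 3 * sizeof(int)
                        + key_size + p_size + g_size;

    /* header + size fields + key || p || g */
    printf("encoded DH secret: %lu bytes\n", len);
    return 0;
}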
crypto/drbg.c
@@ -252,10 +252,16 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");

static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
                                 const unsigned char *key);
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
                          const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
                              u8 *inbuf, u32 inbuflen,
                              u8 *outbuf, u32 outlen);
#define DRBG_CTR_NULL_LEN 128

/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -270,6 +276,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
    drbg_string_fill(&data, out, drbg_blocklen(drbg));

    /* 10.4.3 step 2 / 4 */
    drbg_kcapi_symsetkey(drbg, key);
    list_for_each_entry(curr, in, list) {
        const unsigned char *pos = curr->buf;
        size_t len = curr->len;
@@ -278,7 +285,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
            /* 10.4.3 step 4.2 */
            if (drbg_blocklen(drbg) == cnt) {
                cnt = 0;
                ret = drbg_kcapi_sym(drbg, out, &data);
                if (ret)
                    return ret;
            }
@@ -290,7 +297,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
    }
    /* 10.4.3 step 4.2 for last block */
    if (cnt)
        ret = drbg_kcapi_sym(drbg, out, &data);

    return ret;
}
@@ -425,6 +432,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
    /* 10.4.2 step 12: overwriting of outval is implemented in next step */

    /* 10.4.2 step 13 */
    drbg_kcapi_symsetkey(drbg, temp);
    while (generated_len < bytes_to_return) {
        short blocklen = 0;
        /*
@@ -432,7 +440,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
         * implicit as the key is only drbg_blocklen in size based on
         * the implementation of the cipher function callback
         */
        ret = drbg_kcapi_sym(drbg, X, &cipherin);
        if (ret)
            goto out;
        blocklen = (drbg_blocklen(drbg) <
@@ -476,13 +484,26 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
    unsigned char *temp = drbg->scratchpad;
    unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
                             drbg_blocklen(drbg);

    if (3 > reseed)
        memset(df_data, 0, drbg_statelen(drbg));

    if (!reseed) {
        /*
         * The DRBG uses the CTR mode of the underlying AES cipher. The
         * CTR mode increments the counter value after the AES operation
         * but SP800-90A requires that the counter is incremented before
         * the AES operation. Hence, we increment it at the time we set
         * it by one.
         */
        crypto_inc(drbg->V, drbg_blocklen(drbg));

        ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
                                     drbg_keylen(drbg));
        if (ret)
            goto out;
    }

    /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
    if (seed) {
        ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
@@ -490,35 +511,20 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
            goto out;
    }

    ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg),
                             temp, drbg_statelen(drbg));
    if (ret)
        return ret;

    /* 10.2.1.2 step 5 */
    ret = crypto_skcipher_setkey(drbg->ctr_handle, temp,
                                 drbg_keylen(drbg));
    if (ret)
        goto out;
    /* 10.2.1.2 step 6 */
    memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
    /* See above: increment counter by one to compensate timing of CTR op */
    crypto_inc(drbg->V, drbg_blocklen(drbg));
    ret = 0;

out:
@@ -537,9 +543,8 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
                             unsigned char *buf, unsigned int buflen,
                             struct list_head *addtl)
{
    int ret;
    int len = min_t(int, buflen, INT_MAX);

    /* 10.2.1.5.2 step 2 */
    if (addtl && !list_empty(addtl)) {
@@ -549,33 +554,16 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
    }

    /* 10.2.1.5.2 step 4.1 */
    ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
                             buf, len);
    if (ret)
        return ret;

    /* 10.2.1.5.2 step 6 */
    ret = drbg_ctr_update(drbg, NULL, 3);
    if (ret)
        len = ret;

    return len;
}
@@ -1145,11 +1133,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
    if (!drbg)
        return;
    kzfree(drbg->Vbuf);
    drbg->Vbuf = NULL;
    kzfree(drbg->Cbuf);
    drbg->Cbuf = NULL;
    kzfree(drbg->scratchpadbuf);
    drbg->scratchpadbuf = NULL;
    drbg->reseed_ctr = 0;
    drbg->d_ops = NULL;
    drbg->core = NULL;
@@ -1185,12 +1173,18 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
        goto err;
    }

    ret = drbg->d_ops->crypto_init(drbg);
    if (ret < 0)
        goto err;

    drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
    if (!drbg->Vbuf)
        goto fini;
    drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
    drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
    if (!drbg->Cbuf)
        goto fini;
    drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
    /* scratchpad is only generated for CTR and Hash */
    if (drbg->core->flags & DRBG_HMAC)
        sb_size = 0;
@@ -1204,13 +1198,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
        sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);

    if (0 < sb_size) {
        drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
        if (!drbg->scratchpadbuf)
            goto fini;
        drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
    }

    return 0;

fini:
    drbg->d_ops->crypto_fini(drbg);
err:
    drbg_dealloc_state(drbg);
    return ret;
@@ -1478,10 +1475,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
    if (ret)
        goto unlock;

    ret = drbg_prepare_hrng(drbg);
    if (ret)
        goto free_everything;
@@ -1505,8 +1498,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
    mutex_unlock(&drbg->drbg_mutex);
    return ret;

unlock:
    mutex_unlock(&drbg->drbg_mutex);
    return ret;
@@ -1591,7 +1582,8 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
    sdesc->shash.tfm = tfm;
    sdesc->shash.flags = 0;
    drbg->priv_data = sdesc;

    return crypto_shash_alignmask(tfm);
}

static int drbg_fini_hash_kernel(struct drbg_state *drbg)
@@ -1627,10 +1619,45 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */

#ifdef CONFIG_CRYPTO_DRBG_CTR
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
    struct crypto_cipher *tfm =
        (struct crypto_cipher *)drbg->priv_data;
    if (tfm)
        crypto_free_cipher(tfm);
    drbg->priv_data = NULL;

    if (drbg->ctr_handle)
        crypto_free_skcipher(drbg->ctr_handle);
    drbg->ctr_handle = NULL;

    if (drbg->ctr_req)
        skcipher_request_free(drbg->ctr_req);
    drbg->ctr_req = NULL;

    kfree(drbg->ctr_null_value_buf);
    drbg->ctr_null_value = NULL;

    return 0;
}

static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
{
    struct drbg_state *drbg = req->data;

    if (error == -EINPROGRESS)
        return;
    drbg->ctr_async_err = error;
    complete(&drbg->ctr_completion);
}

static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
    struct crypto_cipher *tfm;
    struct crypto_skcipher *sk_tfm;
    struct skcipher_request *req;
    unsigned int alignmask;
    char ctr_name[CRYPTO_MAX_ALG_NAME];

    tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
    if (IS_ERR(tfm)) {
@@ -1640,31 +1667,103 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
    }
    BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
    drbg->priv_data = tfm;

    if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
        drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
        drbg_fini_sym_kernel(drbg);
        return -EINVAL;
    }
    sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0);
    if (IS_ERR(sk_tfm)) {
        pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n",
                ctr_name);
        drbg_fini_sym_kernel(drbg);
        return PTR_ERR(sk_tfm);
    }
    drbg->ctr_handle = sk_tfm;

    req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
    if (!req) {
        pr_info("DRBG: could not allocate request queue\n");
        drbg_fini_sym_kernel(drbg);
        return -ENOMEM;
    }
    drbg->ctr_req = req;
    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  drbg_skcipher_cb, drbg);

    alignmask = crypto_skcipher_alignmask(sk_tfm);
    drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
                                       GFP_KERNEL);
    if (!drbg->ctr_null_value_buf) {
        drbg_fini_sym_kernel(drbg);
        return -ENOMEM;
    }
    drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
                                           alignmask + 1);

    return alignmask;
}

static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
                                 const unsigned char *key)
{
    struct crypto_cipher *tfm =
        (struct crypto_cipher *)drbg->priv_data;

    crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
}

static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
                          const struct drbg_string *in)
{
    struct crypto_cipher *tfm =
        (struct crypto_cipher *)drbg->priv_data;

    /* there is only component in *in */
    BUG_ON(in->len < drbg_blocklen(drbg));
    crypto_cipher_encrypt_one(tfm, outval, in->buf);
    return 0;
}

static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
                              u8 *inbuf, u32 inlen,
                              u8 *outbuf, u32 outlen)
{
    struct scatterlist sg_in;

    sg_init_one(&sg_in, inbuf, inlen);

    while (outlen) {
        u32 cryptlen = min_t(u32, inlen, outlen);
        struct scatterlist sg_out;
        int ret;

        sg_init_one(&sg_out, outbuf, cryptlen);
        skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
                                   cryptlen, drbg->V);
        ret = crypto_skcipher_encrypt(drbg->ctr_req);
        switch (ret) {
        case 0:
            break;
        case -EINPROGRESS:
        case -EBUSY:
            ret = wait_for_completion_interruptible(
                &drbg->ctr_completion);
            if (!ret && !drbg->ctr_async_err) {
                reinit_completion(&drbg->ctr_completion);
                break;
            }
        default:
            return ret;
        }
        init_completion(&drbg->ctr_completion);

        outlen -= cryptlen;
    }

    return 0;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */

/***************************************************************
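The comment in drbg_ctr_update() above is the heart of this conversion: SP800-90A increments the counter before each block operation while the kernel's CTR mode increments after, so V is bumped once up front with crypto_inc() to compensate. A toy big-endian counter increment modelled on crypto_inc() (stand-alone C sketch, not the kernel implementation):

/* sketch only: byte-wise big-endian increment with carry, like crypto_inc() */
#include <stdio.h>

static void ctr_inc(unsigned char *ctr, unsigned int len)
{
    while (len--) {
        if (++ctr[len] != 0) /* stop as soon as a byte does not wrap */
            break;
    }
}

int main(void)
{
    unsigned char v[4] = { 0x00, 0x00, 0x00, 0xff };

    ctr_inc(v, sizeof(v)); /* 0x000000ff -> 0x00000100 */
    printf("%02x%02x%02x%02x\n", v[0], v[1], v[2], v[3]);
    return 0;
}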
crypto/ecc.c (diff too large, not shown)
crypto/ecc.h
@@ -0,0 +1,83 @@
/*
 * Copyright (c) 2013, Kenneth MacKay
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H

#define ECC_MAX_DIGITS 4 /* 256 */

#define ECC_DIGITS_TO_BYTES_SHIFT 3

/**
 * ecc_is_key_valid() - Validate a given ECDH private key
 *
 * @curve_id:        id representing the curve to use
 * @ndigits:         curve number of digits
 * @private_key:     private key to be used for the given curve
 * @private_key_len: private key len
 *
 * Returns 0 if the key is acceptable, a negative value otherwise
 */
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
                     const u8 *private_key, unsigned int private_key_len);

/**
 * ecdh_make_pub_key() - Compute an ECC public key
 *
 * @curve_id:        id representing the curve to use
 * @private_key:     pregenerated private key for the given curve
 * @private_key_len: length of private_key
 * @public_key:      buffer for storing the public key generated
 * @public_key_len:  length of the public_key buffer
 *
 * Returns 0 if the public key was generated successfully, a negative value
 * if an error occurred.
 */
int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
                      const u8 *private_key, unsigned int private_key_len,
                      u8 *public_key, unsigned int public_key_len);

/**
 * crypto_ecdh_shared_secret() - Compute a shared secret
 *
 * @curve_id:        id representing the curve to use
 * @private_key:     private key of part A
 * @private_key_len: length of private_key
 * @public_key:      public key of counterpart B
 * @public_key_len:  length of public_key
 * @secret:          buffer for storing the calculated shared secret
 * @secret_len:      length of the secret buffer
 *
 * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
 * before using it for symmetric encryption or HMAC.
 *
 * Returns 0 if the shared secret was generated successfully, a negative value
 * if an error occurred.
 */
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
                              const u8 *private_key, unsigned int private_key_len,
                              const u8 *public_key, unsigned int public_key_len,
                              u8 *secret, unsigned int secret_len);
#endif
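ECC_DIGITS_TO_BYTES_SHIFT above encodes that one digit is a u64, i.e. 8 bytes, so buffer sizes follow directly from the curve's ndigits. A minimal sketch of that arithmetic (stand-alone C):

/* sketch only: digit-count to byte-count conversion used by the ecc code */
#include <stdio.h>

#define ECC_DIGITS_TO_BYTES_SHIFT 3 /* one digit = one u64 = 8 bytes */

int main(void)
{
    unsigned int p192_ndigits = 3, p256_ndigits = 4;

    printf("P-192 keys: %u bytes, P-256 keys: %u bytes\n",
           p192_ndigits << ECC_DIGITS_TO_BYTES_SHIFT,  /* 24 */
           p256_ndigits << ECC_DIGITS_TO_BYTES_SHIFT); /* 32 */
    return 0;
}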
@ -0,0 +1,57 @@
|
||||||
|
#ifndef _CRYTO_ECC_CURVE_DEFS_H
|
||||||
|
#define _CRYTO_ECC_CURVE_DEFS_H
|
||||||
|
|
||||||
|
struct ecc_point {
|
||||||
|
u64 *x;
|
||||||
|
u64 *y;
|
||||||
|
u8 ndigits;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ecc_curve {
|
||||||
|
char *name;
|
||||||
|
struct ecc_point g;
|
||||||
|
u64 *p;
|
||||||
|
u64 *n;
|
||||||
|
};
|
||||||
|
|
||||||
|
/* NIST P-192 */
|
||||||
|
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
|
||||||
|
0x188DA80EB03090F6ull };
|
||||||
|
static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
|
||||||
|
0x07192B95FFC8DA78ull };
|
||||||
|
static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
|
||||||
|
0xFFFFFFFFFFFFFFFFull };
|
||||||
|
static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
|
||||||
|
0xFFFFFFFFFFFFFFFFull };
|
||||||
|
static struct ecc_curve nist_p192 = {
|
||||||
|
.name = "nist_192",
|
||||||
|
.g = {
|
||||||
|
.x = nist_p192_g_x,
|
||||||
|
.y = nist_p192_g_y,
|
||||||
|
.ndigits = 3,
|
||||||
|
},
|
||||||
|
.p = nist_p192_p,
|
||||||
|
.n = nist_p192_n
|
||||||
|
};
|
||||||
|
|
||||||
|
/* NIST P-256 */
|
||||||
|
static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
|
||||||
|
0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
|
||||||
|
static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
|
||||||
|
0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull };
|
||||||
|
static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
|
||||||
|
0x0000000000000000ull, 0xFFFFFFFF00000001ull };
|
||||||
|
static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
|
||||||
|
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
|
||||||
|
static struct ecc_curve nist_p256 = {
|
||||||
|
.name = "nist_256",
|
||||||
|
.g = {
|
||||||
|
.x = nist_p256_g_x,
|
||||||
|
.y = nist_p256_g_y,
|
||||||
|
.ndigits = 4,
|
||||||
|
},
|
||||||
|
.p = nist_p256_p,
|
||||||
|
.n = nist_p256_n
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif
|
crypto/ecdh.c
@@ -0,0 +1,151 @@
/* ECDH key-agreement protocol
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
#include <linux/scatterlist.h>
#include "ecc.h"

struct ecdh_ctx {
	unsigned int curve_id;
	unsigned int ndigits;
	u64 private_key[ECC_MAX_DIGITS];
	u64 public_key[2 * ECC_MAX_DIGITS];
	u64 shared_secret[ECC_MAX_DIGITS];
};

static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}

static unsigned int ecdh_supported_curve(unsigned int curve_id)
{
	switch (curve_id) {
	case ECC_CURVE_NIST_P192: return 3;
	case ECC_CURVE_NIST_P256: return 4;
	default: return 0;
	}
}

static int ecdh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
{
	struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
	struct ecdh params;
	unsigned int ndigits;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	ndigits = ecdh_supported_curve(params.curve_id);
	if (!ndigits)
		return -EINVAL;

	ctx->curve_id = params.curve_id;
	ctx->ndigits = ndigits;

	if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
			     (const u8 *)params.key, params.key_size) < 0)
		return -EINVAL;

	memcpy(ctx->private_key, params.key, params.key_size);

	return 0;
}

static int ecdh_compute_value(struct kpp_request *req)
{
	int ret = 0;
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
	size_t copied, nbytes;
	void *buf;

	nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

	if (req->src) {
		copied = sg_copy_to_buffer(req->src, 1, ctx->public_key,
					   2 * nbytes);
		if (copied != 2 * nbytes)
			return -EINVAL;

		ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
						(const u8 *)ctx->private_key, nbytes,
						(const u8 *)ctx->public_key, 2 * nbytes,
						(u8 *)ctx->shared_secret, nbytes);

		buf = ctx->shared_secret;
	} else {
		ret = ecdh_make_pub_key(ctx->curve_id, ctx->ndigits,
					(const u8 *)ctx->private_key, nbytes,
					(u8 *)ctx->public_key,
					sizeof(ctx->public_key));
		buf = ctx->public_key;
		/* Public part is a point thus it has both coordinates */
		nbytes *= 2;
	}

	if (ret < 0)
		return ret;

	copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
	if (copied != nbytes)
		return -EINVAL;

	return ret;
}

static int ecdh_max_size(struct crypto_kpp *tfm)
{
	struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
	int nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

	/* Public key is made of two coordinates */
	return 2 * nbytes;
}

static void no_exit_tfm(struct crypto_kpp *tfm)
{
	return;
}

static struct kpp_alg ecdh = {
	.set_secret = ecdh_set_secret,
	.generate_public_key = ecdh_compute_value,
	.compute_shared_secret = ecdh_compute_value,
	.max_size = ecdh_max_size,
	.exit = no_exit_tfm,
	.base = {
		.cra_name = "ecdh",
		.cra_driver_name = "ecdh-generic",
		.cra_priority = 100,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct ecdh_ctx),
	},
};

static int ecdh_init(void)
{
	return crypto_register_kpp(&ecdh);
}

static void ecdh_exit(void)
{
	crypto_unregister_kpp(&ecdh);
}

module_init(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECDH generic algorithm");
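Callers reach ecdh-generic through the generic KPP API rather than by calling it directly. A minimal sketch of that path (not part of the diff): error handling is abbreviated, the request runs synchronously with no callback, and the local function name is hypothetical.

static int ecdh_via_kpp(const char *priv, unsigned short priv_len)
{
	struct ecdh params = {
		.curve_id = ECC_CURVE_NIST_P192,
		.key = (char *)priv,
		.key_size = priv_len,
	};
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	struct scatterlist dst;
	u8 *packed = NULL, *pub = NULL;
	unsigned int len;
	int err;

	tfm = crypto_alloc_kpp("ecdh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* pack {kpp_secret, curve_id, key_size, key} for set_secret */
	len = crypto_ecdh_key_len(&params);
	packed = kmalloc(len, GFP_KERNEL);
	pub = kmalloc(2 * 24, GFP_KERNEL);	/* P-192 point: x || y */
	err = -ENOMEM;
	if (!packed || !pub)
		goto out;

	err = crypto_ecdh_encode_key(packed, len, &params);
	if (err)
		goto out;

	err = crypto_kpp_set_secret(tfm, packed, len);
	if (err)
		goto out;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req)
		goto out;

	/* a NULL src selects ecdh_compute_value()'s public-key path */
	sg_init_one(&dst, pub, 2 * 24);
	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, &dst, 2 * 24);
	err = crypto_kpp_generate_public_key(req);

out:
	kpp_request_free(req);
	kfree(pub);
	kzfree(packed);
	crypto_free_kpp(tfm);
	return err;
}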
crypto/ecdh_helper.c
@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2016, Intel Corporation
 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/string.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>

#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short))

static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
{
	memcpy(dst, src, sz);
	return dst + sz;
}

static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
{
	memcpy(dst, src, sz);
	return src + sz;
}

int crypto_ecdh_key_len(const struct ecdh *params)
{
	return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
}
EXPORT_SYMBOL_GPL(crypto_ecdh_key_len);

int crypto_ecdh_encode_key(char *buf, unsigned int len,
			   const struct ecdh *params)
{
	u8 *ptr = buf;
	struct kpp_secret secret = {
		.type = CRYPTO_KPP_SECRET_TYPE_ECDH,
		.len = len
	};

	if (unlikely(!buf))
		return -EINVAL;

	if (len != crypto_ecdh_key_len(params))
		return -EINVAL;

	ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
	ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
	ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
	ecdh_pack_data(ptr, params->key, params->key_size);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ecdh_encode_key);

int crypto_ecdh_decode_key(const char *buf, unsigned int len,
			   struct ecdh *params)
{
	const u8 *ptr = buf;
	struct kpp_secret secret;

	if (unlikely(!buf || len < ECDH_KPP_SECRET_MIN_SIZE))
		return -EINVAL;

	ptr = ecdh_unpack_data(&secret, ptr, sizeof(secret));
	if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
		return -EINVAL;

	ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
	ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
	if (secret.len != crypto_ecdh_key_len(params))
		return -EINVAL;

	/* Don't allocate memory. Set pointer to data
	 * within the given buffer
	 */
	params->key = (void *)ptr;

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ecdh_decode_key);
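The packed layout is therefore {struct kpp_secret, curve_id, key_size, key bytes}. A short round-trip sketch using only the helpers above (not part of the diff; the function name and the 24-byte key are illustrative):

static int ecdh_pack_roundtrip(void)
{
	static const char raw_key[24];	/* stand-in private key bytes */
	struct ecdh in = {
		.curve_id = ECC_CURVE_NIST_P192,
		.key = (char *)raw_key,
		.key_size = sizeof(raw_key),
	};
	struct ecdh out;
	unsigned int len = crypto_ecdh_key_len(&in);
	char *buf = kmalloc(len, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	err = crypto_ecdh_encode_key(buf, len, &in);
	if (!err)
		err = crypto_ecdh_decode_key(buf, len, &out);

	/* out.key points into buf, so buf must outlive any use of out */
	kzfree(buf);
	return err;
}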
crypto/echainiv.c
@@ -20,6 +20,7 @@
 #include <crypto/internal/geniv.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -112,13 +113,16 @@ static int echainiv_encrypt(struct aead_request *req)
 	info = req->iv;

 	if (req->src != req->dst) {
-		struct blkcipher_desc desc = {
-			.tfm = ctx->null,
-		};
+		SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);

-		err = crypto_blkcipher_encrypt(
-			&desc, req->dst, req->src,
-			req->assoclen + req->cryptlen);
+		skcipher_request_set_tfm(nreq, ctx->sknull);
+		skcipher_request_set_callback(nreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(nreq, req->src, req->dst,
+					   req->assoclen + req->cryptlen,
+					   NULL);
+
+		err = crypto_skcipher_encrypt(nreq);
 		if (err)
 			return err;
 	}
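The shape of this conversion, in isolation: a struct blkcipher_desc on the stack becomes an skcipher request on the stack. A sketch of the resulting copy-through-the-null-cipher idiom (the tfm is passed in here rather than taken from a context; the function name is hypothetical):

static int null_skcipher_copy(struct crypto_skcipher *sknull,
			      struct scatterlist *src,
			      struct scatterlist *dst,
			      unsigned int nbytes, u32 flags)
{
	SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

	skcipher_request_set_tfm(nreq, sknull);
	skcipher_request_set_callback(nreq, flags, NULL, NULL);
	skcipher_request_set_crypt(nreq, src, dst, nbytes, NULL);

	/* the "null" cipher simply copies src to dst */
	return crypto_skcipher_encrypt(nreq);
}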
crypto/eseqiv.c
@@ -1,242 +0,0 @@
/*
 * eseqiv: Encrypted Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text. This algorithm requires that the block size be equal
 * to the IV size. It is mainly useful for CBC.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct eseqiv_request_ctx {
	struct scatterlist src[2];
	struct scatterlist dst[2];
	char tail[];
};

struct eseqiv_ctx {
	spinlock_t lock;
	unsigned int reqoff;
	char salt[];
};

static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);

	memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
			 crypto_ablkcipher_alignmask(geniv) + 1),
	       crypto_ablkcipher_ivsize(geniv));
}

static void eseqiv_complete(struct crypto_async_request *base, int err)
{
	struct skcipher_givcrypt_request *req = base->data;

	if (err)
		goto out;

	eseqiv_complete2(req);

out:
	skcipher_givcrypt_complete(req, err);
}

static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
	struct ablkcipher_request *subreq;
	crypto_completion_t compl;
	void *data;
	struct scatterlist *osrc, *odst;
	struct scatterlist *dst;
	struct page *srcp;
	struct page *dstp;
	u8 *giv;
	u8 *vsrc;
	u8 *vdst;
	__be64 seq;
	unsigned int ivsize;
	unsigned int len;
	int err;

	subreq = (void *)(reqctx->tail + ctx->reqoff);
	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

	giv = req->giv;
	compl = req->creq.base.complete;
	data = req->creq.base.data;

	osrc = req->creq.src;
	odst = req->creq.dst;
	srcp = sg_page(osrc);
	dstp = sg_page(odst);
	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;

	ivsize = crypto_ablkcipher_ivsize(geniv);

	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
		giv = PTR_ALIGN((u8 *)reqctx->tail,
				crypto_ablkcipher_alignmask(geniv) + 1);
		compl = eseqiv_complete;
		data = req;
	}

	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
					data);

	sg_init_table(reqctx->src, 2);
	sg_set_buf(reqctx->src, giv, ivsize);
	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);

	dst = reqctx->src;
	if (osrc != odst) {
		sg_init_table(reqctx->dst, 2);
		sg_set_buf(reqctx->dst, giv, ivsize);
		scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);

		dst = reqctx->dst;
	}

	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
				     req->creq.nbytes + ivsize,
				     req->creq.info);

	memcpy(req->creq.info, ctx->salt, ivsize);

	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto out;

	if (giv != req->giv)
		eseqiv_complete2(req);

out:
	return err;
}

static int eseqiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	unsigned long alignmask;
	unsigned int reqsize;
	int err;

	spin_lock_init(&ctx->lock);

	alignmask = crypto_tfm_ctx_alignment() - 1;
	reqsize = sizeof(struct eseqiv_request_ctx);

	if (alignmask & reqsize) {
		alignmask &= reqsize;
		alignmask--;
	}

	alignmask = ~alignmask;
	alignmask &= crypto_ablkcipher_alignmask(geniv);

	reqsize += alignmask;
	reqsize += crypto_ablkcipher_ivsize(geniv);
	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());

	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);

	tfm->crt_ablkcipher.reqsize = reqsize +
				      sizeof(struct ablkcipher_request);

	err = 0;
	if (!crypto_get_default_rng()) {
		crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
					   crypto_ablkcipher_ivsize(geniv));
		crypto_put_default_rng();
	}

	return err ?: skcipher_geniv_init(tfm);
}

static struct crypto_template eseqiv_tmpl;

static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	int err;

	inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto out;

	err = -EINVAL;
	if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
		goto free_inst;

	inst->alg.cra_init = eseqiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;

free_inst:
	skcipher_geniv_free(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_template eseqiv_tmpl = {
	.name = "eseqiv",
	.alloc = eseqiv_alloc,
	.free = skcipher_geniv_free,
	.module = THIS_MODULE,
};

static int __init eseqiv_module_init(void)
{
	return crypto_register_template(&eseqiv_tmpl);
}

static void __exit eseqiv_module_exit(void)
{
	crypto_unregister_template(&eseqiv_tmpl);
}

module_init(eseqiv_module_init);
module_exit(eseqiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
MODULE_ALIAS_CRYPTO("eseqiv");
crypto/gcm.c
@@ -29,7 +29,7 @@ struct gcm_instance_ctx {
 };

 struct crypto_gcm_ctx {
-	struct crypto_ablkcipher *ctr;
+	struct crypto_skcipher *ctr;
 	struct crypto_ahash *ghash;
 };

@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {

 struct crypto_rfc4543_ctx {
 	struct crypto_aead *child;
-	struct crypto_blkcipher *null;
+	struct crypto_skcipher *null;
 	u8 nonce[4];
 };

@@ -74,7 +74,7 @@ struct crypto_gcm_req_priv_ctx {
 	struct crypto_gcm_ghash_ctx ghash_ctx;
 	union {
 		struct ahash_request ahreq;
-		struct ablkcipher_request abreq;
+		struct skcipher_request skreq;
 	} u;
 };

@@ -114,7 +114,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 {
 	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	struct crypto_ahash *ghash = ctx->ghash;
-	struct crypto_ablkcipher *ctr = ctx->ctr;
+	struct crypto_skcipher *ctr = ctx->ctr;
 	struct {
 		be128 hash;
 		u8 iv[8];
@@ -122,35 +122,35 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 		struct crypto_gcm_setkey_result result;

 		struct scatterlist sg[1];
-		struct ablkcipher_request req;
+		struct skcipher_request req;
 	} *data;
 	int err;

-	crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
-	crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
-				    CRYPTO_TFM_REQ_MASK);
-	err = crypto_ablkcipher_setkey(ctr, key, keylen);
-	crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
+	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
+				  CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(ctr, key, keylen);
+	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
 			      CRYPTO_TFM_RES_MASK);
 	if (err)
 		return err;

-	data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
+	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
 		       GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;

 	init_completion(&data->result.completion);
 	sg_init_one(data->sg, &data->hash, sizeof(data->hash));
-	ablkcipher_request_set_tfm(&data->req, ctr);
-	ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
-					CRYPTO_TFM_REQ_MAY_BACKLOG,
-					crypto_gcm_setkey_done,
-					&data->result);
-	ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
-				     sizeof(data->hash), data->iv);
+	skcipher_request_set_tfm(&data->req, ctr);
+	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+				      CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      crypto_gcm_setkey_done,
+				      &data->result);
+	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
+				   sizeof(data->hash), data->iv);

-	err = crypto_ablkcipher_encrypt(&data->req);
+	err = crypto_skcipher_encrypt(&data->req);
 	if (err == -EINPROGRESS || err == -EBUSY) {
 		err = wait_for_completion_interruptible(
 			&data->result.completion);
@@ -223,13 +223,13 @@ static void crypto_gcm_init_crypt(struct aead_request *req,
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-	struct ablkcipher_request *ablk_req = &pctx->u.abreq;
+	struct skcipher_request *skreq = &pctx->u.skreq;
 	struct scatterlist *dst;

 	dst = req->src == req->dst ? pctx->src : pctx->dst;

-	ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
-	ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
+	skcipher_request_set_tfm(skreq, ctx->ctr);
+	skcipher_request_set_crypt(skreq, pctx->src, dst,
 				     cryptlen + sizeof(pctx->auth_tag),
 				     pctx->iv);
 }
@@ -494,14 +494,14 @@ out:
 static int crypto_gcm_encrypt(struct aead_request *req)
 {
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-	struct ablkcipher_request *abreq = &pctx->u.abreq;
+	struct skcipher_request *skreq = &pctx->u.skreq;
 	u32 flags = aead_request_flags(req);

 	crypto_gcm_init_common(req);
 	crypto_gcm_init_crypt(req, req->cryptlen);
-	ablkcipher_request_set_callback(abreq, flags, gcm_encrypt_done, req);
+	skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req);

-	return crypto_ablkcipher_encrypt(abreq) ?:
+	return crypto_skcipher_encrypt(skreq) ?:
 		gcm_encrypt_continue(req, flags);
 }

@@ -533,12 +533,12 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
 static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
 {
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-	struct ablkcipher_request *abreq = &pctx->u.abreq;
+	struct skcipher_request *skreq = &pctx->u.skreq;
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;

 	crypto_gcm_init_crypt(req, gctx->cryptlen);
-	ablkcipher_request_set_callback(abreq, flags, gcm_decrypt_done, req);
-	return crypto_ablkcipher_decrypt(abreq) ?: crypto_gcm_verify(req);
+	skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req);
+	return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
 }

 static int crypto_gcm_decrypt(struct aead_request *req)
@@ -566,7 +566,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
 	struct aead_instance *inst = aead_alg_instance(tfm);
 	struct gcm_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
-	struct crypto_ablkcipher *ctr;
+	struct crypto_skcipher *ctr;
 	struct crypto_ahash *ghash;
 	unsigned long align;
 	int err;
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(ghash))
 		return PTR_ERR(ghash);

-	ctr = crypto_spawn_skcipher(&ictx->ctr);
+	ctr = crypto_spawn_skcipher2(&ictx->ctr);
 	err = PTR_ERR(ctr);
 	if (IS_ERR(ctr))
 		goto err_free_hash;
@@ -587,8 +587,8 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
 	crypto_aead_set_reqsize(tfm,
 		align + offsetof(struct crypto_gcm_req_priv_ctx, u) +
-		max(sizeof(struct ablkcipher_request) +
-		    crypto_ablkcipher_reqsize(ctr),
+		max(sizeof(struct skcipher_request) +
+		    crypto_skcipher_reqsize(ctr),
 		    sizeof(struct ahash_request) +
 		    crypto_ahash_reqsize(ghash)));

@@ -604,7 +604,7 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
 	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);

 	crypto_free_ahash(ctx->ghash);
-	crypto_free_ablkcipher(ctx->ctr);
+	crypto_free_skcipher(ctx->ctr);
 }

 static void crypto_gcm_free(struct aead_instance *inst)
@@ -624,7 +624,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 {
 	struct crypto_attr_type *algt;
 	struct aead_instance *inst;
-	struct crypto_alg *ctr;
+	struct skcipher_alg *ctr;
 	struct crypto_alg *ghash_alg;
 	struct hash_alg_common *ghash;
 	struct gcm_instance_ctx *ctx;
@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,

 	ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
 				    CRYPTO_ALG_TYPE_HASH,
-				    CRYPTO_ALG_TYPE_AHASH_MASK);
+				    CRYPTO_ALG_TYPE_AHASH_MASK |
+				    crypto_requires_sync(algt->type,
+							 algt->mask));
 	if (IS_ERR(ghash_alg))
 		return PTR_ERR(ghash_alg);

@@ -661,41 +663,42 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 		goto err_drop_ghash;

 	crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
-				   crypto_requires_sync(algt->type,
-							algt->mask));
+	err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0,
+				    crypto_requires_sync(algt->type,
+							 algt->mask));
 	if (err)
 		goto err_drop_ghash;

-	ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
+	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);

 	/* We only support 16-byte blocks. */
-	if (ctr->cra_ablkcipher.ivsize != 16)
+	if (crypto_skcipher_alg_ivsize(ctr) != 16)
 		goto out_put_ctr;

 	/* Not a stream cipher? */
 	err = -EINVAL;
-	if (ctr->cra_blocksize != 1)
+	if (ctr->base.cra_blocksize != 1)
 		goto out_put_ctr;

 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-		     "gcm_base(%s,%s)", ctr->cra_driver_name,
+		     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
 		     ghash_alg->cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;

 	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);

-	inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) &
-				   CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags = (ghash->base.cra_flags |
+				    ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (ghash->base.cra_priority +
-				       ctr->cra_priority) / 2;
+				       ctr->base.cra_priority) / 2;
 	inst->alg.base.cra_blocksize = 1;
 	inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
-				       ctr->cra_alignmask;
+				       ctr->base.cra_alignmask;
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
 	inst->alg.ivsize = 12;
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
 	inst->alg.maxauthsize = 16;
 	inst->alg.init = crypto_gcm_init_tfm;
 	inst->alg.exit = crypto_gcm_exit_tfm;
@@ -980,6 +983,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);

 	inst->alg.ivsize = 8;
+	inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

 	inst->alg.init = crypto_rfc4106_init_tfm;
@@ -1084,11 +1088,13 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
 	unsigned int authsize = crypto_aead_authsize(aead);
 	unsigned int nbytes = req->assoclen + req->cryptlen -
 			      (enc ? 0 : authsize);
-	struct blkcipher_desc desc = {
-		.tfm = ctx->null,
-	};
+	SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);

-	return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes);
+	skcipher_request_set_tfm(nreq, ctx->null);
+	skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
+	skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
+
+	return crypto_skcipher_encrypt(nreq);
 }

 static int crypto_rfc4543_encrypt(struct aead_request *req)
@@ -1108,7 +1114,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
 	struct crypto_aead_spawn *spawn = &ictx->aead;
 	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_aead *aead;
-	struct crypto_blkcipher *null;
+	struct crypto_skcipher *null;
 	unsigned long align;
 	int err = 0;

@@ -1116,7 +1122,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(aead))
 		return PTR_ERR(aead);

-	null = crypto_get_default_null_skcipher();
+	null = crypto_get_default_null_skcipher2();
 	err = PTR_ERR(null);
 	if (IS_ERR(null))
 		goto err_free_aead;
@@ -1144,7 +1150,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
 	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);

 	crypto_free_aead(ctx->child);
-	crypto_put_default_null_skcipher();
+	crypto_put_default_null_skcipher2();
 }

 static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1219,6 +1225,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
 	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);

 	inst->alg.ivsize = 8;
+	inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

 	inst->alg.init = crypto_rfc4543_init_tfm;
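crypto_gcm_setkey() keeps its existing trick for running one synchronous-looking encryption over a possibly asynchronous skcipher: fire the request, and if it went async, sleep on a completion that the callback signals. The same pattern in isolation (struct and function names here are illustrative, not from the diff; the request's callback is assumed to have been set to sync_done with res as its data):

struct sync_result {
	struct completion completion;
	int err;
};

static void sync_done(struct crypto_async_request *req, int err)
{
	struct sync_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* backlog notification; keep waiting */

	res->err = err;
	complete(&res->completion);
}

static int encrypt_and_wait(struct skcipher_request *req,
			    struct sync_result *res)
{
	int err;

	init_completion(&res->completion);
	err = crypto_skcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		err = wait_for_completion_interruptible(&res->completion);
		if (!err)
			err = res->err;
	}
	return err;
}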
|
|
|
@ -87,24 +87,28 @@ void jent_memcpy(void *dest, const void *src, unsigned int n)
|
||||||
memcpy(dest, src, n);
|
memcpy(dest, src, n);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Obtain a high-resolution time stamp value. The time stamp is used to measure
|
||||||
|
* the execution time of a given code path and its variations. Hence, the time
|
||||||
|
* stamp must have a sufficiently high resolution.
|
||||||
|
*
|
||||||
|
* Note, if the function returns zero because a given architecture does not
|
||||||
|
* implement a high-resolution time stamp, the RNG code's runtime test
|
||||||
|
* will detect it and will not produce output.
|
||||||
|
*/
|
||||||
void jent_get_nstime(__u64 *out)
|
void jent_get_nstime(__u64 *out)
|
||||||
{
|
{
|
||||||
struct timespec ts;
|
|
||||||
__u64 tmp = 0;
|
__u64 tmp = 0;
|
||||||
|
|
||||||
tmp = random_get_entropy();
|
tmp = random_get_entropy();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If random_get_entropy does not return a value (which is possible on,
|
* If random_get_entropy does not return a value, i.e. it is not
|
||||||
* for example, MIPS), invoke __getnstimeofday
|
* implemented for a given architecture, use a clock source.
|
||||||
* hoping that there are timers we can work with.
|
* hoping that there are timers we can work with.
|
||||||
*/
|
*/
|
||||||
if ((0 == tmp) &&
|
if (tmp == 0)
|
||||||
(0 == __getnstimeofday(&ts))) {
|
tmp = ktime_get_ns();
|
||||||
tmp = ts.tv_sec;
|
|
||||||
tmp = tmp << 32;
|
|
||||||
tmp = tmp | ts.tv_nsec;
|
|
||||||
}
|
|
||||||
|
|
||||||
*out = tmp;
|
*out = tmp;
|
||||||
}
|
}
|
||||||
|
|
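The new fallback logic reduces to: prefer the raw cycle counter, and fall back to the monotonic nanosecond clock when the architecture returns 0. As a sketch (not part of the diff; the helper name is hypothetical):

static inline __u64 hires_timestamp(void)
{
	__u64 t = random_get_entropy();	/* cycle counter; may be 0 */

	return t ? t : ktime_get_ns();	/* monotonic ns clock fallback */
}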
|
@ -0,0 +1,123 @@
|
||||||
|
/*
|
||||||
|
* Key-agreement Protocol Primitives (KPP)
|
||||||
|
*
|
||||||
|
* Copyright (c) 2016, Intel Corporation
|
||||||
|
* Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License as published by the Free
|
||||||
|
* Software Foundation; either version 2 of the License, or (at your option)
|
||||||
|
* any later version.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
#include <linux/errno.h>
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <linux/seq_file.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
#include <linux/string.h>
|
||||||
|
#include <linux/crypto.h>
|
||||||
|
#include <crypto/algapi.h>
|
||||||
|
#include <linux/cryptouser.h>
|
||||||
|
#include <net/netlink.h>
|
||||||
|
#include <crypto/kpp.h>
|
||||||
|
#include <crypto/internal/kpp.h>
|
||||||
|
#include "internal.h"
|
||||||
|
|
||||||
|
#ifdef CONFIG_NET
|
||||||
|
static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||||
|
{
|
||||||
|
struct crypto_report_kpp rkpp;
|
||||||
|
|
||||||
|
strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
|
||||||
|
|
||||||
|
if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
|
||||||
|
sizeof(struct crypto_report_kpp), &rkpp))
|
||||||
|
goto nla_put_failure;
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
nla_put_failure:
|
||||||
|
return -EMSGSIZE;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
|
||||||
|
{
|
||||||
|
return -ENOSYS;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
|
||||||
|
__attribute__ ((unused));
|
||||||
|
|
||||||
|
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
|
||||||
|
{
|
||||||
|
seq_puts(m, "type : kpp\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
static void crypto_kpp_exit_tfm(struct crypto_tfm *tfm)
|
||||||
|
{
|
||||||
|
struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
|
||||||
|
struct kpp_alg *alg = crypto_kpp_alg(kpp);
|
||||||
|
|
||||||
|
alg->exit(kpp);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int crypto_kpp_init_tfm(struct crypto_tfm *tfm)
|
||||||
|
{
|
||||||
|
struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
|
||||||
|
struct kpp_alg *alg = crypto_kpp_alg(kpp);
|
||||||
|
|
||||||
|
if (alg->exit)
|
||||||
|
kpp->base.exit = crypto_kpp_exit_tfm;
|
||||||
|
|
||||||
|
if (alg->init)
|
||||||
|
return alg->init(kpp);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct crypto_type crypto_kpp_type = {
|
||||||
|
.extsize = crypto_alg_extsize,
|
||||||
|
.init_tfm = crypto_kpp_init_tfm,
|
||||||
|
#ifdef CONFIG_PROC_FS
|
||||||
|
.show = crypto_kpp_show,
|
||||||
|
#endif
|
||||||
|
.report = crypto_kpp_report,
|
||||||
|
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
|
||||||
|
.maskset = CRYPTO_ALG_TYPE_MASK,
|
||||||
|
.type = CRYPTO_ALG_TYPE_KPP,
|
||||||
|
.tfmsize = offsetof(struct crypto_kpp, base),
|
||||||
|
};
|
||||||
|
|
||||||
|
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
|
||||||
|
{
|
||||||
|
return crypto_alloc_tfm(alg_name, &crypto_kpp_type, type, mask);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(crypto_alloc_kpp);
|
||||||
|
|
||||||
|
static void kpp_prepare_alg(struct kpp_alg *alg)
|
||||||
|
{
|
||||||
|
struct crypto_alg *base = &alg->base;
|
||||||
|
|
||||||
|
base->cra_type = &crypto_kpp_type;
|
||||||
|
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
|
||||||
|
base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
|
||||||
|
}
|
||||||
|
|
||||||
|
int crypto_register_kpp(struct kpp_alg *alg)
|
||||||
|
{
|
||||||
|
struct crypto_alg *base = &alg->base;
|
||||||
|
|
||||||
|
kpp_prepare_alg(alg);
|
||||||
|
return crypto_register_alg(base);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(crypto_register_kpp);
|
||||||
|
|
||||||
|
void crypto_unregister_kpp(struct kpp_alg *alg)
|
||||||
|
{
|
||||||
|
crypto_unregister_alg(&alg->base);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(crypto_unregister_kpp);
|
||||||
|
|
||||||
|
MODULE_LICENSE("GPL");
|
||||||
|
MODULE_DESCRIPTION("Key-agreement Protocol Primitives");
|
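What a provider hands to crypto_register_kpp(), reduced to a skeleton (all my_* names are hypothetical; the ecdh implementation above is the first real user):

static int my_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
{
	return 0;	/* decode and stash the private material here */
}

static int my_generate_public_key(struct kpp_request *req)
{
	return -EOPNOTSUPP;	/* write the public value to req->dst */
}

static int my_compute_shared_secret(struct kpp_request *req)
{
	return -EOPNOTSUPP;	/* combine req->src with the stored secret */
}

static int my_max_size(struct crypto_kpp *tfm)
{
	return 32;	/* largest output this transform can produce */
}

static struct kpp_alg my_kpp = {
	.set_secret		= my_set_secret,
	.generate_public_key	= my_generate_public_key,
	.compute_shared_secret	= my_compute_shared_secret,
	.max_size		= my_max_size,
	.base = {
		.cra_name	= "my-kpp",
		.cra_driver_name = "my-kpp-generic",
		.cra_priority	= 100,
		.cra_module	= THIS_MODULE,
		.cra_ctxsize	= 0,
	},
};

/* registered with crypto_register_kpp(&my_kpp) from module init */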
132
crypto/mcryptd.c
132
crypto/mcryptd.c
|
@ -41,7 +41,7 @@ struct mcryptd_flush_list {
|
||||||
static struct mcryptd_flush_list __percpu *mcryptd_flist;
|
static struct mcryptd_flush_list __percpu *mcryptd_flist;
|
||||||
|
|
||||||
struct hashd_instance_ctx {
|
struct hashd_instance_ctx {
|
||||||
struct crypto_shash_spawn spawn;
|
struct crypto_ahash_spawn spawn;
|
||||||
struct mcryptd_queue *queue;
|
struct mcryptd_queue *queue;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -272,18 +272,18 @@ static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
|
||||||
{
|
{
|
||||||
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
|
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
|
||||||
struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
|
struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
|
||||||
struct crypto_shash_spawn *spawn = &ictx->spawn;
|
struct crypto_ahash_spawn *spawn = &ictx->spawn;
|
||||||
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
|
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||||
struct crypto_shash *hash;
|
struct crypto_ahash *hash;
|
||||||
|
|
||||||
hash = crypto_spawn_shash(spawn);
|
hash = crypto_spawn_ahash(spawn);
|
||||||
if (IS_ERR(hash))
|
if (IS_ERR(hash))
|
||||||
return PTR_ERR(hash);
|
return PTR_ERR(hash);
|
||||||
|
|
||||||
ctx->child = hash;
|
ctx->child = hash;
|
||||||
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
|
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
|
||||||
sizeof(struct mcryptd_hash_request_ctx) +
|
sizeof(struct mcryptd_hash_request_ctx) +
|
||||||
crypto_shash_descsize(hash));
|
crypto_ahash_reqsize(hash));
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -291,21 +291,21 @@ static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
|
||||||
{
|
{
|
||||||
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
|
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||||
|
|
||||||
crypto_free_shash(ctx->child);
|
crypto_free_ahash(ctx->child);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
|
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
|
||||||
const u8 *key, unsigned int keylen)
|
const u8 *key, unsigned int keylen)
|
||||||
{
|
{
|
||||||
struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
|
struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
|
||||||
struct crypto_shash *child = ctx->child;
|
struct crypto_ahash *child = ctx->child;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||||
crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
|
crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
|
||||||
CRYPTO_TFM_REQ_MASK);
|
CRYPTO_TFM_REQ_MASK);
|
||||||
err = crypto_shash_setkey(child, key, keylen);
|
err = crypto_ahash_setkey(child, key, keylen);
|
||||||
crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
|
crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
|
||||||
CRYPTO_TFM_RES_MASK);
|
CRYPTO_TFM_RES_MASK);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@ -331,20 +331,20 @@ static int mcryptd_hash_enqueue(struct ahash_request *req,
|
||||||
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
|
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
|
||||||
{
|
{
|
||||||
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
|
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
|
||||||
struct crypto_shash *child = ctx->child;
|
struct crypto_ahash *child = ctx->child;
|
||||||
struct ahash_request *req = ahash_request_cast(req_async);
|
struct ahash_request *req = ahash_request_cast(req_async);
|
||||||
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
||||||
struct shash_desc *desc = &rctx->desc;
|
struct ahash_request *desc = &rctx->areq;
|
||||||
|
|
||||||
if (unlikely(err == -EINPROGRESS))
|
if (unlikely(err == -EINPROGRESS))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
desc->tfm = child;
|
ahash_request_set_tfm(desc, child);
|
||||||
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
|
ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||||
|
rctx->complete, req_async);
|
||||||
|
|
||||||
err = crypto_shash_init(desc);
|
rctx->out = req->result;
|
||||||
|
err = crypto_ahash_init(desc);
|
||||||
req->base.complete = rctx->complete;
|
|
||||||
|
|
||||||
out:
|
out:
|
||||||
local_bh_disable();
|
local_bh_disable();
|
||||||
|
@ -365,7 +365,8 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
|
||||||
if (unlikely(err == -EINPROGRESS))
|
if (unlikely(err == -EINPROGRESS))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
err = shash_ahash_mcryptd_update(req, &rctx->desc);
|
rctx->out = req->result;
|
||||||
|
err = ahash_mcryptd_update(&rctx->areq);
|
||||||
if (err) {
|
if (err) {
|
||||||
req->base.complete = rctx->complete;
|
req->base.complete = rctx->complete;
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -391,7 +392,8 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
|
||||||
if (unlikely(err == -EINPROGRESS))
|
if (unlikely(err == -EINPROGRESS))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
err = shash_ahash_mcryptd_final(req, &rctx->desc);
|
rctx->out = req->result;
|
||||||
|
err = ahash_mcryptd_final(&rctx->areq);
|
||||||
if (err) {
|
if (err) {
|
||||||
req->base.complete = rctx->complete;
|
req->base.complete = rctx->complete;
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -416,8 +418,8 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
|
||||||
|
|
||||||
if (unlikely(err == -EINPROGRESS))
|
if (unlikely(err == -EINPROGRESS))
|
||||||
goto out;
|
goto out;
|
||||||
|
rctx->out = req->result;
|
||||||
err = shash_ahash_mcryptd_finup(req, &rctx->desc);
|
err = ahash_mcryptd_finup(&rctx->areq);
|
||||||
|
|
||||||
if (err) {
|
if (err) {
|
||||||
req->base.complete = rctx->complete;
|
req->base.complete = rctx->complete;
|
||||||
|
@ -439,25 +441,21 @@ static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
|
||||||
static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
|
static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
|
||||||
{
|
{
|
||||||
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
|
struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
|
||||||
struct crypto_shash *child = ctx->child;
|
struct crypto_ahash *child = ctx->child;
|
||||||
struct ahash_request *req = ahash_request_cast(req_async);
|
struct ahash_request *req = ahash_request_cast(req_async);
|
||||||
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
||||||
struct shash_desc *desc = &rctx->desc;
|
struct ahash_request *desc = &rctx->areq;
|
||||||
|
|
||||||
if (unlikely(err == -EINPROGRESS))
|
if (unlikely(err == -EINPROGRESS))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
desc->tfm = child;
|
ahash_request_set_tfm(desc, child);
|
||||||
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */
|
ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||||
|
rctx->complete, req_async);
|
||||||
|
|
||||||
err = shash_ahash_mcryptd_digest(req, desc);
|
rctx->out = req->result;
|
||||||
|
err = ahash_mcryptd_digest(desc);
|
||||||
|
|
||||||
if (err) {
|
|
||||||
req->base.complete = rctx->complete;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
return;
|
|
||||||
out:
|
out:
|
||||||
local_bh_disable();
|
local_bh_disable();
|
||||||
rctx->complete(&req->base, err);
|
rctx->complete(&req->base, err);
|
||||||
|
@ -473,14 +471,14 @@ static int mcryptd_hash_export(struct ahash_request *req, void *out)
|
||||||
{
|
{
|
||||||
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
||||||
|
|
||||||
return crypto_shash_export(&rctx->desc, out);
|
return crypto_ahash_export(&rctx->areq, out);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mcryptd_hash_import(struct ahash_request *req, const void *in)
|
static int mcryptd_hash_import(struct ahash_request *req, const void *in)
|
||||||
{
|
{
|
||||||
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
||||||
|
|
||||||
return crypto_shash_import(&rctx->desc, in);
|
return crypto_ahash_import(&rctx->areq, in);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||||
|
@ -488,7 +486,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||||
{
|
{
|
||||||
struct hashd_instance_ctx *ctx;
|
struct hashd_instance_ctx *ctx;
|
||||||
struct ahash_instance *inst;
|
struct ahash_instance *inst;
|
||||||
struct shash_alg *salg;
|
struct hash_alg_common *halg;
|
||||||
struct crypto_alg *alg;
|
struct crypto_alg *alg;
|
||||||
u32 type = 0;
|
u32 type = 0;
|
||||||
u32 mask = 0;
|
u32 mask = 0;
|
||||||
|
@ -496,11 +494,11 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||||
|
|
||||||
mcryptd_check_internal(tb, &type, &mask);
|
mcryptd_check_internal(tb, &type, &mask);
|
||||||
|
|
||||||
salg = shash_attr_alg(tb[1], type, mask);
|
halg = ahash_attr_alg(tb[1], type, mask);
|
||||||
if (IS_ERR(salg))
|
if (IS_ERR(halg))
|
||||||
return PTR_ERR(salg);
|
return PTR_ERR(halg);
|
||||||
|
|
||||||
alg = &salg->base;
|
alg = &halg->base;
|
||||||
pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
|
pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
|
||||||
inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
|
inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
|
||||||
sizeof(*ctx));
|
sizeof(*ctx));
|
||||||
|
@ -511,7 +509,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||||
ctx = ahash_instance_ctx(inst);
|
ctx = ahash_instance_ctx(inst);
|
||||||
ctx->queue = queue;
|
ctx->queue = queue;
|
||||||
|
|
||||||
err = crypto_init_shash_spawn(&ctx->spawn, salg,
|
err = crypto_init_ahash_spawn(&ctx->spawn, halg,
|
||||||
ahash_crypto_instance(inst));
|
ahash_crypto_instance(inst));
|
||||||
if (err)
|
if (err)
|
||||||
goto out_free_inst;
|
goto out_free_inst;
|
||||||
|
@ -521,8 +519,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
|
||||||
type |= CRYPTO_ALG_INTERNAL;
|
type |= CRYPTO_ALG_INTERNAL;
|
||||||
inst->alg.halg.base.cra_flags = type;
|
inst->alg.halg.base.cra_flags = type;
|
||||||
|
|
||||||
-	inst->alg.halg.digestsize = salg->digestsize;
-	inst->alg.halg.statesize = salg->statesize;
+	inst->alg.halg.digestsize = halg->digestsize;
+	inst->alg.halg.statesize = halg->statesize;
 	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
 
 	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
@@ -539,7 +537,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 
 	err = ahash_register_instance(tmpl, inst);
 	if (err) {
-		crypto_drop_shash(&ctx->spawn);
+		crypto_drop_ahash(&ctx->spawn);
 out_free_inst:
 		kfree(inst);
 	}
@@ -575,7 +573,7 @@ static void mcryptd_free(struct crypto_instance *inst)
 
 	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AHASH:
-		crypto_drop_shash(&hctx->spawn);
+		crypto_drop_ahash(&hctx->spawn);
 		kfree(ahash_instance(inst));
 		return;
 	default:
@@ -612,55 +610,38 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
 }
 EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
 
-int shash_ahash_mcryptd_digest(struct ahash_request *req,
-			       struct shash_desc *desc)
+int ahash_mcryptd_digest(struct ahash_request *desc)
 {
 	int err;
 
-	err = crypto_shash_init(desc) ?:
-	      shash_ahash_mcryptd_finup(req, desc);
+	err = crypto_ahash_init(desc) ?:
+	      ahash_mcryptd_finup(desc);
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);
 
-int shash_ahash_mcryptd_update(struct ahash_request *req,
-			       struct shash_desc *desc)
+int ahash_mcryptd_update(struct ahash_request *desc)
 {
-	struct crypto_shash *tfm = desc->tfm;
-	struct shash_alg *shash = crypto_shash_alg(tfm);
-
 	/* alignment is to be done by multi-buffer crypto algorithm if needed */
 
-	return shash->update(desc, NULL, 0);
+	return crypto_ahash_update(desc);
 }
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);
 
-int shash_ahash_mcryptd_finup(struct ahash_request *req,
-			      struct shash_desc *desc)
+int ahash_mcryptd_finup(struct ahash_request *desc)
 {
-	struct crypto_shash *tfm = desc->tfm;
-	struct shash_alg *shash = crypto_shash_alg(tfm);
-
 	/* alignment is to be done by multi-buffer crypto algorithm if needed */
 
-	return shash->finup(desc, NULL, 0, req->result);
+	return crypto_ahash_finup(desc);
 }
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);
 
-int shash_ahash_mcryptd_final(struct ahash_request *req,
-			      struct shash_desc *desc)
+int ahash_mcryptd_final(struct ahash_request *desc)
 {
-	struct crypto_shash *tfm = desc->tfm;
-	struct shash_alg *shash = crypto_shash_alg(tfm);
-
 	/* alignment is to be done by multi-buffer crypto algorithm if needed */
 
-	return shash->final(desc, req->result);
+	return crypto_ahash_final(desc);
 }
-EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);
 
-struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
+struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
 {
 	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 
@@ -668,12 +649,12 @@ struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
 }
 EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
 
-struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
+struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
 {
 	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-	return &rctx->desc;
+	return &rctx->areq;
 }
-EXPORT_SYMBOL_GPL(mcryptd_shash_desc);
+EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
 
 void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
 {
@@ -681,7 +662,6 @@ void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
 }
 EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
 
-
 static int __init mcryptd_init(void)
 {
 	int err, cpu;
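
The converted mcryptd helpers above chain their two steps with GCC's binary "?:" extension, so the first step's error code short-circuits past the second. A minimal userspace sketch of the idiom, where step1() and step2() are hypothetical stand-ins for crypto_ahash_init() and ahash_mcryptd_finup():

/* Sketch of the GCC "a ?: b" error-chaining idiom: a is returned when
 * non-zero (a failure), otherwise b is evaluated. Compile with gcc/clang. */
#include <stdio.h>

static int step1(void) { return 0; }    /* succeeds */
static int step2(void) { return -22; }  /* fails, e.g. -EINVAL */

int main(void)
{
	/* The first failing step's error code propagates unchanged. */
	int err = step1() ?: step2();

	printf("err = %d\n", err); /* prints -22 */
	return 0;
}
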
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
@@ -92,19 +92,17 @@ static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
 
 struct pkcs1pad_ctx {
 	struct crypto_akcipher *child;
-	const char *hash_name;
 	unsigned int key_size;
 };
 
 struct pkcs1pad_inst_ctx {
 	struct crypto_akcipher_spawn spawn;
-	const char *hash_name;
+	const struct rsa_asn1_template *digest_info;
 };
 
 struct pkcs1pad_request {
-	struct scatterlist in_sg[3], out_sg[2];
+	struct scatterlist in_sg[2], out_sg[1];
 	uint8_t *in_buf, *out_buf;
 
 	struct akcipher_request child_req;
 };
 
@@ -112,40 +110,48 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
 		unsigned int keylen)
 {
 	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
-	int err, size;
+	int err;
+
+	ctx->key_size = 0;
 
 	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+	if (err)
+		return err;
 
-	if (!err) {
-		/* Find out new modulus size from rsa implementation */
-		size = crypto_akcipher_maxsize(ctx->child);
+	/* Find out new modulus size from rsa implementation */
+	err = crypto_akcipher_maxsize(ctx->child);
+	if (err < 0)
+		return err;
 
-		ctx->key_size = size > 0 ? size : 0;
-		if (size <= 0)
-			err = size;
-	}
+	if (err > PAGE_SIZE)
+		return -ENOTSUPP;
 
-	return err;
+	ctx->key_size = err;
+	return 0;
 }
 
 static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
 		unsigned int keylen)
 {
 	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
-	int err, size;
+	int err;
+
+	ctx->key_size = 0;
 
 	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+	if (err)
+		return err;
 
-	if (!err) {
-		/* Find out new modulus size from rsa implementation */
-		size = crypto_akcipher_maxsize(ctx->child);
+	/* Find out new modulus size from rsa implementation */
+	err = crypto_akcipher_maxsize(ctx->child);
+	if (err < 0)
+		return err;
 
-		ctx->key_size = size > 0 ? size : 0;
-		if (size <= 0)
-			err = size;
-	}
+	if (err > PAGE_SIZE)
+		return -ENOTSUPP;
 
-	return err;
+	ctx->key_size = err;
+	return 0;
 }
 
 static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
@@ -164,19 +170,10 @@ static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
 static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
 		struct scatterlist *next)
 {
-	int nsegs = next ? 1 : 0;
-
-	if (offset_in_page(buf) + len <= PAGE_SIZE) {
-		nsegs += 1;
-		sg_init_table(sg, nsegs);
-		sg_set_buf(sg, buf, len);
-	} else {
-		nsegs += 2;
-		sg_init_table(sg, nsegs);
-		sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
-		sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
-				offset_in_page(buf) + len - PAGE_SIZE);
-	}
+	int nsegs = next ? 2 : 1;
+
+	sg_init_table(sg, nsegs);
+	sg_set_buf(sg, buf, len);
 
 	if (next)
 		sg_chain(sg, nsegs, next);
@@ -187,37 +184,36 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
 	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
-	size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
-	size_t chunk_len, pad_left;
-	struct sg_mapping_iter miter;
-
-	if (!err) {
-		if (pad_len) {
-			sg_miter_start(&miter, req->dst,
-					sg_nents_for_len(req->dst, pad_len),
-					SG_MITER_ATOMIC | SG_MITER_TO_SG);
-
-			pad_left = pad_len;
-			while (pad_left) {
-				sg_miter_next(&miter);
-
-				chunk_len = min(miter.length, pad_left);
-				memset(miter.addr, 0, chunk_len);
-				pad_left -= chunk_len;
-			}
-
-			sg_miter_stop(&miter);
-		}
-
-		sg_pcopy_from_buffer(req->dst,
-				sg_nents_for_len(req->dst, ctx->key_size),
-				req_ctx->out_buf, req_ctx->child_req.dst_len,
-				pad_len);
-	}
+	unsigned int pad_len;
+	unsigned int len;
+	u8 *out_buf;
+
+	if (err)
+		goto out;
+
+	len = req_ctx->child_req.dst_len;
+	pad_len = ctx->key_size - len;
+
+	/* Four billion to one */
+	if (likely(!pad_len))
+		goto out;
+
+	out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
+	err = -ENOMEM;
+	if (!out_buf)
+		goto out;
+
+	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
+			  out_buf + pad_len, len);
+	sg_copy_from_buffer(req->dst,
+			    sg_nents_for_len(req->dst, ctx->key_size),
+			    out_buf, ctx->key_size);
+	kzfree(out_buf);
+
+out:
 	req->dst_len = ctx->key_size;
 
 	kfree(req_ctx->in_buf);
-	kzfree(req_ctx->out_buf);
 
 	return err;
 }
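
The rewritten completion handler exists because an RSA result, read as a big-endian integer, can come back shorter than the modulus when its leading bytes are zero; the handler then left-pads it back to key_size in place. A userspace sketch of that padding step, with pad_to_key_size() a hypothetical helper standing in for the scratch-buffer copies above:

/* Left-pad a big-endian RSA result to the full modulus size:
 * shift the significant bytes right, zero-fill the front. */
#include <stdio.h>
#include <string.h>

static void pad_to_key_size(unsigned char *buf, size_t len, size_t key_size)
{
	size_t pad_len = key_size - len;

	if (!pad_len)
		return;
	memmove(buf + pad_len, buf, len); /* shift value to the tail */
	memset(buf, 0, pad_len);          /* zero-fill the leading bytes */
}

int main(void)
{
	unsigned char out[8] = { 0xde, 0xad, 0xbe, 0xef };

	pad_to_key_size(out, 4, sizeof(out));
	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x", out[i]);
	printf("\n"); /* 00000000deadbeef */
	return 0;
}
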
@@ -257,21 +253,8 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
 		return -EOVERFLOW;
 	}
 
-	if (ctx->key_size > PAGE_SIZE)
-		return -ENOTSUPP;
-
-	/*
-	 * Replace both input and output to add the padding in the input and
-	 * the potential missing leading zeros in the output.
-	 */
-	req_ctx->child_req.src = req_ctx->in_sg;
-	req_ctx->child_req.src_len = ctx->key_size - 1;
-	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size;
-
-	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
-			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-			GFP_KERNEL : GFP_ATOMIC);
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+				  GFP_KERNEL);
 	if (!req_ctx->in_buf)
 		return -ENOMEM;
 
@@ -284,9 +267,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
 	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
 			ctx->key_size - 1 - req->src_len, req->src);
 
-	req_ctx->out_buf = kmalloc(ctx->key_size,
-			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-			GFP_KERNEL : GFP_ATOMIC);
+	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
 	if (!req_ctx->out_buf) {
 		kfree(req_ctx->in_buf);
 		return -ENOMEM;
@@ -299,6 +280,10 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
 			pkcs1pad_encrypt_sign_complete_cb, req);
 
+	/* Reuse output buffer */
+	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
+				   req->dst, ctx->key_size - 1, req->dst_len);
+
 	err = crypto_akcipher_encrypt(&req_ctx->child_req);
 	if (err != -EINPROGRESS &&
 			(err != -EBUSY ||
@@ -380,18 +365,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
 	if (!ctx->key_size || req->src_len != ctx->key_size)
 		return -EINVAL;
 
-	if (ctx->key_size > PAGE_SIZE)
-		return -ENOTSUPP;
-
-	/* Reuse input buffer, output to a new buffer */
-	req_ctx->child_req.src = req->src;
-	req_ctx->child_req.src_len = req->src_len;
-	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size;
-
-	req_ctx->out_buf = kmalloc(ctx->key_size,
-			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-			GFP_KERNEL : GFP_ATOMIC);
+	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;
 
@@ -402,6 +376,11 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
 			pkcs1pad_decrypt_complete_cb, req);
 
+	/* Reuse input buffer, output to a new buffer */
+	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
+				   req_ctx->out_sg, req->src_len,
+				   ctx->key_size);
+
 	err = crypto_akcipher_decrypt(&req_ctx->child_req);
 	if (err != -EINPROGRESS &&
 			(err != -EBUSY ||
@@ -416,20 +395,16 @@ static int pkcs1pad_sign(struct akcipher_request *req)
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
 	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
-	const struct rsa_asn1_template *digest_info = NULL;
+	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
+	const struct rsa_asn1_template *digest_info = ictx->digest_info;
 	int err;
 	unsigned int ps_end, digest_size = 0;
 
 	if (!ctx->key_size)
 		return -EINVAL;
 
-	if (ctx->hash_name) {
-		digest_info = rsa_lookup_asn1(ctx->hash_name);
-		if (!digest_info)
-			return -EINVAL;
-
-		digest_size = digest_info->size;
-	}
+	digest_size = digest_info->size;
 
 	if (req->src_len + digest_size > ctx->key_size - 11)
 		return -EOVERFLOW;
@@ -439,21 +414,8 @@ static int pkcs1pad_sign(struct akcipher_request *req)
 		return -EOVERFLOW;
 	}
 
-	if (ctx->key_size > PAGE_SIZE)
-		return -ENOTSUPP;
-
-	/*
-	 * Replace both input and output to add the padding in the input and
-	 * the potential missing leading zeros in the output.
-	 */
-	req_ctx->child_req.src = req_ctx->in_sg;
-	req_ctx->child_req.src_len = ctx->key_size - 1;
-	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size;
-
-	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
-			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-			GFP_KERNEL : GFP_ATOMIC);
+	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+				  GFP_KERNEL);
 	if (!req_ctx->in_buf)
 		return -ENOMEM;
 
@@ -462,29 +424,20 @@ static int pkcs1pad_sign(struct akcipher_request *req)
 	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
 	req_ctx->in_buf[ps_end] = 0x00;
 
-	if (digest_info) {
-		memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
-		       digest_info->size);
-	}
+	memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
+	       digest_info->size);
 
 	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
 			ctx->key_size - 1 - req->src_len, req->src);
 
-	req_ctx->out_buf = kmalloc(ctx->key_size,
-			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-			GFP_KERNEL : GFP_ATOMIC);
-	if (!req_ctx->out_buf) {
-		kfree(req_ctx->in_buf);
-		return -ENOMEM;
-	}
-
-	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-			ctx->key_size, NULL);
-
 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
 			pkcs1pad_encrypt_sign_complete_cb, req);
 
+	/* Reuse output buffer */
+	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
+				   req->dst, ctx->key_size - 1, req->dst_len);
+
 	err = crypto_akcipher_sign(&req_ctx->child_req);
 	if (err != -EINPROGRESS &&
 			(err != -EBUSY ||
@@ -499,56 +452,58 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
 	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
-	const struct rsa_asn1_template *digest_info;
+	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
+	const struct rsa_asn1_template *digest_info = ictx->digest_info;
+	unsigned int dst_len;
 	unsigned int pos;
-
-	if (err == -EOVERFLOW)
-		/* Decrypted value had no leading 0 byte */
-		err = -EINVAL;
+	u8 *out_buf;
 
 	if (err)
 		goto done;
 
-	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
-		err = -EINVAL;
+	err = -EINVAL;
+	dst_len = req_ctx->child_req.dst_len;
+	if (dst_len < ctx->key_size - 1)
 		goto done;
+
+	out_buf = req_ctx->out_buf;
+	if (dst_len == ctx->key_size) {
+		if (out_buf[0] != 0x00)
+			/* Decrypted value had no leading 0 byte */
+			goto done;
+
+		dst_len--;
+		out_buf++;
 	}
 
 	err = -EBADMSG;
-	if (req_ctx->out_buf[0] != 0x01)
+	if (out_buf[0] != 0x01)
 		goto done;
 
-	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
-		if (req_ctx->out_buf[pos] != 0xff)
+	for (pos = 1; pos < dst_len; pos++)
+		if (out_buf[pos] != 0xff)
 			break;
 
-	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
-			req_ctx->out_buf[pos] != 0x00)
+	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
 		goto done;
 	pos++;
 
-	if (ctx->hash_name) {
-		digest_info = rsa_lookup_asn1(ctx->hash_name);
-		if (!digest_info)
-			goto done;
-
-		if (memcmp(req_ctx->out_buf + pos, digest_info->data,
-			   digest_info->size))
-			goto done;
-
-		pos += digest_info->size;
-	}
+	if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
+		goto done;
+
+	pos += digest_info->size;
 
 	err = 0;
 
-	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+	if (req->dst_len < dst_len - pos)
 		err = -EOVERFLOW;
-	req->dst_len = req_ctx->child_req.dst_len - pos;
+	req->dst_len = dst_len - pos;
 
 	if (!err)
 		sg_copy_from_buffer(req->dst,
 				sg_nents_for_len(req->dst, req->dst_len),
-				req_ctx->out_buf + pos, req->dst_len);
+				out_buf + pos, req->dst_len);
 done:
 	kzfree(req_ctx->out_buf);
 
@@ -588,18 +543,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 	if (!ctx->key_size || req->src_len < ctx->key_size)
 		return -EINVAL;
 
-	if (ctx->key_size > PAGE_SIZE)
-		return -ENOTSUPP;
-
-	/* Reuse input buffer, output to a new buffer */
-	req_ctx->child_req.src = req->src;
-	req_ctx->child_req.src_len = req->src_len;
-	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size;
-
-	req_ctx->out_buf = kmalloc(ctx->key_size,
-			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-			GFP_KERNEL : GFP_ATOMIC);
+	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;
 
@@ -610,6 +554,11 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
 			pkcs1pad_verify_complete_cb, req);
 
+	/* Reuse input buffer, output to a new buffer */
+	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
+				   req_ctx->out_sg, req->src_len,
+				   ctx->key_size);
+
 	err = crypto_akcipher_verify(&req_ctx->child_req);
 	if (err != -EINPROGRESS &&
 			(err != -EBUSY ||
@@ -626,12 +575,11 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
 	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct crypto_akcipher *child_tfm;
 
-	child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
+	child_tfm = crypto_spawn_akcipher(&ictx->spawn);
 	if (IS_ERR(child_tfm))
 		return PTR_ERR(child_tfm);
 
 	ctx->child = child_tfm;
-	ctx->hash_name = ictx->hash_name;
 	return 0;
 }
 
@@ -648,12 +596,12 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
 	struct crypto_akcipher_spawn *spawn = &ctx->spawn;
 
 	crypto_drop_akcipher(spawn);
-	kfree(ctx->hash_name);
 	kfree(inst);
 }
 
 static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
+	const struct rsa_asn1_template *digest_info;
 	struct crypto_attr_type *algt;
 	struct akcipher_instance *inst;
 	struct pkcs1pad_inst_ctx *ctx;
@@ -676,7 +624,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	hash_name = crypto_attr_alg_name(tb[2]);
 	if (IS_ERR(hash_name))
-		hash_name = NULL;
+		return PTR_ERR(hash_name);
+
+	digest_info = rsa_lookup_asn1(hash_name);
+	if (!digest_info)
+		return -EINVAL;
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst)
@@ -684,7 +636,7 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	ctx = akcipher_instance_ctx(inst);
 	spawn = &ctx->spawn;
-	ctx->hash_name = hash_name ? kstrdup(hash_name, GFP_KERNEL) : NULL;
+	ctx->digest_info = digest_info;
 
 	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
 	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
@@ -696,27 +648,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	err = -ENAMETOOLONG;
 
-	if (!hash_name) {
-		if (snprintf(inst->alg.base.cra_name,
-			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
-			     rsa_alg->base.cra_name) >=
-					CRYPTO_MAX_ALG_NAME ||
-		    snprintf(inst->alg.base.cra_driver_name,
-			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
-			     rsa_alg->base.cra_driver_name) >=
-					CRYPTO_MAX_ALG_NAME)
-			goto out_drop_alg;
-	} else {
-		if (snprintf(inst->alg.base.cra_name,
-			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
-			     rsa_alg->base.cra_name, hash_name) >=
-					CRYPTO_MAX_ALG_NAME ||
-		    snprintf(inst->alg.base.cra_driver_name,
-			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
-			     rsa_alg->base.cra_driver_name, hash_name) >=
-					CRYPTO_MAX_ALG_NAME)
-			goto out_free_hash;
-	}
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
+	    CRYPTO_MAX_ALG_NAME ||
+	    snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "pkcs1pad(%s,%s)",
+		     rsa_alg->base.cra_driver_name, hash_name) >=
+	    CRYPTO_MAX_ALG_NAME)
+		goto out_drop_alg;
 
 	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
@@ -738,12 +677,10 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	err = akcipher_register_instance(tmpl, inst);
 	if (err)
-		goto out_free_hash;
+		goto out_drop_alg;
 
 	return 0;
 
-out_free_hash:
-	kfree(ctx->hash_name);
 out_drop_alg:
 	crypto_drop_akcipher(spawn);
 out_free_inst:
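
pkcs1pad_sign() above assembles an EMSA-PKCS1-v1_5 block (RFC 3447 sec 9.2): 0x00, 0x01, at least eight 0xff padding bytes, a 0x00 separator, then the DigestInfo prefix and the hash. The kernel buffer omits the leading 0x00 and recovers it from the fixed-length RSA output. A userspace sketch of the layout; digest_info and digest here are dummy bytes, not a real DER encoding:

/* Build a full EMSA-PKCS1-v1_5 block for a toy 32-byte modulus. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char block[32];
	const unsigned char digest_info[3] = { 0x30, 0x21, 0x30 }; /* dummy */
	const unsigned char digest[4] = { 0xaa, 0xbb, 0xcc, 0xdd }; /* dummy */
	size_t key_size = sizeof(block);
	size_t t_len = sizeof(digest_info) + sizeof(digest);
	size_t ps_end = key_size - t_len - 1; /* index of the 0x00 separator */

	block[0] = 0x00;
	block[1] = 0x01;
	memset(block + 2, 0xff, ps_end - 2);          /* PS padding */
	block[ps_end] = 0x00;
	memcpy(block + ps_end + 1, digest_info, sizeof(digest_info));
	memcpy(block + ps_end + 1 + sizeof(digest_info), digest,
	       sizeof(digest));

	for (size_t i = 0; i < key_size; i++)
		printf("%02x", block[i]);
	printf("\n");
	return 0;
}
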
diff --git a/crypto/rsa.c b/crypto/rsa.c
@@ -10,16 +10,23 @@
  */
 
 #include <linux/module.h>
+#include <linux/mpi.h>
 #include <crypto/internal/rsa.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/akcipher.h>
 #include <crypto/algapi.h>
 
+struct rsa_mpi_key {
+	MPI n;
+	MPI e;
+	MPI d;
+};
+
 /*
  * RSAEP function [RFC3447 sec 5.1.1]
  * c = m^e mod n;
  */
-static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m)
+static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
 {
 	/* (1) Validate 0 <= m < n */
 	if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -33,7 +40,7 @@ static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m)
  * RSADP function [RFC3447 sec 5.1.2]
  * m = c^d mod n;
  */
-static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c)
+static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
 {
 	/* (1) Validate 0 <= c < n */
 	if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
@@ -47,7 +54,7 @@ static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c)
  * RSASP1 function [RFC3447 sec 5.2.1]
  * s = m^d mod n
  */
-static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m)
+static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
 {
 	/* (1) Validate 0 <= m < n */
 	if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -61,7 +68,7 @@ static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m)
  * RSAVP1 function [RFC3447 sec 5.2.2]
  * m = s^e mod n;
  */
-static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s)
+static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
 {
 	/* (1) Validate 0 <= s < n */
 	if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
@@ -71,7 +78,7 @@ static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s)
 	return mpi_powm(m, s, key->e, key->n);
 }
 
-static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm)
+static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
 {
 	return akcipher_tfm_ctx(tfm);
 }
@@ -79,7 +86,7 @@ static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm)
 static int rsa_enc(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	const struct rsa_key *pkey = rsa_get_key(tfm);
+	const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
 	MPI m, c = mpi_alloc(0);
 	int ret = 0;
 	int sign;
@@ -101,7 +108,7 @@ static int rsa_enc(struct akcipher_request *req)
 	if (ret)
 		goto err_free_m;
 
-	ret = mpi_write_to_sgl(c, req->dst, &req->dst_len, &sign);
+	ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign);
 	if (ret)
 		goto err_free_m;
 
@@ -118,7 +125,7 @@ err_free_c:
 static int rsa_dec(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	const struct rsa_key *pkey = rsa_get_key(tfm);
+	const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
 	MPI c, m = mpi_alloc(0);
 	int ret = 0;
 	int sign;
@@ -140,7 +147,7 @@ static int rsa_dec(struct akcipher_request *req)
 	if (ret)
 		goto err_free_c;
 
-	ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign);
+	ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
 	if (ret)
 		goto err_free_c;
 
@@ -156,7 +163,7 @@ err_free_m:
 static int rsa_sign(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	const struct rsa_key *pkey = rsa_get_key(tfm);
+	const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
 	MPI m, s = mpi_alloc(0);
 	int ret = 0;
 	int sign;
@@ -178,7 +185,7 @@ static int rsa_sign(struct akcipher_request *req)
 	if (ret)
 		goto err_free_m;
 
-	ret = mpi_write_to_sgl(s, req->dst, &req->dst_len, &sign);
+	ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
 	if (ret)
 		goto err_free_m;
 
@@ -195,7 +202,7 @@ err_free_s:
 static int rsa_verify(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	const struct rsa_key *pkey = rsa_get_key(tfm);
+	const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
 	MPI s, m = mpi_alloc(0);
 	int ret = 0;
 	int sign;
@@ -219,7 +226,7 @@ static int rsa_verify(struct akcipher_request *req)
 	if (ret)
 		goto err_free_s;
 
-	ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign);
+	ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
 	if (ret)
 		goto err_free_s;
 
@@ -233,6 +240,16 @@ err_free_m:
 	return ret;
 }
 
+static void rsa_free_mpi_key(struct rsa_mpi_key *key)
+{
+	mpi_free(key->d);
+	mpi_free(key->e);
+	mpi_free(key->n);
+	key->d = NULL;
+	key->e = NULL;
+	key->n = NULL;
+}
+
 static int rsa_check_key_length(unsigned int len)
 {
 	switch (len) {
@@ -251,49 +268,87 @@ static int rsa_check_key_length(unsigned int len)
 static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
 			   unsigned int keylen)
 {
-	struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+	struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key = {0};
 	int ret;
 
-	ret = rsa_parse_pub_key(pkey, key, keylen);
+	/* Free the old MPI key if any */
+	rsa_free_mpi_key(mpi_key);
+
+	ret = rsa_parse_pub_key(&raw_key, key, keylen);
 	if (ret)
 		return ret;
 
-	if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
-		rsa_free_key(pkey);
-		ret = -EINVAL;
+	mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
+	if (!mpi_key->e)
+		goto err;
+
+	mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
+	if (!mpi_key->n)
+		goto err;
+
+	if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
+		rsa_free_mpi_key(mpi_key);
+		return -EINVAL;
 	}
-	return ret;
+
+	return 0;
+
+err:
+	rsa_free_mpi_key(mpi_key);
+	return -ENOMEM;
 }
 
 static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
 			    unsigned int keylen)
 {
-	struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+	struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key = {0};
 	int ret;
 
-	ret = rsa_parse_priv_key(pkey, key, keylen);
+	/* Free the old MPI key if any */
+	rsa_free_mpi_key(mpi_key);
+
+	ret = rsa_parse_priv_key(&raw_key, key, keylen);
 	if (ret)
 		return ret;
 
-	if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
-		rsa_free_key(pkey);
-		ret = -EINVAL;
+	mpi_key->d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
+	if (!mpi_key->d)
+		goto err;
+
+	mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
+	if (!mpi_key->e)
+		goto err;
+
+	mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
+	if (!mpi_key->n)
+		goto err;
+
+	if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
+		rsa_free_mpi_key(mpi_key);
+		return -EINVAL;
 	}
-	return ret;
+
+	return 0;
+
+err:
+	rsa_free_mpi_key(mpi_key);
+	return -ENOMEM;
 }
 
 static int rsa_max_size(struct crypto_akcipher *tfm)
 {
-	struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+	struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
 
 	return pkey->n ? mpi_get_size(pkey->n) : -EINVAL;
 }
 
 static void rsa_exit_tfm(struct crypto_akcipher *tfm)
 {
-	struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+	struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
 
-	rsa_free_key(pkey);
+	rsa_free_mpi_key(pkey);
 }
 
 static struct akcipher_alg rsa = {
@@ -310,7 +365,7 @@ static struct akcipher_alg rsa = {
 		.cra_driver_name = "rsa-generic",
 		.cra_priority = 100,
 		.cra_module = THIS_MODULE,
-		.cra_ctxsize = sizeof(struct rsa_key),
+		.cra_ctxsize = sizeof(struct rsa_mpi_key),
 	},
 };
 
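
The primitives above are RFC 3447's RSAEP/RSADP/RSASP1/RSAVP1, all the same modular exponentiation applied to different key components. A toy demo with the textbook 3233 = 61 * 53 key, using 64-bit square-and-multiply where the kernel uses arbitrary-precision MPIs; not constant time, illustration only:

/* c = m^e mod n (RSAEP) and m = c^d mod n (RSADP) on toy numbers.
 * __uint128_t keeps the intermediate products exact; gcc/clang, 64-bit. */
#include <stdio.h>
#include <stdint.h>

static uint64_t modexp(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t result = 1 % mod;

	base %= mod;
	while (exp) {
		if (exp & 1)
			result = (__uint128_t)result * base % mod;
		base = (__uint128_t)base * base % mod;
		exp >>= 1;
	}
	return result;
}

int main(void)
{
	/* Textbook toy key: p = 61, q = 53, n = 3233, e = 17, d = 2753 */
	uint64_t n = 3233, e = 17, d = 2753, m = 65;
	uint64_t c = modexp(m, e, n); /* RSAEP: 2790 */

	printf("c = %llu, decrypts back to %llu\n",
	       (unsigned long long)c, (unsigned long long)modexp(c, d, n));
	return 0;
}
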
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
@@ -22,20 +22,29 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
 	      const void *value, size_t vlen)
 {
 	struct rsa_key *key = context;
+	const u8 *ptr = value;
+	size_t n_sz = vlen;
 
-	key->n = mpi_read_raw_data(value, vlen);
-
-	if (!key->n)
-		return -ENOMEM;
-
-	/* In FIPS mode only allow key size 2K & 3K */
-	if (fips_enabled && (mpi_get_size(key->n) != 256 &&
-			     mpi_get_size(key->n) != 384)) {
-		pr_err("RSA: key size not allowed in FIPS mode\n");
-		mpi_free(key->n);
-		key->n = NULL;
+	/* invalid key provided */
+	if (!value || !vlen)
 		return -EINVAL;
+
+	if (fips_enabled) {
+		while (!*ptr && n_sz) {
+			ptr++;
+			n_sz--;
+		}
+
+		/* In FIPS mode only allow key size 2K & 3K */
+		if (n_sz != 256 && n_sz != 384) {
+			pr_err("RSA: key size not allowed in FIPS mode\n");
+			return -EINVAL;
+		}
 	}
 
+	key->n = value;
+	key->n_sz = vlen;
+
 	return 0;
 }
@@ -44,10 +53,12 @@ int rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
 {
 	struct rsa_key *key = context;
 
-	key->e = mpi_read_raw_data(value, vlen);
+	/* invalid key provided */
+	if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
+		return -EINVAL;
 
-	if (!key->e)
-		return -ENOMEM;
+	key->e = value;
+	key->e_sz = vlen;
 
 	return 0;
 }
@@ -57,46 +68,95 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
 {
 	struct rsa_key *key = context;
 
-	key->d = mpi_read_raw_data(value, vlen);
-
-	if (!key->d)
-		return -ENOMEM;
-
-	/* In FIPS mode only allow key size 2K & 3K */
-	if (fips_enabled && (mpi_get_size(key->d) != 256 &&
-			     mpi_get_size(key->d) != 384)) {
-		pr_err("RSA: key size not allowed in FIPS mode\n");
-		mpi_free(key->d);
-		key->d = NULL;
+	/* invalid key provided */
+	if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
 		return -EINVAL;
-	}
+
+	key->d = value;
+	key->d_sz = vlen;
 
 	return 0;
 }
 
-static void free_mpis(struct rsa_key *key)
+int rsa_get_p(void *context, size_t hdrlen, unsigned char tag,
+	      const void *value, size_t vlen)
 {
-	mpi_free(key->n);
-	mpi_free(key->e);
-	mpi_free(key->d);
-	key->n = NULL;
-	key->e = NULL;
-	key->d = NULL;
+	struct rsa_key *key = context;
+
+	/* invalid key provided */
+	if (!value || !vlen || vlen > key->n_sz)
+		return -EINVAL;
+
+	key->p = value;
+	key->p_sz = vlen;
+
+	return 0;
+}
+
+int rsa_get_q(void *context, size_t hdrlen, unsigned char tag,
+	      const void *value, size_t vlen)
+{
+	struct rsa_key *key = context;
+
+	/* invalid key provided */
+	if (!value || !vlen || vlen > key->n_sz)
+		return -EINVAL;
+
+	key->q = value;
+	key->q_sz = vlen;
+
+	return 0;
+}
+
+int rsa_get_dp(void *context, size_t hdrlen, unsigned char tag,
+	       const void *value, size_t vlen)
+{
+	struct rsa_key *key = context;
+
+	/* invalid key provided */
+	if (!value || !vlen || vlen > key->n_sz)
+		return -EINVAL;
+
+	key->dp = value;
+	key->dp_sz = vlen;
+
+	return 0;
+}
+
+int rsa_get_dq(void *context, size_t hdrlen, unsigned char tag,
+	       const void *value, size_t vlen)
+{
+	struct rsa_key *key = context;
+
+	/* invalid key provided */
+	if (!value || !vlen || vlen > key->n_sz)
+		return -EINVAL;
+
+	key->dq = value;
+	key->dq_sz = vlen;
+
+	return 0;
+}
+
+int rsa_get_qinv(void *context, size_t hdrlen, unsigned char tag,
+		 const void *value, size_t vlen)
+{
+	struct rsa_key *key = context;
+
+	/* invalid key provided */
+	if (!value || !vlen || vlen > key->n_sz)
+		return -EINVAL;
+
+	key->qinv = value;
+	key->qinv_sz = vlen;
+
+	return 0;
 }
 
 /**
- * rsa_free_key() - frees rsa key allocated by rsa_parse_key()
+ * rsa_parse_pub_key() - decodes the BER encoded buffer and stores in the
+ *                       provided struct rsa_key, pointers to the raw key as
+ *                       is, so that the caller can copy it or MPI parse it,
+ *                       etc.
  *
  * @rsa_key: struct rsa_key key representation
- */
-void rsa_free_key(struct rsa_key *key)
-{
-	free_mpis(key);
-}
-EXPORT_SYMBOL_GPL(rsa_free_key);
-
-/**
- * rsa_parse_pub_key() - extracts an rsa public key from BER encoded buffer
- *			 and stores it in the provided struct rsa_key
- *
- * @rsa_key: struct rsa_key key representation
  * @key: key in BER format
@@ -107,23 +167,15 @@ EXPORT_SYMBOL_GPL(rsa_free_key);
 int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
 		      unsigned int key_len)
 {
-	int ret;
-
-	free_mpis(rsa_key);
-	ret = asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
-	if (ret < 0)
-		goto error;
-
-	return 0;
-error:
-	free_mpis(rsa_key);
-	return ret;
+	return asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
 }
 EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
 
 /**
- * rsa_parse_pub_key() - extracts an rsa private key from BER encoded buffer
- *			 and stores it in the provided struct rsa_key
+ * rsa_parse_priv_key() - decodes the BER encoded buffer and stores in the
+ *                        provided struct rsa_key, pointers to the raw key
+ *                        as is, so that the caller can copy it or MPI parse
+ *                        it, etc.
  *
  * @rsa_key: struct rsa_key key representation
  * @key: key in BER format
@@ -134,16 +186,6 @@ EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
 int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
 		       unsigned int key_len)
 {
-	int ret;
-
-	free_mpis(rsa_key);
-	ret = asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
-	if (ret < 0)
-		goto error;
-
-	return 0;
-error:
-	free_mpis(rsa_key);
-	return ret;
+	return asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
 }
 EXPORT_SYMBOL_GPL(rsa_parse_priv_key);
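
rsa_get_n() now strips leading zero octets before the FIPS length check, since a BER INTEGER may carry an extra 0x00 sign byte that would otherwise inflate the apparent modulus size past the allowed 256/384 bytes. A userspace sketch of that check; significant_len() is a hypothetical helper:

/* Count only the significant (non-leading-zero) bytes of a big-endian
 * integer, then apply the 2K/3K FIPS size check from above. */
#include <stdio.h>
#include <stddef.h>

static size_t significant_len(const unsigned char *ptr, size_t n_sz)
{
	while (n_sz && !*ptr) {
		ptr++;
		n_sz--;
	}
	return n_sz;
}

int main(void)
{
	/* 257-byte INTEGER: one 0x00 sign byte, then a 256-byte modulus */
	unsigned char n[257] = { 0x00, 0x80 };
	size_t eff = significant_len(n, sizeof(n));

	printf("effective length: %zu (allowed in FIPS mode: %s)\n",
	       eff, (eff == 256 || eff == 384) ? "yes" : "no");
	return 0;
}
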
diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1
@@ -3,9 +3,9 @@ RsaPrivKey ::= SEQUENCE {
 	n	INTEGER ({ rsa_get_n }),
 	e	INTEGER ({ rsa_get_e }),
 	d	INTEGER ({ rsa_get_d }),
-	prime1	INTEGER,
-	prime2	INTEGER,
-	exponent1	INTEGER,
-	exponent2	INTEGER,
-	coefficient	INTEGER
+	prime1	INTEGER ({ rsa_get_p }),
+	prime2	INTEGER ({ rsa_get_q }),
+	exponent1	INTEGER ({ rsa_get_dp }),
+	exponent2	INTEGER ({ rsa_get_dq }),
+	coefficient	INTEGER ({ rsa_get_qinv })
 }
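
The newly wired callbacks capture the CRT components of the private key: prime1/prime2 are p and q, exponent1/exponent2 are d mod (p-1) and d mod (q-1), and coefficient is q^-1 mod p (RFC 3447 A.1.2), letting hardware decrypt via the Chinese Remainder Theorem. A toy demo of CRT decryption with the same 3233 = 61 * 53 key; 64-bit math, illustration only:

/* RSA-CRT: m1 = c^dP mod p, m2 = c^dQ mod q,
 * h = qInv * (m1 - m2) mod p, m = m2 + h * q. */
#include <stdio.h>
#include <stdint.h>

static uint64_t modexp(uint64_t b, uint64_t e, uint64_t m)
{
	uint64_t r = 1 % m;

	b %= m;
	while (e) {
		if (e & 1)
			r = (__uint128_t)r * b % m;
		b = (__uint128_t)b * b % m;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	/* n = 3233 = 61 * 53, d = 2753 */
	uint64_t p = 61, q = 53;
	uint64_t dp = 2753 % (p - 1); /* exponent1 = 53 */
	uint64_t dq = 2753 % (q - 1); /* exponent2 = 49 */
	uint64_t qinv = 38;           /* coefficient: 53^-1 mod 61 */
	uint64_t c = 2790;            /* ciphertext of m = 65 */

	uint64_t m1 = modexp(c, dp, p);
	uint64_t m2 = modexp(c, dq, q);
	uint64_t h = qinv * ((m1 + p - m2 % p) % p) % p;

	printf("m = %llu\n", (unsigned long long)(m2 + h * q)); /* 65 */
	return 0;
}
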
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
@@ -18,8 +18,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
 #include <linux/scatterlist.h>
 
 static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
@@ -30,53 +28,6 @@ static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
 	memcpy(dst, src, nbytes);
 }
 
-void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
-{
-	walk->sg = sg;
-
-	BUG_ON(!sg->length);
-
-	walk->offset = sg->offset;
-}
-EXPORT_SYMBOL_GPL(scatterwalk_start);
-
-void *scatterwalk_map(struct scatter_walk *walk)
-{
-	return kmap_atomic(scatterwalk_page(walk)) +
-	       offset_in_page(walk->offset);
-}
-EXPORT_SYMBOL_GPL(scatterwalk_map);
-
-static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
-				 unsigned int more)
-{
-	if (out) {
-		struct page *page;
-
-		page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
-		/* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
-		 * PageSlab cannot be optimised away per se due to
-		 * use of volatile pointer.
-		 */
-		if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
-			flush_dcache_page(page);
-	}
-
-	if (more) {
-		walk->offset += PAGE_SIZE - 1;
-		walk->offset &= PAGE_MASK;
-		if (walk->offset >= walk->sg->offset + walk->sg->length)
-			scatterwalk_start(walk, sg_next(walk->sg));
-	}
-}
-
-void scatterwalk_done(struct scatter_walk *walk, int out, int more)
-{
-	if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
-		scatterwalk_pagedone(walk, out, more);
-}
-EXPORT_SYMBOL_GPL(scatterwalk_done);
-
 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 			    size_t nbytes, int out)
 {
@@ -87,9 +38,11 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 		if (len_this_page > nbytes)
 			len_this_page = nbytes;
 
-		vaddr = scatterwalk_map(walk);
-		memcpy_dir(buf, vaddr, len_this_page, out);
-		scatterwalk_unmap(vaddr);
+		if (out != 2) {
+			vaddr = scatterwalk_map(walk);
+			memcpy_dir(buf, vaddr, len_this_page, out);
+			scatterwalk_unmap(vaddr);
+		}
 
 		scatterwalk_advance(walk, len_this_page);
 
@@ -99,7 +52,7 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 		buf += len_this_page;
 		nbytes -= len_this_page;
 
-		scatterwalk_pagedone(walk, out, 1);
+		scatterwalk_pagedone(walk, out & 1, 1);
 	}
 }
 EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
@@ -125,28 +78,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
 
-int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
-{
-	int offset = 0, n = 0;
-
-	/* num_bytes is too small */
-	if (num_bytes < sg->length)
-		return -1;
-
-	do {
-		offset += sg->length;
-		n++;
-		sg = sg_next(sg);
-
-		/* num_bytes is too large */
-		if (unlikely(!sg && (num_bytes < offset)))
-			return -1;
-	} while (sg && (num_bytes > offset));
-
-	return n;
-}
-EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen);
-
 struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
 				     struct scatterlist *src,
 				     unsigned int len)
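
scatterwalk_copychunks() gains a third mode above: out == 2 advances the walk without mapping or copying anything, while the out & 1 mask keeps scatterwalk_pagedone()'s flush semantics tied to the real direction. A plain-buffer sketch of the dispatch; the page-walking machinery is elided, copychunks() here is a stand-in, and modes 0/1 are collapsed into one memcpy direction:

/* Mode 0/1: copy nbytes and advance. Mode 2: advance only. */
#include <stdio.h>
#include <string.h>

static void copychunks(void *buf, const char **walk, size_t nbytes, int out)
{
	if (out != 2)            /* mode 2 skips the copy entirely */
		memcpy(buf, *walk, nbytes);
	*walk += nbytes;         /* but the walk always advances */
}

int main(void)
{
	const char *data = "abcdef";
	const char *walk = data;
	char buf[3] = { 0 };

	copychunks(buf, &walk, 2, 2); /* skip past "ab" without copying */
	copychunks(buf, &walk, 2, 1); /* copy "cd" out */
	printf("%.2s\n", buf);        /* cd */
	return 0;
}
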
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
@@ -14,50 +14,17 @@
  */
 
 #include <crypto/internal/geniv.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 #include <linux/string.h>
 
-struct seqiv_ctx {
-	spinlock_t lock;
-	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static void seqiv_free(struct crypto_instance *inst);
 
-static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
-{
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	struct crypto_ablkcipher *geniv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = skcipher_givcrypt_reqtfm(req);
-	memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
-
-out:
-	kfree(subreq->info);
-}
-
-static void seqiv_complete(struct crypto_async_request *base, int err)
-{
-	struct skcipher_givcrypt_request *req = base->data;
-
-	seqiv_complete2(req, err);
-	skcipher_givcrypt_complete(req, err);
-}
-
 static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 {
 	struct aead_request *subreq = aead_request_ctx(req);
@@ -85,65 +52,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
 	aead_request_complete(req, err);
 }
 
-static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
-			unsigned int ivsize)
-{
-	unsigned int len = ivsize;
-
-	if (ivsize > sizeof(u64)) {
-		memset(info, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(seq);
-	memcpy(info + ivsize - len, &seq, len);
-	crypto_xor(info, ctx->salt, ivsize);
-}
-
-static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-	crypto_completion_t compl;
-	void *data;
-	u8 *info;
-	unsigned int ivsize;
-	int err;
-
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-
-	compl = req->creq.base.complete;
-	data = req->creq.base.data;
-	info = req->creq.info;
-
-	ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_ablkcipher_alignmask(geniv) + 1))) {
-		info = kmalloc(ivsize, req->creq.base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		compl = seqiv_complete;
-		data = req;
-	}
-
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
-					data);
-	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
-				     req->creq.nbytes, info);
-
-	seqiv_geniv(ctx, info, req->seq, ivsize);
-	memcpy(req->giv, info, ivsize);
-
-	err = crypto_ablkcipher_encrypt(subreq);
-	if (unlikely(info != req->creq.info))
-		seqiv_complete2(req, err);
-	return err;
-}
-
 static int seqiv_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
@@ -165,12 +73,16 @@ static int seqiv_aead_encrypt(struct aead_request *req)
 	info = req->iv;
 
 	if (req->src != req->dst) {
-		struct blkcipher_desc desc = {
-			.tfm = ctx->null,
-		};
+		SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
 
-		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-					       req->assoclen + req->cryptlen);
+		skcipher_request_set_tfm(nreq, ctx->sknull);
+		skcipher_request_set_callback(nreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(nreq, req->src, req->dst,
+					   req->assoclen + req->cryptlen,
+					   NULL);
+
+		err = crypto_skcipher_encrypt(nreq);
 		if (err)
 			return err;
 	}
@ -229,62 +141,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
|
||||||
return crypto_aead_decrypt(subreq);
|
return crypto_aead_decrypt(subreq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int seqiv_init(struct crypto_tfm *tfm)
|
|
||||||
{
|
|
||||||
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
|
|
||||||
struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
|
||||||
int err;
|
|
||||||
|
|
||||||
spin_lock_init(&ctx->lock);
|
|
||||||
|
|
||||||
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
|
|
||||||
|
|
||||||
err = 0;
|
|
||||||
if (!crypto_get_default_rng()) {
|
|
||||||
crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
|
|
||||||
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
|
|
||||||
crypto_ablkcipher_ivsize(geniv));
|
|
||||||
crypto_put_default_rng();
|
|
||||||
}
|
|
||||||
|
|
||||||
return err ?: skcipher_geniv_init(tfm);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
|
|
||||||
struct rtattr **tb)
|
|
||||||
{
|
|
||||||
struct crypto_instance *inst;
|
|
||||||
int err;
|
|
||||||
|
|
||||||
inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
|
|
||||||
|
|
||||||
if (IS_ERR(inst))
|
|
||||||
return PTR_ERR(inst);
|
|
||||||
|
|
||||||
err = -EINVAL;
|
|
||||||
if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
|
|
||||||
goto free_inst;
|
|
||||||
|
|
||||||
inst->alg.cra_init = seqiv_init;
|
|
||||||
inst->alg.cra_exit = skcipher_geniv_exit;
|
|
||||||
|
|
||||||
inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
|
|
||||||
inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
|
|
||||||
|
|
||||||
inst->alg.cra_alignmask |= __alignof__(u32) - 1;
|
|
||||||
|
|
||||||
err = crypto_register_instance(tmpl, inst);
|
|
||||||
if (err)
|
|
||||||
goto free_inst;
|
|
||||||
|
|
||||||
out:
|
|
||||||
return err;
|
|
||||||
|
|
||||||
free_inst:
|
|
||||||
skcipher_geniv_free(inst);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
|
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||||
{
|
{
|
||||||
struct aead_instance *inst;
|
struct aead_instance *inst;
|
||||||
|
@ -330,26 +186,20 @@ free_inst:
|
||||||
static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
|
static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||||
{
|
{
|
||||||
struct crypto_attr_type *algt;
|
struct crypto_attr_type *algt;
|
||||||
int err;
|
|
||||||
|
|
||||||
algt = crypto_get_attr_type(tb);
|
algt = crypto_get_attr_type(tb);
|
||||||
if (IS_ERR(algt))
|
if (IS_ERR(algt))
|
||||||
return PTR_ERR(algt);
|
return PTR_ERR(algt);
|
||||||
|
|
||||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
|
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
|
||||||
err = seqiv_ablkcipher_create(tmpl, tb);
|
return -EINVAL;
|
||||||
else
|
|
||||||
err = seqiv_aead_create(tmpl, tb);
|
|
||||||
|
|
||||||
return err;
|
return seqiv_aead_create(tmpl, tb);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void seqiv_free(struct crypto_instance *inst)
|
static void seqiv_free(struct crypto_instance *inst)
|
||||||
{
|
{
|
||||||
if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
|
aead_geniv_free(aead_instance(inst));
|
||||||
skcipher_geniv_free(inst);
|
|
||||||
else
|
|
||||||
aead_geniv_free(aead_instance(inst));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct crypto_template seqiv_tmpl = {
|
static struct crypto_template seqiv_tmpl = {
|
||||||
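The seqiv hunks above delete the legacy givcipher entry points, so the template now only instantiates AEADs, and the out-of-place copy is done through a null skcipher instead of a blkcipher_desc. A minimal sketch of that copy idiom, assuming a tfm for "ecb(cipher_null)" was allocated earlier (the helper name null_copy is illustrative, not from the patch):

#include <crypto/skcipher.h>

/*
 * Copy len bytes from src to dst through ecb(cipher_null), the same
 * idiom seqiv_aead_encrypt() now uses for out-of-place requests.
 */
static int null_copy(struct crypto_skcipher *sknull,
		     struct scatterlist *src, struct scatterlist *dst,
		     unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

	skcipher_request_set_tfm(nreq, sknull);
	/* cipher_null completes synchronously, so no callback is needed */
	skcipher_request_set_callback(nreq, 0, NULL, NULL);
	skcipher_request_set_crypt(nreq, src, dst, len, NULL);

	return crypto_skcipher_encrypt(nreq);
}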
|
|
|
@ -0,0 +1,300 @@
|
||||||
|
/*
|
||||||
|
* Cryptographic API.
|
||||||
|
*
|
||||||
|
* SHA-3, as specified in
|
||||||
|
* http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
|
||||||
|
*
|
||||||
|
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License as published by the Free
|
||||||
|
* Software Foundation; either version 2 of the License, or (at your option)•
|
||||||
|
* any later version.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
#include <crypto/internal/hash.h>
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <linux/module.h>
|
||||||
|
#include <linux/types.h>
|
||||||
|
#include <crypto/sha3.h>
|
||||||
|
#include <asm/byteorder.h>
|
||||||
|
|
||||||
|
#define KECCAK_ROUNDS 24
|
||||||
|
|
||||||
|
#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
|
||||||
|
|
||||||
|
static const u64 keccakf_rndc[24] = {
|
||||||
|
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
|
||||||
|
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
|
||||||
|
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
|
||||||
|
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
|
||||||
|
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
|
||||||
|
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
|
||||||
|
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
|
||||||
|
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
|
||||||
|
};
|
||||||
|
|
||||||
|
static const int keccakf_rotc[24] = {
|
||||||
|
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
|
||||||
|
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
|
||||||
|
};
|
||||||
|
|
||||||
|
static const int keccakf_piln[24] = {
|
||||||
|
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
|
||||||
|
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
|
||||||
|
};
|
||||||
|
|
||||||
|
/* update the state with given number of rounds */
|
||||||
|
|
||||||
|
static void keccakf(u64 st[25])
|
||||||
|
{
|
||||||
|
int i, j, round;
|
||||||
|
u64 t, bc[5];
|
||||||
|
|
||||||
|
for (round = 0; round < KECCAK_ROUNDS; round++) {
|
||||||
|
|
||||||
|
/* Theta */
|
||||||
|
for (i = 0; i < 5; i++)
|
||||||
|
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
|
||||||
|
^ st[i + 20];
|
||||||
|
|
||||||
|
for (i = 0; i < 5; i++) {
|
||||||
|
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
|
||||||
|
for (j = 0; j < 25; j += 5)
|
||||||
|
st[j + i] ^= t;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Rho Pi */
|
||||||
|
t = st[1];
|
||||||
|
for (i = 0; i < 24; i++) {
|
||||||
|
j = keccakf_piln[i];
|
||||||
|
bc[0] = st[j];
|
||||||
|
st[j] = ROTL64(t, keccakf_rotc[i]);
|
||||||
|
t = bc[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Chi */
|
||||||
|
for (j = 0; j < 25; j += 5) {
|
||||||
|
for (i = 0; i < 5; i++)
|
||||||
|
bc[i] = st[j + i];
|
||||||
|
for (i = 0; i < 5; i++)
|
||||||
|
st[j + i] ^= (~bc[(i + 1) % 5]) &
|
||||||
|
bc[(i + 2) % 5];
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Iota */
|
||||||
|
st[0] ^= keccakf_rndc[round];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
|
||||||
|
{
|
||||||
|
memset(sctx, 0, sizeof(*sctx));
|
||||||
|
sctx->md_len = digest_sz;
|
||||||
|
sctx->rsiz = 200 - 2 * digest_sz;
|
||||||
|
sctx->rsizw = sctx->rsiz / 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sha3_224_init(struct shash_desc *desc)
|
||||||
|
{
|
||||||
|
struct sha3_state *sctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
|
sha3_init(sctx, SHA3_224_DIGEST_SIZE);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sha3_256_init(struct shash_desc *desc)
|
||||||
|
{
|
||||||
|
struct sha3_state *sctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
|
sha3_init(sctx, SHA3_256_DIGEST_SIZE);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sha3_384_init(struct shash_desc *desc)
|
||||||
|
{
|
||||||
|
struct sha3_state *sctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
|
sha3_init(sctx, SHA3_384_DIGEST_SIZE);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sha3_512_init(struct shash_desc *desc)
|
||||||
|
{
|
||||||
|
struct sha3_state *sctx = shash_desc_ctx(desc);
|
||||||
|
|
||||||
|
sha3_init(sctx, SHA3_512_DIGEST_SIZE);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sha3_update(struct shash_desc *desc, const u8 *data,
|
||||||
|
unsigned int len)
|
||||||
|
{
|
||||||
|
struct sha3_state *sctx = shash_desc_ctx(desc);
|
||||||
|
unsigned int done;
|
||||||
|
const u8 *src;
|
||||||
|
|
||||||
|
done = 0;
|
||||||
|
src = data;
|
||||||
|
|
||||||
|
if ((sctx->partial + len) > (sctx->rsiz - 1)) {
|
||||||
|
if (sctx->partial) {
|
||||||
|
done = -sctx->partial;
|
||||||
|
memcpy(sctx->buf + sctx->partial, data,
|
||||||
|
done + sctx->rsiz);
|
||||||
|
src = sctx->buf;
|
||||||
|
}
|
||||||
|
|
||||||
|
do {
|
||||||
|
unsigned int i;
|
||||||
|
|
||||||
|
for (i = 0; i < sctx->rsizw; i++)
|
||||||
|
sctx->st[i] ^= ((u64 *) src)[i];
|
||||||
|
keccakf(sctx->st);
|
||||||
|
|
||||||
|
done += sctx->rsiz;
|
||||||
|
src = data + done;
|
||||||
|
} while (done + (sctx->rsiz - 1) < len);
|
||||||
|
|
||||||
|
sctx->partial = 0;
|
||||||
|
}
|
||||||
|
memcpy(sctx->buf + sctx->partial, src, len - done);
|
||||||
|
sctx->partial += (len - done);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sha3_final(struct shash_desc *desc, u8 *out)
|
||||||
|
{
|
||||||
|
struct sha3_state *sctx = shash_desc_ctx(desc);
|
||||||
|
unsigned int i, inlen = sctx->partial;
|
||||||
|
|
||||||
|
sctx->buf[inlen++] = 0x06;
|
||||||
|
memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
|
||||||
|
sctx->buf[sctx->rsiz - 1] |= 0x80;
|
||||||
|
|
||||||
|
for (i = 0; i < sctx->rsizw; i++)
|
||||||
|
sctx->st[i] ^= ((u64 *) sctx->buf)[i];
|
||||||
|
|
||||||
|
keccakf(sctx->st);
|
||||||
|
|
||||||
|
for (i = 0; i < sctx->rsizw; i++)
|
||||||
|
sctx->st[i] = cpu_to_le64(sctx->st[i]);
|
||||||
|
|
||||||
|
memcpy(out, sctx->st, sctx->md_len);
|
||||||
|
|
||||||
|
memset(sctx, 0, sizeof(*sctx));
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct shash_alg sha3_224 = {
|
||||||
|
.digestsize = SHA3_224_DIGEST_SIZE,
|
||||||
|
.init = sha3_224_init,
|
||||||
|
.update = sha3_update,
|
||||||
|
.final = sha3_final,
|
||||||
|
.descsize = sizeof(struct sha3_state),
|
||||||
|
.base = {
|
||||||
|
.cra_name = "sha3-224",
|
||||||
|
.cra_driver_name = "sha3-224-generic",
|
||||||
|
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||||
|
.cra_blocksize = SHA3_224_BLOCK_SIZE,
|
||||||
|
.cra_module = THIS_MODULE,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct shash_alg sha3_256 = {
|
||||||
|
.digestsize = SHA3_256_DIGEST_SIZE,
|
||||||
|
.init = sha3_256_init,
|
||||||
|
.update = sha3_update,
|
||||||
|
.final = sha3_final,
|
||||||
|
.descsize = sizeof(struct sha3_state),
|
||||||
|
.base = {
|
||||||
|
.cra_name = "sha3-256",
|
||||||
|
.cra_driver_name = "sha3-256-generic",
|
||||||
|
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||||
|
.cra_blocksize = SHA3_256_BLOCK_SIZE,
|
||||||
|
.cra_module = THIS_MODULE,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct shash_alg sha3_384 = {
|
||||||
|
.digestsize = SHA3_384_DIGEST_SIZE,
|
||||||
|
.init = sha3_384_init,
|
||||||
|
.update = sha3_update,
|
||||||
|
.final = sha3_final,
|
||||||
|
.descsize = sizeof(struct sha3_state),
|
||||||
|
.base = {
|
||||||
|
.cra_name = "sha3-384",
|
||||||
|
.cra_driver_name = "sha3-384-generic",
|
||||||
|
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||||
|
.cra_blocksize = SHA3_384_BLOCK_SIZE,
|
||||||
|
.cra_module = THIS_MODULE,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct shash_alg sha3_512 = {
|
||||||
|
.digestsize = SHA3_512_DIGEST_SIZE,
|
||||||
|
.init = sha3_512_init,
|
||||||
|
.update = sha3_update,
|
||||||
|
.final = sha3_final,
|
||||||
|
.descsize = sizeof(struct sha3_state),
|
||||||
|
.base = {
|
||||||
|
.cra_name = "sha3-512",
|
||||||
|
.cra_driver_name = "sha3-512-generic",
|
||||||
|
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||||
|
.cra_blocksize = SHA3_512_BLOCK_SIZE,
|
||||||
|
.cra_module = THIS_MODULE,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static int __init sha3_generic_mod_init(void)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
ret = crypto_register_shash(&sha3_224);
|
||||||
|
if (ret < 0)
|
||||||
|
goto err_out;
|
||||||
|
ret = crypto_register_shash(&sha3_256);
|
||||||
|
if (ret < 0)
|
||||||
|
goto err_out_224;
|
||||||
|
ret = crypto_register_shash(&sha3_384);
|
||||||
|
if (ret < 0)
|
||||||
|
goto err_out_256;
|
||||||
|
ret = crypto_register_shash(&sha3_512);
|
||||||
|
if (ret < 0)
|
||||||
|
goto err_out_384;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
err_out_384:
|
||||||
|
crypto_unregister_shash(&sha3_384);
|
||||||
|
err_out_256:
|
||||||
|
crypto_unregister_shash(&sha3_256);
|
||||||
|
err_out_224:
|
||||||
|
crypto_unregister_shash(&sha3_224);
|
||||||
|
err_out:
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __exit sha3_generic_mod_fini(void)
|
||||||
|
{
|
||||||
|
crypto_unregister_shash(&sha3_224);
|
||||||
|
crypto_unregister_shash(&sha3_256);
|
||||||
|
crypto_unregister_shash(&sha3_384);
|
||||||
|
crypto_unregister_shash(&sha3_512);
|
||||||
|
}
|
||||||
|
|
||||||
|
module_init(sha3_generic_mod_init);
|
||||||
|
module_exit(sha3_generic_mod_fini);
|
||||||
|
|
||||||
|
MODULE_LICENSE("GPL");
|
||||||
|
MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm");
|
||||||
|
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-224");
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-224-generic");
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-256");
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-256-generic");
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-384");
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-384-generic");
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-512");
|
||||||
|
MODULE_ALIAS_CRYPTO("sha3-512-generic");
|
|
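The new generic driver above (crypto/sha3_generic.c upstream) is a straight Keccak sponge: the rate is rsiz = 200 - 2 * digest size bytes (136 for SHA3-256), and sha3_final() applies the FIPS 202 padding by appending 0x06 and setting the top bit of the last rate byte. A hedged sketch of consuming it through the shash API; the helper name and error handling are illustrative:

#include <linux/err.h>
#include <crypto/hash.h>

/* Hash buf[0..len) with "sha3-256"; out must hold 32 bytes. */
static int sha3_256_digest_buf(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* one-shot: init + update + final in a single call */
		err = crypto_shash_digest(desc, buf, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}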
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -16,7 +16,11 @@
 
 #include <crypto/internal/skcipher.h>
 #include <linux/bug.h>
+#include <linux/cryptouser.h>
 #include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+#include <net/netlink.h>
 
 #include "internal.h"
 
@@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
 	if (alg->cra_type == &crypto_blkcipher_type)
 		return sizeof(struct crypto_blkcipher *);
 
-	BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
-	       alg->cra_type != &crypto_givcipher_type);
+	if (alg->cra_type == &crypto_ablkcipher_type ||
+	    alg->cra_type == &crypto_givcipher_type)
+		return sizeof(struct crypto_ablkcipher *);
 
-	return sizeof(struct crypto_ablkcipher *);
+	return crypto_alg_extsize(alg);
 }
 
 static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
@@ -216,26 +221,118 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
+
+	alg->exit(skcipher);
+}
+
 static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
 {
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
+
 	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
 		return crypto_init_skcipher_ops_blkcipher(tfm);
 
-	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
-	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);
+	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
+	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
+		return crypto_init_skcipher_ops_ablkcipher(tfm);
 
-	return crypto_init_skcipher_ops_ablkcipher(tfm);
+	skcipher->setkey = alg->setkey;
+	skcipher->encrypt = alg->encrypt;
+	skcipher->decrypt = alg->decrypt;
+	skcipher->ivsize = alg->ivsize;
+	skcipher->keysize = alg->max_keysize;
+
+	if (alg->exit)
+		skcipher->base.exit = crypto_skcipher_exit_tfm;
+
+	if (alg->init)
+		return alg->init(skcipher);
+
+	return 0;
 }
 
+static void crypto_skcipher_free_instance(struct crypto_instance *inst)
+{
+	struct skcipher_instance *skcipher =
+		container_of(inst, struct skcipher_instance, s.base);
+
+	skcipher->free(skcipher);
+}
+
+static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute__ ((unused));
+static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
+						     base);
+
+	seq_printf(m, "type         : skcipher\n");
+	seq_printf(m, "async        : %s\n",
+		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
+	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
+	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
+	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
+	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
+	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
+}
+
+#ifdef CONFIG_NET
+static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_report_blkcipher rblkcipher;
+	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
+						     base);
+
+	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
+	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
+
+	rblkcipher.blocksize = alg->cra_blocksize;
+	rblkcipher.min_keysize = skcipher->min_keysize;
+	rblkcipher.max_keysize = skcipher->max_keysize;
+	rblkcipher.ivsize = skcipher->ivsize;
+
+	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+#else
+static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	return -ENOSYS;
+}
+#endif
+
 static const struct crypto_type crypto_skcipher_type2 = {
 	.extsize = crypto_skcipher_extsize,
 	.init_tfm = crypto_skcipher_init_tfm,
+	.free = crypto_skcipher_free_instance,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_skcipher_show,
+#endif
+	.report = crypto_skcipher_report,
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
-	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.type = CRYPTO_ALG_TYPE_SKCIPHER,
 	.tfmsize = offsetof(struct crypto_skcipher, base),
 };
 
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+			 const char *name, u32 type, u32 mask)
+{
+	spawn->base.frontend = &crypto_skcipher_type2;
+	return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
+
 struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
 {
@@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
 
+int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
+{
+	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
+				   type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
+
+static int skcipher_prepare_alg(struct skcipher_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	if (!alg->chunksize)
+		alg->chunksize = base->cra_blocksize;
+
+	base->cra_type = &crypto_skcipher_type2;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
+
+	return 0;
+}
+
+int crypto_register_skcipher(struct skcipher_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+	int err;
+
+	err = skcipher_prepare_alg(alg);
+	if (err)
+		return err;
+
+	return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_skcipher);
+
+void crypto_unregister_skcipher(struct skcipher_alg *alg)
+{
+	crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
+
+int crypto_register_skciphers(struct skcipher_alg *algs, int count)
+{
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		ret = crypto_register_skcipher(&algs[i]);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	for (--i; i >= 0; --i)
+		crypto_unregister_skcipher(&algs[i]);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_skciphers);
+
+void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
+{
+	int i;
+
+	for (i = count - 1; i >= 0; --i)
+		crypto_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
+
+int skcipher_register_instance(struct crypto_template *tmpl,
+			       struct skcipher_instance *inst)
+{
+	int err;
+
+	err = skcipher_prepare_alg(&inst->alg);
+	if (err)
+		return err;
+
+	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(skcipher_register_instance);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Symmetric key cipher type");
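With this change crypto_skcipher_type2 serves three generations of ciphers: legacy blkcipher and ablkcipher/givcipher algorithms through compatibility wrappers, and native skcipher_alg implementations registered through the new crypto_register_skcipher() helpers. A hedged skeleton of what a native registration looks like; the "demo" names and do-nothing callbacks are placeholders, not anything this patch adds:

#include <linux/module.h>
#include <crypto/internal/skcipher.h>

static int demo_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int keylen)
{
	return 0;	/* a real driver would stash the key in its ctx */
}

static int demo_crypt(struct skcipher_request *req)
{
	return -ENOSYS;	/* a real driver would transform req->src into req->dst */
}

static struct skcipher_alg demo_alg = {
	.base = {
		.cra_name		= "demo(example)",
		.cra_driver_name	= "demo-generic",
		.cra_priority		= 100,
		.cra_blocksize		= 1,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= 16,
	.max_keysize	= 16,
	.ivsize		= 16,
	/* .chunksize left 0: skcipher_prepare_alg() defaults it to cra_blocksize */
	.setkey		= demo_setkey,
	.encrypt	= demo_crypt,
	.decrypt	= demo_crypt,
};

/* paired with crypto_register_skcipher(&demo_alg) at module init and
 * crypto_unregister_skcipher(&demo_alg) at module exit */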
 crypto/tcrypt.c | 450
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -24,6 +24,7 @@
 
 #include <crypto/aead.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/init.h>
@@ -72,7 +73,8 @@ static char *check[] = {
 	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
 	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
 	"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
-	"lzo", "cts", "zlib", NULL
+	"lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
+	NULL
 };
 
 struct tcrypt_result {
@@ -91,76 +93,6 @@ static void tcrypt_complete(struct crypto_async_request *req, int err)
 	complete(&res->completion);
 }
 
-static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
-			       struct scatterlist *sg, int blen, int secs)
-{
-	unsigned long start, end;
-	int bcount;
-	int ret;
-
-	for (start = jiffies, end = start + secs * HZ, bcount = 0;
-	     time_before(jiffies, end); bcount++) {
-		if (enc)
-			ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
-		else
-			ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
-
-		if (ret)
-			return ret;
-	}
-
-	printk("%d operations in %d seconds (%ld bytes)\n",
-	       bcount, secs, (long)bcount * blen);
-	return 0;
-}
-
-static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
-			      struct scatterlist *sg, int blen)
-{
-	unsigned long cycles = 0;
-	int ret = 0;
-	int i;
-
-	local_irq_disable();
-
-	/* Warm-up run. */
-	for (i = 0; i < 4; i++) {
-		if (enc)
-			ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
-		else
-			ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
-
-		if (ret)
-			goto out;
-	}
-
-	/* The real thing. */
-	for (i = 0; i < 8; i++) {
-		cycles_t start, end;
-
-		start = get_cycles();
-		if (enc)
-			ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
-		else
-			ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
-		end = get_cycles();
-
-		if (ret)
-			goto out;
-
-		cycles += end - start;
-	}
-
-out:
-	local_irq_enable();
-
-	if (ret == 0)
-		printk("1 operation in %lu cycles (%d bytes)\n",
-		       (cycles + 4) / 8, blen);
-
-	return ret;
-}
-
 static inline int do_one_aead_op(struct aead_request *req, int ret)
 {
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -454,106 +386,6 @@ out_noxbuf:
 	return;
 }
 
-static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
-			      struct cipher_speed_template *template,
-			      unsigned int tcount, u8 *keysize)
-{
-	unsigned int ret, i, j, iv_len;
-	const char *key;
-	char iv[128];
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
-	const char *e;
-	u32 *b_size;
-
-	if (enc == ENCRYPT)
-		e = "encryption";
-	else
-		e = "decryption";
-
-	tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
-
-	if (IS_ERR(tfm)) {
-		printk("failed to load transform for %s: %ld\n", algo,
-		       PTR_ERR(tfm));
-		return;
-	}
-	desc.tfm = tfm;
-	desc.flags = 0;
-
-	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
-	       get_driver_name(crypto_blkcipher, tfm), e);
-
-	i = 0;
-	do {
-
-		b_size = block_sizes;
-		do {
-			struct scatterlist sg[TVMEMSIZE];
-
-			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
-				printk("template (%u) too big for "
-				       "tvmem (%lu)\n", *keysize + *b_size,
-				       TVMEMSIZE * PAGE_SIZE);
-				goto out;
-			}
-
-			printk("test %u (%d bit key, %d byte blocks): ", i,
-			       *keysize * 8, *b_size);
-
-			memset(tvmem[0], 0xff, PAGE_SIZE);
-
-			/* set key, plain text and IV */
-			key = tvmem[0];
-			for (j = 0; j < tcount; j++) {
-				if (template[j].klen == *keysize) {
-					key = template[j].key;
-					break;
-				}
-			}
-
-			ret = crypto_blkcipher_setkey(tfm, key, *keysize);
-			if (ret) {
-				printk("setkey() failed flags=%x\n",
-				       crypto_blkcipher_get_flags(tfm));
-				goto out;
-			}
-
-			sg_init_table(sg, TVMEMSIZE);
-			sg_set_buf(sg, tvmem[0] + *keysize,
-				   PAGE_SIZE - *keysize);
-			for (j = 1; j < TVMEMSIZE; j++) {
-				sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
-				memset (tvmem[j], 0xff, PAGE_SIZE);
-			}
-
-			iv_len = crypto_blkcipher_ivsize(tfm);
-			if (iv_len) {
-				memset(&iv, 0xff, iv_len);
-				crypto_blkcipher_set_iv(tfm, iv, iv_len);
-			}
-
-			if (secs)
-				ret = test_cipher_jiffies(&desc, enc, sg,
-							  *b_size, secs);
-			else
-				ret = test_cipher_cycles(&desc, enc, sg,
-							 *b_size);
-
-			if (ret) {
-				printk("%s() failed flags=%x\n", e, desc.flags);
-				break;
-			}
-			b_size++;
-			i++;
-		} while (*b_size);
-		keysize++;
-	} while (*keysize);
-
-out:
-	crypto_free_blkcipher(tfm);
-}
-
 static void test_hash_sg_init(struct scatterlist *sg)
 {
 	int i;
@@ -577,6 +409,127 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 	return ret;
 }
 
+struct test_mb_ahash_data {
+	struct scatterlist sg[TVMEMSIZE];
+	char result[64];
+	struct ahash_request *req;
+	struct tcrypt_result tresult;
+	char *xbuf[XBUFSIZE];
+};
+
+static void test_mb_ahash_speed(const char *algo, unsigned int sec,
+				struct hash_speed *speed)
+{
+	struct test_mb_ahash_data *data;
+	struct crypto_ahash *tfm;
+	unsigned long start, end;
+	unsigned long cycles;
+	unsigned int i, j, k;
+	int ret;
+
+	data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
+	if (!data)
+		return;
+
+	tfm = crypto_alloc_ahash(algo, 0, 0);
+	if (IS_ERR(tfm)) {
+		pr_err("failed to load transform for %s: %ld\n",
+			algo, PTR_ERR(tfm));
+		goto free_data;
+	}
+
+	for (i = 0; i < 8; ++i) {
+		if (testmgr_alloc_buf(data[i].xbuf))
+			goto out;
+
+		init_completion(&data[i].tresult.completion);
+
+		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
+		if (!data[i].req) {
+			pr_err("alg: hash: Failed to allocate request for %s\n",
+			       algo);
+			goto out;
+		}
+
+		ahash_request_set_callback(data[i].req, 0,
+					   tcrypt_complete, &data[i].tresult);
+		test_hash_sg_init(data[i].sg);
+	}
+
+	pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
+		get_driver_name(crypto_ahash, tfm));
+
+	for (i = 0; speed[i].blen != 0; i++) {
+		/* For some reason this only tests digests. */
+		if (speed[i].blen != speed[i].plen)
+			continue;
+
+		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+			pr_err("template (%u) too big for tvmem (%lu)\n",
+			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+			goto out;
+		}
+
+		if (speed[i].klen)
+			crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
+
+		for (k = 0; k < 8; k++)
+			ahash_request_set_crypt(data[k].req, data[k].sg,
+						data[k].result, speed[i].blen);
+
+		pr_info("test%3u "
+			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
+			i, speed[i].blen, speed[i].plen,
+			speed[i].blen / speed[i].plen);
+
+		start = get_cycles();
+
+		for (k = 0; k < 8; k++) {
+			ret = crypto_ahash_digest(data[k].req);
+			if (ret == -EINPROGRESS) {
+				ret = 0;
+				continue;
+			}
+
+			if (ret)
+				break;
+
+			complete(&data[k].tresult.completion);
+			data[k].tresult.err = 0;
+		}
+
+		for (j = 0; j < k; j++) {
+			struct tcrypt_result *tr = &data[j].tresult;
+
+			wait_for_completion(&tr->completion);
+			if (tr->err)
+				ret = tr->err;
+		}
+
+		end = get_cycles();
+		cycles = end - start;
+		pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+			cycles, cycles / (8 * speed[i].blen));
+
+		if (ret) {
+			pr_err("At least one hashing failed ret=%d\n", ret);
+			break;
+		}
+	}
+
+out:
+	for (k = 0; k < 8; ++k)
+		ahash_request_free(data[k].req);
+
+	for (k = 0; k < 8; ++k)
+		testmgr_free_buf(data[k].xbuf);
+
+	crypto_free_ahash(tfm);
+
+free_data:
+	kfree(data);
+}
+
 static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
				     char *out, int secs)
 {
@@ -812,7 +765,7 @@ static void test_hash_speed(const char *algo, unsigned int secs,
 	return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
 }
 
-static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
+static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
 {
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
 		struct tcrypt_result *tr = req->base.data;
@@ -825,7 +778,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 	return ret;
 }
 
-static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
+static int test_acipher_jiffies(struct skcipher_request *req, int enc,
				int blen, int secs)
 {
 	unsigned long start, end;
@@ -836,10 +789,10 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
 	     time_before(jiffies, end); bcount++) {
 		if (enc)
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_encrypt(req));
+						crypto_skcipher_encrypt(req));
 		else
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_decrypt(req));
+						crypto_skcipher_decrypt(req));
 
 		if (ret)
 			return ret;
@@ -850,7 +803,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
 	return 0;
 }
 
-static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
+static int test_acipher_cycles(struct skcipher_request *req, int enc,
			       int blen)
 {
 	unsigned long cycles = 0;
@@ -861,10 +814,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
 	for (i = 0; i < 4; i++) {
 		if (enc)
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_encrypt(req));
+						crypto_skcipher_encrypt(req));
 		else
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_decrypt(req));
+						crypto_skcipher_decrypt(req));
 
 		if (ret)
 			goto out;
@@ -877,10 +830,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
 		start = get_cycles();
 		if (enc)
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_encrypt(req));
+						crypto_skcipher_encrypt(req));
 		else
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_decrypt(req));
+						crypto_skcipher_decrypt(req));
 		end = get_cycles();
 
 		if (ret)
@@ -897,16 +850,16 @@ out:
 	return ret;
 }
 
-static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
+static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
				struct cipher_speed_template *template,
-			       unsigned int tcount, u8 *keysize)
+				unsigned int tcount, u8 *keysize, bool async)
 {
 	unsigned int ret, i, j, k, iv_len;
 	struct tcrypt_result tresult;
 	const char *key;
 	char iv[128];
-	struct ablkcipher_request *req;
-	struct crypto_ablkcipher *tfm;
+	struct skcipher_request *req;
+	struct crypto_skcipher *tfm;
 	const char *e;
 	u32 *b_size;
 
@@ -917,7 +870,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 
 	init_completion(&tresult.completion);
 
-	tfm = crypto_alloc_ablkcipher(algo, 0, 0);
+	tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
 
 	if (IS_ERR(tfm)) {
 		pr_err("failed to load transform for %s: %ld\n", algo,
@@ -926,17 +879,17 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 	}
 
 	pr_info("\ntesting speed of async %s (%s) %s\n", algo,
-			get_driver_name(crypto_ablkcipher, tfm), e);
+			get_driver_name(crypto_skcipher, tfm), e);
 
-	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
 		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
		       algo);
 		goto out;
 	}
 
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					tcrypt_complete, &tresult);
 
 	i = 0;
 	do {
@@ -966,12 +919,12 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
			}
		}
 
-			crypto_ablkcipher_clear_flags(tfm, ~0);
+			crypto_skcipher_clear_flags(tfm, ~0);
 
-			ret = crypto_ablkcipher_setkey(tfm, key, *keysize);
+			ret = crypto_skcipher_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
-					crypto_ablkcipher_get_flags(tfm));
+					crypto_skcipher_get_flags(tfm));
				goto out_free_req;
			}
 
@@ -995,11 +948,11 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
			}
 
-			iv_len = crypto_ablkcipher_ivsize(tfm);
+			iv_len = crypto_skcipher_ivsize(tfm);
			if (iv_len)
				memset(&iv, 0xff, iv_len);
 
-			ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
+			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
			if (secs)
				ret = test_acipher_jiffies(req, enc,
@@ -1010,7 +963,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 
			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
-				       crypto_ablkcipher_get_flags(tfm));
+				       crypto_skcipher_get_flags(tfm));
				break;
			}
			b_size++;
@@ -1020,9 +973,25 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 	} while (*keysize);
 
 out_free_req:
-	ablkcipher_request_free(req);
+	skcipher_request_free(req);
 out:
-	crypto_free_ablkcipher(tfm);
+	crypto_free_skcipher(tfm);
+}
+
+static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
+			       struct cipher_speed_template *template,
+			       unsigned int tcount, u8 *keysize)
+{
+	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
+				   true);
+}
+
+static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
+			      struct cipher_speed_template *template,
+			      unsigned int tcount, u8 *keysize)
+{
+	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
+				   false);
 }
 
 static void test_available(void)
@@ -1284,6 +1253,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		ret += tcrypt_test("crct10dif");
 		break;
 
+	case 48:
+		ret += tcrypt_test("sha3-224");
+		break;
+
+	case 49:
+		ret += tcrypt_test("sha3-256");
+		break;
+
+	case 50:
+		ret += tcrypt_test("sha3-384");
+		break;
+
+	case 51:
+		ret += tcrypt_test("sha3-512");
+		break;
+
 	case 100:
 		ret += tcrypt_test("hmac(md5)");
 		break;
@@ -1328,6 +1313,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		ret += tcrypt_test("hmac(crc32)");
 		break;
 
+	case 111:
+		ret += tcrypt_test("hmac(sha3-224)");
+		break;
+
+	case 112:
+		ret += tcrypt_test("hmac(sha3-256)");
+		break;
+
+	case 113:
+		ret += tcrypt_test("hmac(sha3-384)");
+		break;
+
+	case 114:
+		ret += tcrypt_test("hmac(sha3-512)");
+		break;
+
 	case 150:
 		ret += tcrypt_test("ansi_cprng");
 		break;
@@ -1406,6 +1407,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
				  speed_template_32_48_64);
 		test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
				  speed_template_32_48_64);
+		test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+				  speed_template_16_24_32);
+		test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+				  speed_template_16_24_32);
 		test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
				  speed_template_16_24_32);
 		test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
@@ -1691,6 +1696,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		test_hash_speed("poly1305", sec, poly1305_speed_template);
 		if (mode > 300 && mode < 400) break;
 
+	case 322:
+		test_hash_speed("sha3-224", sec, generic_hash_speed_template);
+		if (mode > 300 && mode < 400) break;
+
+	case 323:
+		test_hash_speed("sha3-256", sec, generic_hash_speed_template);
+		if (mode > 300 && mode < 400) break;
+
+	case 324:
+		test_hash_speed("sha3-384", sec, generic_hash_speed_template);
+		if (mode > 300 && mode < 400) break;
+
+	case 325:
+		test_hash_speed("sha3-512", sec, generic_hash_speed_template);
+		if (mode > 300 && mode < 400) break;
+
 	case 399:
 		break;
 
@@ -1770,6 +1791,35 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		test_ahash_speed("rmd320", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
 
+	case 418:
+		test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 419:
+		test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 420:
+		test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 421:
+		test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 422:
+		test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 423:
+		test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
+	case 424:
+		test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
+		if (mode > 400 && mode < 500) break;
+
 	case 499:
 		break;
 
@@ -1790,6 +1840,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
				   speed_template_32_48_64);
 		test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
				   speed_template_32_48_64);
+		test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
 		test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
				   speed_template_16_24_32);
 		test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
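The tcrypt changes above wire SHA-3 into test modes 48-51 (correctness), 111-114 (HMAC), and 322-325/418-421 (speed), and add a multibuffer hash speed test (modes 422-424) that submits eight digests before reaping any completions. A hedged sketch of that submit-then-reap pattern, distilled from the timing loop in test_mb_ahash_speed(); the helper name and fixed-size arrays are illustrative, and struct tcrypt_result is the completion wrapper tcrypt.c defines:

/* Submit all eight digests first, then wait for whatever went async. */
static int mb_digest_round(struct ahash_request *reqs[8],
			   struct tcrypt_result results[8])
{
	unsigned int j, k;
	int ret = 0;

	for (k = 0; k < 8; k++) {
		ret = crypto_ahash_digest(reqs[k]);
		if (ret == -EINPROGRESS) {	/* async: reaped below */
			ret = 0;
			continue;
		}
		if (ret)			/* synchronous failure */
			break;
		/* synchronous success: mark it done ourselves */
		complete(&results[k].completion);
		results[k].err = 0;
	}

	for (j = 0; j < k; j++) {		/* reap only what was submitted */
		wait_for_completion(&results[j].completion);
		if (results[j].err)
			ret = results[j].err;
	}

	return ret;
}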
290
crypto/testmgr.c
290
crypto/testmgr.c
|
@ -32,6 +32,7 @@
|
||||||
#include <crypto/rng.h>
|
#include <crypto/rng.h>
|
||||||
#include <crypto/drbg.h>
|
#include <crypto/drbg.h>
|
||||||
#include <crypto/akcipher.h>
|
#include <crypto/akcipher.h>
|
||||||
|
#include <crypto/kpp.h>
|
||||||
|
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
|
|
||||||
|
@ -120,6 +121,11 @@ struct akcipher_test_suite {
|
||||||
unsigned int count;
|
unsigned int count;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct kpp_test_suite {
|
||||||
|
struct kpp_testvec *vecs;
|
||||||
|
unsigned int count;
|
||||||
|
};
|
||||||
|
|
||||||
struct alg_test_desc {
|
struct alg_test_desc {
|
||||||
const char *alg;
|
const char *alg;
|
||||||
int (*test)(const struct alg_test_desc *desc, const char *driver,
|
int (*test)(const struct alg_test_desc *desc, const char *driver,
|
||||||
|
@ -134,6 +140,7 @@ struct alg_test_desc {
|
||||||
struct cprng_test_suite cprng;
|
struct cprng_test_suite cprng;
|
||||||
struct drbg_test_suite drbg;
|
struct drbg_test_suite drbg;
|
||||||
struct akcipher_test_suite akcipher;
|
struct akcipher_test_suite akcipher;
|
||||||
|
struct kpp_test_suite kpp;
|
||||||
} suite;
|
} suite;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1777,8 +1784,135 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int do_test_rsa(struct crypto_akcipher *tfm,
|
static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
|
||||||
struct akcipher_testvec *vecs)
|
const char *alg)
|
||||||
|
{
|
||||||
|
struct kpp_request *req;
|
||||||
|
void *input_buf = NULL;
|
||||||
|
void *output_buf = NULL;
|
||||||
|
struct tcrypt_result result;
|
||||||
|
unsigned int out_len_max;
|
||||||
|
int err = -ENOMEM;
|
||||||
|
struct scatterlist src, dst;
|
||||||
|
|
||||||
|
req = kpp_request_alloc(tfm, GFP_KERNEL);
|
||||||
|
if (!req)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
init_completion(&result.completion);
|
||||||
|
|
||||||
|
err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
|
||||||
|
if (err < 0)
|
||||||
|
goto free_req;
|
||||||
|
|
||||||
|
out_len_max = crypto_kpp_maxsize(tfm);
|
||||||
|
output_buf = kzalloc(out_len_max, GFP_KERNEL);
|
||||||
|
if (!output_buf) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto free_req;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Use appropriate parameter as base */
|
||||||
|
kpp_request_set_input(req, NULL, 0);
|
||||||
|
sg_init_one(&dst, output_buf, out_len_max);
|
||||||
|
kpp_request_set_output(req, &dst, out_len_max);
|
||||||
|
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||||
|
tcrypt_complete, &result);
|
||||||
|
|
||||||
|
/* Compute public key */
|
||||||
|
err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
|
||||||
|
if (err) {
|
||||||
|
pr_err("alg: %s: generate public key test failed. err %d\n",
|
||||||
|
alg, err);
|
||||||
|
goto free_output;
|
||||||
|
}
|
||||||
|
/* Verify calculated public key */
|
||||||
|
if (memcmp(vec->expected_a_public, sg_virt(req->dst),
|
||||||
|
vec->expected_a_public_size)) {
|
||||||
|
pr_err("alg: %s: generate public key test failed. Invalid output\n",
|
||||||
|
alg);
|
||||||
|
err = -EINVAL;
|
||||||
|
goto free_output;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Calculate shared secret key by using counter part (b) public key. */
|
||||||
|
input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
|
||||||
|
if (!input_buf) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto free_output;
|
||||||
|
}
|
||||||
|
|
||||||
|
memcpy(input_buf, vec->b_public, vec->b_public_size);
|
||||||
|
sg_init_one(&src, input_buf, vec->b_public_size);
|
||||||
|
sg_init_one(&dst, output_buf, out_len_max);
|
||||||
|
kpp_request_set_input(req, &src, vec->b_public_size);
|
||||||
|
kpp_request_set_output(req, &dst, out_len_max);
|
||||||
|
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||||
|
tcrypt_complete, &result);
|
||||||
|
err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
|
||||||
|
if (err) {
|
||||||
|
pr_err("alg: %s: compute shard secret test failed. err %d\n",
|
||||||
|
alg, err);
|
||||||
|
goto free_all;
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* verify shared secret from which the user will derive
|
||||||
|
* secret key by executing whatever hash it has chosen
|
||||||
|
*/
|
||||||
|
if (memcmp(vec->expected_ss, sg_virt(req->dst),
|
||||||
|
vec->expected_ss_size)) {
|
||||||
|
pr_err("alg: %s: compute shared secret test failed. Invalid output\n",
|
||||||
|
alg);
|
||||||
|
err = -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
free_all:
|
||||||
|
kfree(input_buf);
|
||||||
|
free_output:
|
||||||
|
kfree(output_buf);
|
||||||
|
free_req:
|
||||||
|
kpp_request_free(req);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int test_kpp(struct crypto_kpp *tfm, const char *alg,
|
||||||
|
struct kpp_testvec *vecs, unsigned int tcount)
|
||||||
|
{
|
||||||
|
int ret, i;
|
||||||
|
|
||||||
|
for (i = 0; i < tcount; i++) {
|
||||||
|
ret = do_test_kpp(tfm, vecs++, alg);
|
||||||
|
if (ret) {
|
||||||
|
pr_err("alg: %s: test failed on vector %d, err=%d\n",
|
||||||
|
alg, i + 1, ret);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
|
||||||
|
u32 type, u32 mask)
|
||||||
|
{
|
||||||
|
struct crypto_kpp *tfm;
|
||||||
|
int err = 0;
|
||||||
|
|
||||||
|
tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
|
||||||
|
if (IS_ERR(tfm)) {
|
||||||
|
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
|
||||||
|
driver, PTR_ERR(tfm));
|
||||||
|
return PTR_ERR(tfm);
|
||||||
|
}
|
||||||
|
if (desc->suite.kpp.vecs)
|
||||||
|
err = test_kpp(tfm, desc->alg, desc->suite.kpp.vecs,
|
||||||
|
desc->suite.kpp.count);
|
||||||
|
|
||||||
|
crypto_free_kpp(tfm);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
+static int test_akcipher_one(struct crypto_akcipher *tfm,
+			     struct akcipher_testvec *vecs)
 {
 	char *xbuf[XBUFSIZE];
 	struct akcipher_request *req;

@@ -1807,6 +1941,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
 	if (err)
 		goto free_req;

+	err = -ENOMEM;
 	out_len_max = crypto_akcipher_maxsize(tfm);
 	outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
 	if (!outbuf_enc)

@@ -1829,17 +1964,18 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
 	/* Run RSA encrypt - c = m^e mod n;*/
 	err = wait_async_op(&result, crypto_akcipher_encrypt(req));
 	if (err) {
-		pr_err("alg: rsa: encrypt test failed. err %d\n", err);
+		pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
 		goto free_all;
 	}
 	if (req->dst_len != vecs->c_size) {
-		pr_err("alg: rsa: encrypt test failed. Invalid output len\n");
+		pr_err("alg: akcipher: encrypt test failed. Invalid output len\n");
 		err = -EINVAL;
 		goto free_all;
 	}
 	/* verify that encrypted message is equal to expected */
 	if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
-		pr_err("alg: rsa: encrypt test failed. Invalid output\n");
+		pr_err("alg: akcipher: encrypt test failed. Invalid output\n");
+		hexdump(outbuf_enc, vecs->c_size);
 		err = -EINVAL;
 		goto free_all;
 	}

@@ -1867,18 +2003,22 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
 	/* Run RSA decrypt - m = c^d mod n;*/
 	err = wait_async_op(&result, crypto_akcipher_decrypt(req));
 	if (err) {
-		pr_err("alg: rsa: decrypt test failed. err %d\n", err);
+		pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
 		goto free_all;
 	}
 	out_len = req->dst_len;
-	if (out_len != vecs->m_size) {
-		pr_err("alg: rsa: decrypt test failed. Invalid output len\n");
+	if (out_len < vecs->m_size) {
+		pr_err("alg: akcipher: decrypt test failed. "
+		       "Invalid output len %u\n", out_len);
 		err = -EINVAL;
 		goto free_all;
 	}
 	/* verify that decrypted message is equal to the original msg */
-	if (memcmp(vecs->m, outbuf_dec, vecs->m_size)) {
-		pr_err("alg: rsa: decrypt test failed. Invalid output\n");
+	if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) ||
+	    memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size,
+		   vecs->m_size)) {
+		pr_err("alg: akcipher: decrypt test failed. Invalid output\n");
+		hexdump(outbuf_dec, out_len);
 		err = -EINVAL;
 	}
 free_all:

@@ -1891,28 +2031,22 @@ free_xbuf:
 	return err;
 }

-static int test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs,
-		    unsigned int tcount)
-{
-	int ret, i;
-
-	for (i = 0; i < tcount; i++) {
-		ret = do_test_rsa(tfm, vecs++);
-		if (ret) {
-			pr_err("alg: rsa: test failed on vector %d, err=%d\n",
-			       i + 1, ret);
-			return ret;
-		}
-	}
-	return 0;
-}
-
 static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
 			 struct akcipher_testvec *vecs, unsigned int tcount)
 {
-	if (strncmp(alg, "rsa", 3) == 0)
-		return test_rsa(tfm, vecs, tcount);
+	const char *algo =
+		crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
+	int ret, i;
+
+	for (i = 0; i < tcount; i++) {
+		ret = test_akcipher_one(tfm, vecs++);
+		if (!ret)
+			continue;
+
+		pr_err("alg: akcipher: test %d failed for %s, err=%d\n",
+		       i + 1, algo, ret);
+		return ret;
+	}

 	return 0;
 }
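The reworked decrypt check above is where the "RSA no longer does explicit leading zero removal" change from the pull summary becomes visible: the transform may now return the plaintext left-padded with zero bytes up to req->dst_len, so the test accepts any all-zero prefix before comparing. The same acceptance rule as a standalone sketch (plain C, illustration only; the kernel code uses memchr_inv() rather than an open-coded loop):

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Accept `out` iff it equals `expected` left-padded with zero bytes. */
static bool zero_padded_match(const unsigned char *out, size_t out_len,
			      const unsigned char *expected, size_t exp_len)
{
	size_t pad, i;

	if (out_len < exp_len)
		return false;

	pad = out_len - exp_len;
	for (i = 0; i < pad; i++)	/* kernel: memchr_inv(out, 0, pad) */
		if (out[i] != 0)
			return false;

	return memcmp(out + pad, expected, exp_len) == 0;
}
```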
@@ -2728,6 +2862,16 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "dh",
+		.test = alg_test_kpp,
+		.fips_allowed = 1,
+		.suite = {
+			.kpp = {
+				.vecs = dh_tv_template,
+				.count = DH_TEST_VECTORS
+			}
+		}
 	}, {
 		.alg = "digest_null",
 		.test = alg_test_null,
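The dh_tv_template vectors exercise the KPP path registered here. For context, a hedged sketch of how the packed secret that crypto_kpp_set_secret() expects is built from raw DH parameters, assuming the 4.8 helpers in include/crypto/dh.h (crypto_dh_key_len(), crypto_dh_encode_key()); the wrapper name is hypothetical:

```c
#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

/* Illustration only: pack DH params (p, g, private key) for set_secret(). */
static int demo_dh_set_secret(struct crypto_kpp *tfm, const struct dh *params)
{
	unsigned int len = crypto_dh_key_len(params);
	char *buf;
	int err;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = crypto_dh_encode_key(buf, len, params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);

	kzfree(buf);	/* secret material: zero before freeing */
	return err;
}
```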
@@ -3156,6 +3300,16 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "ecdh",
+		.test = alg_test_kpp,
+		.fips_allowed = 1,
+		.suite = {
+			.kpp = {
+				.vecs = ecdh_tv_template,
+				.count = ECDH_TEST_VECTORS
+			}
+		}
 	}, {
 		.alg = "gcm(aes)",
 		.test = alg_test_aead,
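ECDH secrets are packed the same way through include/crypto/ecdh.h, with a curve id in place of explicit p/g values. A parallel hedged sketch, assuming the 4.8 struct ecdh layout, crypto_ecdh_key_len()/crypto_ecdh_encode_key(), and the ECC_CURVE_NIST_P256 constant; the wrapper name is hypothetical:

```c
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

/* Illustration only: pack a NIST P-256 private key for set_secret(). */
static int demo_ecdh_set_secret(struct crypto_kpp *tfm,
				char *priv_key, unsigned short key_size)
{
	struct ecdh p = {
		.curve_id = ECC_CURVE_NIST_P256,
		.key	  = priv_key,
		.key_size = key_size,
	};
	unsigned int len = crypto_ecdh_key_len(&p);
	char *buf;
	int err;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = crypto_ecdh_encode_key(buf, len, &p);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);

	kzfree(buf);
	return err;
}
```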
@@ -3248,6 +3402,46 @@ static const struct alg_test_desc alg_test_descs[] = {
 				.count = HMAC_SHA256_TEST_VECTORS
 			}
 		}
+	}, {
+		.alg = "hmac(sha3-224)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = hmac_sha3_224_tv_template,
+				.count = HMAC_SHA3_224_TEST_VECTORS
+			}
+		}
+	}, {
+		.alg = "hmac(sha3-256)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = hmac_sha3_256_tv_template,
+				.count = HMAC_SHA3_256_TEST_VECTORS
+			}
+		}
+	}, {
+		.alg = "hmac(sha3-384)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = hmac_sha3_384_tv_template,
+				.count = HMAC_SHA3_384_TEST_VECTORS
+			}
+		}
+	}, {
+		.alg = "hmac(sha3-512)",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = hmac_sha3_512_tv_template,
+				.count = HMAC_SHA3_512_TEST_VECTORS
+			}
+		}
 	}, {
 		.alg = "hmac(sha384)",
 		.test = alg_test_hash,
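These entries wire the new SHA3 HMAC modes into the self-test harness. For reference, a minimal sketch (not from this diff) of computing hmac(sha3-256) through the synchronous shash API; error handling is abbreviated and the function name is illustrative. The caller sizes `out` with crypto_shash_digestsize() (32 bytes here).

```c
#include <crypto/hash.h>
#include <linux/err.h>

/* Illustration only: one-shot HMAC-SHA3-256 over `data`. */
static int demo_hmac_sha3_256(const u8 *key, unsigned int keylen,
			      const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("hmac(sha3-256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}
```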
@@ -3658,6 +3852,46 @@ static const struct alg_test_desc alg_test_descs[] = {
 				.count = SHA256_TEST_VECTORS
 			}
 		}
+	}, {
+		.alg = "sha3-224",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = sha3_224_tv_template,
+				.count = SHA3_224_TEST_VECTORS
+			}
+		}
+	}, {
+		.alg = "sha3-256",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = sha3_256_tv_template,
+				.count = SHA3_256_TEST_VECTORS
+			}
+		}
+	}, {
+		.alg = "sha3-384",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = sha3_384_tv_template,
+				.count = SHA3_384_TEST_VECTORS
+			}
+		}
+	}, {
+		.alg = "sha3-512",
+		.test = alg_test_hash,
+		.fips_allowed = 1,
+		.suite = {
+			.hash = {
+				.vecs = sha3_512_tv_template,
+				.count = SHA3_512_TEST_VECTORS
+			}
+		}
 	}, {
 		.alg = "sha384",
 		.test = alg_test_hash,
crypto/testmgr.h — 1036 changes (file diff suppressed because it is too large).
drivers/char/hw_random/Kconfig:

@@ -90,7 +90,7 @@ config HW_RANDOM_BCM63XX

 config HW_RANDOM_BCM2835
 	tristate "Broadcom BCM2835 Random Number Generator support"
-	depends on ARCH_BCM2835
+	depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number

@@ -396,6 +396,20 @@ config HW_RANDOM_PIC32

 	  If unsure, say Y.

+config HW_RANDOM_MESON
+	tristate "Amlogic Meson Random Number Generator support"
+	depends on HW_RANDOM
+	depends on ARCH_MESON || COMPILE_TEST
+	default y
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Amlogic Meson SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called meson-rng.
+
+	  If unsure, say Y.
+
 endif # HW_RANDOM

 config UML_RANDOM
drivers/char/hw_random/Makefile:

@@ -34,3 +34,4 @@ obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
 obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
+obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
drivers/char/hw_random/bcm2835-rng.c:

@@ -19,6 +19,7 @@
 #define RNG_CTRL	0x0
 #define RNG_STATUS	0x4
 #define RNG_DATA	0x8
+#define RNG_INT_MASK	0x10

 /* enable rng */
 #define RNG_RBGEN	0x1

@@ -26,10 +27,24 @@
 /* the initial numbers generated are "less random" so will be discarded */
 #define RNG_WARMUP_COUNT 0x40000

+#define RNG_INT_OFF	0x1
+
+static void __init nsp_rng_init(void __iomem *base)
+{
+	u32 val;
+
+	/* mask the interrupt */
+	val = readl(base + RNG_INT_MASK);
+	val |= RNG_INT_OFF;
+	writel(val, base + RNG_INT_MASK);
+}
+
 static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
 			    bool wait)
 {
 	void __iomem *rng_base = (void __iomem *)rng->priv;
+	u32 max_words = max / sizeof(u32);
+	u32 num_words, count;

 	while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
 		if (!wait)

@@ -37,8 +52,14 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
 		cpu_relax();
 	}

-	*(u32 *)buf = __raw_readl(rng_base + RNG_DATA);
-	return sizeof(u32);
+	num_words = readl(rng_base + RNG_STATUS) >> 24;
+	if (num_words > max_words)
+		num_words = max_words;
+
+	for (count = 0; count < num_words; count++)
+		((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+
+	return num_words * sizeof(u32);
 }

 static struct hwrng bcm2835_rng_ops = {

@@ -46,10 +67,19 @@ static struct hwrng bcm2835_rng_ops = {
 	.read = bcm2835_rng_read,
 };

+static const struct of_device_id bcm2835_rng_of_match[] = {
+	{ .compatible = "brcm,bcm2835-rng"},
+	{ .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
+	{ .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+	{},
+};
+
 static int bcm2835_rng_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
+	void (*rng_setup)(void __iomem *base);
+	const struct of_device_id *rng_id;
 	void __iomem *rng_base;
 	int err;

@@ -61,6 +91,15 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
 	}
 	bcm2835_rng_ops.priv = (unsigned long)rng_base;

+	rng_id = of_match_node(bcm2835_rng_of_match, np);
+	if (!rng_id)
+		return -EINVAL;
+
+	/* Check for rng init function, execute it */
+	rng_setup = rng_id->data;
+	if (rng_setup)
+		rng_setup(rng_base);
+
 	/* set warm-up count & enable */
 	__raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
 	__raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);

@@ -90,10 +129,6 @@ static int bcm2835_rng_remove(struct platform_device *pdev)
 	return 0;
 }

-static const struct of_device_id bcm2835_rng_of_match[] = {
-	{ .compatible = "brcm,bcm2835-rng", },
-	{},
-};
-
 MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);

 static struct platform_driver bcm2835_rng_driver = {
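The reworked bcm2835_rng_read() now drains as many 32-bit words as the top byte of RNG_STATUS reports, instead of returning one word per call. Consumers normally see this through the hwrng core rather than the driver directly; a small illustrative userspace reader (plain C; the device path and buffer size are only an example):

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];
	ssize_t n, i;
	int fd = open("/dev/hwrng", O_RDONLY);	/* hwrng core char device */

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));	/* blocks until entropy is available */
	if (n > 0) {
		for (i = 0; i < n; i++)
			printf("%02x", buf[i]);
		putchar('\n');
	}

	close(fd);
	return n > 0 ? 0 : 1;
}
```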
drivers/char/hw_random/exynos-rng.c:

@@ -45,12 +45,12 @@ struct exynos_rng {

 static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
 {
-	return __raw_readl(rng->mem + offset);
+	return readl_relaxed(rng->mem + offset);
 }

 static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
 {
-	__raw_writel(val, rng->mem + offset);
+	writel_relaxed(val, rng->mem + offset);
 }

 static int exynos_rng_configure(struct exynos_rng *exynos_rng)
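On the accessor swap above: __raw_readl()/__raw_writel() are native-endian and impose no ordering, while the _relaxed() variants fix the register byte order (little-endian) but still omit the barriers that plain readl()/writel() include, which makes them a safe yet cheap choice for this polled register file. A hedged sketch of the distinction (ARM-flavoured semantics; the demo function is illustrative):

```c
#include <linux/io.h>

/* Illustration only: the three flavours of a 32-bit MMIO read. */
static void demo_mmio_read_flavours(void __iomem *reg)
{
	u32 a, b, c;

	a = __raw_readl(reg);	/* native CPU endianness, no barriers */
	b = readl_relaxed(reg);	/* little-endian register, no barriers */
	c = readl(reg);		/* little-endian register, ordered against
				 * surrounding memory accesses (DMA-safe) */
	(void)a; (void)b; (void)c;
}
```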
drivers/char/hw_random/meson-rng.c (new file):

@@ -0,0 +1,131 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+
+#define RNG_DATA 0x00
+
+struct meson_rng_data {
+	void __iomem *base;
+	struct platform_device *pdev;
+	struct hwrng rng;
+};
+
+static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+	struct meson_rng_data *data =
+			container_of(rng, struct meson_rng_data, rng);
+
+	if (max < sizeof(u32))
+		return 0;
+
+	*(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
+
+	return sizeof(u32);
+}
+
+static int meson_rng_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct meson_rng_data *data;
+	struct resource *res;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->pdev = pdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(data->base))
+		return PTR_ERR(data->base);
+
+	data->rng.name = pdev->name;
+	data->rng.read = meson_rng_read;
+
+	platform_set_drvdata(pdev, data);
+
+	return devm_hwrng_register(dev, &data->rng);
+}
+
+static const struct of_device_id meson_rng_of_match[] = {
+	{ .compatible = "amlogic,meson-rng", },
+	{},
+};
+
+static struct platform_driver meson_rng_driver = {
+	.probe = meson_rng_probe,
+	.driver = {
+		.name = "meson-rng",
+		.of_match_table = meson_rng_of_match,
+	},
+};
+
+module_platform_driver(meson_rng_driver);
+
+MODULE_ALIAS("platform:meson-rng");
+MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
+MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
drivers/char/hw_random/omap-rng.c:

@@ -384,7 +384,12 @@ static int omap_rng_probe(struct platform_device *pdev)
 	}

 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_sync(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
+		pm_runtime_put_noidle(&pdev->dev);
+		goto err_ioremap;
+	}

 	ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
 				get_omap_rng_device_details(priv);

@@ -435,8 +440,15 @@ static int __maybe_unused omap_rng_suspend(struct device *dev)
 static int __maybe_unused omap_rng_resume(struct device *dev)
 {
 	struct omap_rng_dev *priv = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret) {
+		dev_err(dev, "Failed to runtime_get device: %d\n", ret);
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}

-	pm_runtime_get_sync(dev);
 	priv->pdata->init(priv);

 	return 0;
Some files were not shown because too many files have changed in this diff.