Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Add speed testing on 1420-byte blocks for networking

  Algorithms:
   - Improve performance of chacha on ARM for network packets
   - Improve performance of aegis128 on ARM for network packets

  Drivers:
   - Add support for Keem Bay OCS AES/SM4
   - Add support for QAT 4xxx devices
   - Enable crypto-engine retry mechanism in caam
   - Enable support for crypto engine on sdm845 in qce
   - Add HiSilicon PRNG driver support"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (161 commits)
  crypto: qat - add capability detection logic in qat_4xxx
  crypto: qat - add AES-XTS support for QAT GEN4 devices
  crypto: qat - add AES-CTR support for QAT GEN4 devices
  crypto: atmel-i2c - select CONFIG_BITREVERSE
  crypto: hisilicon/trng - replace atomic_add_return()
  crypto: keembay - Add support for Keem Bay OCS AES/SM4
  dt-bindings: Add Keem Bay OCS AES bindings
  crypto: aegis128 - avoid spurious references to crypto_aegis128_update_simd
  crypto: seed - remove trailing semicolon in macro definition
  crypto: x86/poly1305 - Use TEST %reg,%reg instead of CMP $0,%reg
  crypto: x86/sha512 - Use TEST %reg,%reg instead of CMP $0,%reg
  crypto: aesni - Use TEST %reg,%reg instead of CMP $0,%reg
  crypto: cpt - Fix sparse warnings in cptpf
  hwrng: ks-sa - Add dependency on IOMEM and OF
  crypto: lib/blake2s - Move selftest prototype into header file
  crypto: arm/aes-ce - work around Cortex-A57/A72 silicon errata
  crypto: ecdh - avoid unaligned accesses in ecdh_set_secret()
  crypto: ccree - rework cache parameters handling
  crypto: cavium - Use dma_set_mask_and_coherent to simplify code
  crypto: marvell/octeontx - Use dma_set_mask_and_coherent to simplify code
  ...
Linus Torvalds 2020-12-14 12:18:19 -08:00
commit 9e4b0d55d8
264 changed files with 8433 additions and 1998 deletions


@ -0,0 +1,45 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/intel,keembay-ocs-aes.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Intel Keem Bay OCS AES Device Tree Bindings
maintainers:
- Daniele Alessandrelli <daniele.alessandrelli@intel.com>
description:
The Intel Keem Bay Offload and Crypto Subsystem (OCS) AES engine provides
hardware-accelerated AES/SM4 encryption/decryption.
properties:
compatible:
const: intel,keembay-ocs-aes
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
required:
- compatible
- reg
- interrupts
- clocks
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
crypto@30008000 {
compatible = "intel,keembay-ocs-aes";
reg = <0x30008000 0x1000>;
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk 95>;
};


@ -8016,7 +8016,7 @@ F: drivers/staging/hikey9xx/
HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
M: Zaibo Xu <xuzaibo@huawei.com>
S: Maintained
F: drivers/char/hw_random/hisi-trng-v2.c
F: drivers/crypto/hisilicon/trng/trng.c
HISILICON V3XX SPI NOR FLASH Controller Driver
M: John Garry <john.garry@huawei.com>
@ -8975,13 +8975,23 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
INTEL KEEMBAY DRM DRIVER
INTEL KEEM BAY DRM DRIVER
M: Anitha Chrisanthus <anitha.chrisanthus@intel.com>
M: Edmund Dea <edmund.j.dea@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/display/intel,kmb_display.yaml
F: drivers/gpu/drm/kmb/
INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER
M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-aes.yaml
F: drivers/crypto/keembay/Kconfig
F: drivers/crypto/keembay/Makefile
F: drivers/crypto/keembay/keembay-ocs-aes-core.c
F: drivers/crypto/keembay/ocs-aes.c
F: drivers/crypto/keembay/ocs-aes.h
INTEL MANAGEMENT ENGINE (mei)
M: Tomas Winkler <tomas.winkler@intel.com>
L: linux-kernel@vger.kernel.org


@ -386,20 +386,32 @@ ENTRY(ce_aes_ctr_encrypt)
.Lctrloop4x:
subs r4, r4, #4
bmi .Lctr1x
add r6, r6, #1
/*
* NOTE: the sequence below has been carefully tweaked to avoid
* a silicon erratum that exists in Cortex-A57 (#1742098) and
* Cortex-A72 (#1655431) cores, where AESE/AESMC instruction pairs
* may produce an incorrect result if they take their input from a
* register of which a single 32-bit lane has been updated the last
* time it was modified. To work around this, the lanes of registers
* q0-q3 below are not manipulated individually, and the different
* counter values are prepared by successive manipulations of q7.
*/
add ip, r6, #1
vmov q0, q7
rev ip, ip
add lr, r6, #2
vmov s31, ip @ set lane 3 of q1 via q7
add ip, r6, #3
rev lr, lr
vmov q1, q7
rev ip, r6
add r6, r6, #1
vmov s31, lr @ set lane 3 of q2 via q7
rev ip, ip
vmov q2, q7
vmov s7, ip
rev ip, r6
add r6, r6, #1
vmov s31, ip @ set lane 3 of q3 via q7
add r6, r6, #4
vmov q3, q7
vmov s11, ip
rev ip, r6
add r6, r6, #1
vmov s15, ip
vld1.8 {q4-q5}, [r1]!
vld1.8 {q6}, [r1]!
vld1.8 {q15}, [r1]!
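
A rough C-intrinsics rendering of the counter setup above (not from the
patch; the helper and its names are illustrative only): per-lane writes
are confined to a scratch vector, and the vectors that will later feed
AESE/AESMC are only ever produced by whole-register copies.

#include <arm_neon.h>

/* Build four big-endian CTR blocks n..n+3 without lane-updating the
 * registers that are subsequently consumed by AESE/AESMC. */
static void make_ctr_blocks(uint32x4_t ctr, uint32_t n, uint32x4_t out[4])
{
        int i;

        for (i = 0; i < 4; i++) {
                /* the lane write happens on the scratch vector only */
                ctr = vsetq_lane_u32(__builtin_bswap32(n + i), ctr, 3);
                out[i] = ctr;   /* whole-register copy, erratum-safe */
        }
}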


@ -19,7 +19,7 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)-all");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
@ -191,7 +191,8 @@ static int cbc_init(struct crypto_skcipher *tfm)
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int reqsize;
ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->enc_tfm))
return PTR_ERR(ctx->enc_tfm);
@ -441,7 +442,8 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_flags = CRYPTO_ALG_INTERNAL |
CRYPTO_ALG_NEED_FALLBACK,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,


@ -23,7 +23,7 @@
asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
int nrounds);
asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
int nrounds);
int nrounds, unsigned int nbytes);
asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds);
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
@ -42,24 +42,24 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
{
u8 buf[CHACHA_BLOCK_SIZE];
while (bytes >= CHACHA_BLOCK_SIZE * 4) {
chacha_4block_xor_neon(state, dst, src, nrounds);
bytes -= CHACHA_BLOCK_SIZE * 4;
src += CHACHA_BLOCK_SIZE * 4;
dst += CHACHA_BLOCK_SIZE * 4;
state[12] += 4;
}
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block_xor_neon(state, dst, src, nrounds);
bytes -= CHACHA_BLOCK_SIZE;
src += CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
state[12]++;
while (bytes > CHACHA_BLOCK_SIZE) {
unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U);
chacha_4block_xor_neon(state, dst, src, nrounds, l);
bytes -= l;
src += l;
dst += l;
state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
}
if (bytes) {
memcpy(buf, src, bytes);
chacha_block_xor_neon(state, buf, buf, nrounds);
memcpy(dst, buf, bytes);
const u8 *s = src;
u8 *d = dst;
if (bytes != CHACHA_BLOCK_SIZE)
s = d = memcpy(buf, src, bytes);
chacha_block_xor_neon(state, d, s, nrounds);
if (d != dst)
memcpy(dst, buf, bytes);
}
}
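
For network traffic this matters because a 1420-byte packet is not a
multiple of the 64-byte ChaCha block: the reworked loop lets the 4-block
NEON routine absorb a partial tail instead of falling back to a
memcpy-buffered single block for each remainder. A standalone sketch of
the bookkeeping (illustrative only, not kernel code):

#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int bytes = 1420, counter = 0, calls = 0;

        while (bytes > CHACHA_BLOCK_SIZE) {
                unsigned int l = bytes < 4 * CHACHA_BLOCK_SIZE ?
                                 bytes : 4 * CHACHA_BLOCK_SIZE;

                counter += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
                bytes -= l;
                calls++;
        }
        if (bytes)      /* a final block of <= 64 bytes, if any */
                calls++;

        /* prints: 6 four-block calls, counter advanced by 23 blocks */
        printf("%u four-block calls, counter advanced by %u blocks\n",
               calls, counter);
        return 0;
}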


@ -47,6 +47,7 @@
*/
#include <linux/linkage.h>
#include <asm/cache.h>
.text
.fpu neon
@ -205,7 +206,7 @@ ENDPROC(hchacha_block_neon)
.align 5
ENTRY(chacha_4block_xor_neon)
push {r4-r5}
push {r4, lr}
mov r4, sp // preserve the stack pointer
sub ip, sp, #0x20 // allocate a 32 byte buffer
bic ip, ip, #0x1f // aligned to 32 bytes
@ -229,10 +230,10 @@ ENTRY(chacha_4block_xor_neon)
vld1.32 {q0-q1}, [r0]
vld1.32 {q2-q3}, [ip]
adr r5, .Lctrinc
adr lr, .Lctrinc
vdup.32 q15, d7[1]
vdup.32 q14, d7[0]
vld1.32 {q4}, [r5, :128]
vld1.32 {q4}, [lr, :128]
vdup.32 q13, d6[1]
vdup.32 q12, d6[0]
vdup.32 q11, d5[1]
@ -455,7 +456,7 @@ ENTRY(chacha_4block_xor_neon)
// Re-interleave the words in the first two rows of each block (x0..7).
// Also add the counter values 0-3 to x12[0-3].
vld1.32 {q8}, [r5, :128] // load counter values 0-3
vld1.32 {q8}, [lr, :128] // load counter values 0-3
vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1)
vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3)
vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5)
@ -493,6 +494,8 @@ ENTRY(chacha_4block_xor_neon)
// Re-interleave the words in the last two rows of each block (x8..15).
vld1.32 {q8-q9}, [sp, :256]
mov sp, r4 // restore original stack pointer
ldr r4, [r4, #8] // load number of bytes
vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13)
vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15)
vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9)
@ -520,41 +523,121 @@ ENTRY(chacha_4block_xor_neon)
// XOR the rest of the data with the keystream
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #96
veor q0, q0, q8
veor q1, q1, q12
ble .Lle96
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q2
veor q1, q1, q6
ble .Lle128
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q10
veor q1, q1, q14
ble .Lle160
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q4
veor q1, q1, q5
ble .Lle192
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q9
veor q1, q1, q13
ble .Lle224
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]!
subs r4, r4, #32
veor q0, q0, q3
veor q1, q1, q7
blt .Llt256
.Lout:
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]
mov sp, r4 // restore original stack pointer
veor q0, q0, q11
veor q1, q1, q15
vst1.8 {q0-q1}, [r1]
pop {r4-r5}
bx lr
pop {r4, pc}
.Lle192:
vmov q4, q9
vmov q5, q13
.Lle160:
// nothing to do
.Lfinalblock:
// Process the final block if processing less than 4 full blocks.
// Entered with 32 bytes of ChaCha cipher stream in q4-q5, and the
// previous 32 byte output block that still needs to be written at
// [r1] in q0-q1.
beq .Lfullblock
.Lpartialblock:
adr lr, .Lpermute + 32
add r2, r2, r4
add lr, lr, r4
add r4, r4, r1
vld1.8 {q2-q3}, [lr]
vld1.8 {q6-q7}, [r2]
add r4, r4, #32
vtbl.8 d4, {q4-q5}, d4
vtbl.8 d5, {q4-q5}, d5
vtbl.8 d6, {q4-q5}, d6
vtbl.8 d7, {q4-q5}, d7
veor q6, q6, q2
veor q7, q7, q3
vst1.8 {q6-q7}, [r4] // overlapping stores
vst1.8 {q0-q1}, [r1]
pop {r4, pc}
.Lfullblock:
vmov q11, q4
vmov q15, q5
b .Lout
.Lle96:
vmov q4, q2
vmov q5, q6
b .Lfinalblock
.Lle128:
vmov q4, q10
vmov q5, q14
b .Lfinalblock
.Lle224:
vmov q4, q3
vmov q5, q7
b .Lfinalblock
.Llt256:
vmov q4, q11
vmov q5, q15
b .Lpartialblock
ENDPROC(chacha_4block_xor_neon)
.align L1_CACHE_SHIFT
.Lpermute:
.byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
.byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
.byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
.byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
.byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
.byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
.byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
.byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
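
The .Lpermute table and the "overlapping stores" above implement a
branch-free tail: rather than staging the last partial block through a
stack buffer byte by byte, the code shifts the keystream with vtbl and
issues one full-width store that ends exactly at the end of the output,
rewriting a few already-written bytes with their existing values. A
scalar C equivalent (illustrative only; memcpy stands in for the vector
store, and the caller must have written at least one full block first):

#include <stddef.h>
#include <string.h>

#define VEC 16  /* stand-in for one q-register store */

static void tail_xor(unsigned char *out, const unsigned char *in,
                     const unsigned char *ks, size_t len) /* 0 < len < VEC */
{
        unsigned char block[VEC];
        size_t i;

        /* bytes of the previous block that the store will rewrite */
        memcpy(block, out - (VEC - len), VEC - len);
        for (i = 0; i < len; i++)
                block[VEC - len + i] = in[i] ^ ks[i];
        /* one store ending exactly at out + len */
        memcpy(out - (VEC - len), block, VEC);
}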


@ -7,7 +7,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>


@ -3,7 +3,7 @@
#define ASM_ARM_CRYPTO_SHA1_H
#include <linux/crypto.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
unsigned int len);


@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>


@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/neon.h>
#include <asm/simd.h>


@ -7,7 +7,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>


@ -17,7 +17,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/simd.h>
#include <asm/neon.h>


@ -13,7 +13,7 @@
#include <crypto/internal/simd.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/simd.h>


@ -6,7 +6,7 @@
*/
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
#include <linux/module.h>


@ -7,7 +7,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
#include <linux/module.h>


@ -1082,6 +1082,7 @@ CONFIG_CRYPTO_DEV_CCREE=m
CONFIG_CRYPTO_DEV_HISI_SEC2=m
CONFIG_CRYPTO_DEV_HISI_ZIP=m
CONFIG_CRYPTO_DEV_HISI_HPRE=m
CONFIG_CRYPTO_DEV_HISI_TRNG=m
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y


@ -10,7 +10,7 @@
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>


@ -195,7 +195,6 @@ SYM_FUNC_START(chacha_4block_xor_neon)
adr_l x10, .Lpermute
and x5, x4, #63
add x10, x10, x5
add x11, x10, #64
//
// This function encrypts four consecutive ChaCha blocks by loading
@ -645,11 +644,11 @@ CPU_BE( rev a15, a15 )
zip2 v31.4s, v14.4s, v15.4s
eor a15, a15, w9
mov x3, #64
add x3, x2, x4
sub x3, x3, #128 // start of last block
subs x5, x4, #128
add x6, x5, x2
csel x3, x3, xzr, ge
csel x2, x2, x6, ge
csel x2, x2, x3, ge
// interleave 64-bit words in state n, n+2
zip1 v0.2d, v16.2d, v18.2d
@ -658,13 +657,10 @@ CPU_BE( rev a15, a15 )
zip1 v8.2d, v17.2d, v19.2d
zip2 v12.2d, v17.2d, v19.2d
stp a2, a3, [x1, #-56]
ld1 {v16.16b-v19.16b}, [x2], x3
subs x6, x4, #192
ccmp x3, xzr, #4, lt
add x7, x6, x2
csel x3, x3, xzr, eq
csel x2, x2, x7, eq
ld1 {v16.16b-v19.16b}, [x2], #64
csel x2, x2, x3, ge
zip1 v1.2d, v20.2d, v22.2d
zip2 v5.2d, v20.2d, v22.2d
@ -672,13 +668,10 @@ CPU_BE( rev a15, a15 )
zip1 v9.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
stp a6, a7, [x1, #-40]
ld1 {v20.16b-v23.16b}, [x2], x3
subs x7, x4, #256
ccmp x3, xzr, #4, lt
add x8, x7, x2
csel x3, x3, xzr, eq
csel x2, x2, x8, eq
ld1 {v20.16b-v23.16b}, [x2], #64
csel x2, x2, x3, ge
zip1 v2.2d, v24.2d, v26.2d
zip2 v6.2d, v24.2d, v26.2d
@ -686,12 +679,10 @@ CPU_BE( rev a15, a15 )
zip1 v10.2d, v25.2d, v27.2d
zip2 v14.2d, v25.2d, v27.2d
stp a10, a11, [x1, #-24]
ld1 {v24.16b-v27.16b}, [x2], x3
subs x8, x4, #320
ccmp x3, xzr, #4, lt
add x9, x8, x2
csel x2, x2, x9, eq
ld1 {v24.16b-v27.16b}, [x2], #64
csel x2, x2, x3, ge
zip1 v3.2d, v28.2d, v30.2d
zip2 v7.2d, v28.2d, v30.2d
@ -699,151 +690,105 @@ CPU_BE( rev a15, a15 )
zip1 v11.2d, v29.2d, v31.2d
zip2 v15.2d, v29.2d, v31.2d
stp a14, a15, [x1, #-8]
tbnz x5, #63, .Lt128
ld1 {v28.16b-v31.16b}, [x2]
// xor with corresponding input, write to output
tbnz x5, #63, 0f
eor v16.16b, v16.16b, v0.16b
eor v17.16b, v17.16b, v1.16b
eor v18.16b, v18.16b, v2.16b
eor v19.16b, v19.16b, v3.16b
st1 {v16.16b-v19.16b}, [x1], #64
cbz x5, .Lout
tbnz x6, #63, 1f
tbnz x6, #63, .Lt192
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v6.16b
eor v23.16b, v23.16b, v7.16b
st1 {v20.16b-v23.16b}, [x1], #64
cbz x6, .Lout
tbnz x7, #63, 2f
st1 {v16.16b-v19.16b}, [x1], #64
tbnz x7, #63, .Lt256
eor v24.16b, v24.16b, v8.16b
eor v25.16b, v25.16b, v9.16b
eor v26.16b, v26.16b, v10.16b
eor v27.16b, v27.16b, v11.16b
st1 {v24.16b-v27.16b}, [x1], #64
cbz x7, .Lout
tbnz x8, #63, 3f
st1 {v20.16b-v23.16b}, [x1], #64
tbnz x8, #63, .Lt320
eor v28.16b, v28.16b, v12.16b
eor v29.16b, v29.16b, v13.16b
eor v30.16b, v30.16b, v14.16b
eor v31.16b, v31.16b, v15.16b
st1 {v24.16b-v27.16b}, [x1], #64
st1 {v28.16b-v31.16b}, [x1]
.Lout: frame_pop
ret
// fewer than 128 bytes of in/output
0: ld1 {v8.16b}, [x10]
ld1 {v9.16b}, [x11]
movi v10.16b, #16
sub x2, x1, #64
add x1, x1, x5
ld1 {v16.16b-v19.16b}, [x2]
tbl v4.16b, {v0.16b-v3.16b}, v8.16b
tbx v20.16b, {v16.16b-v19.16b}, v9.16b
add v8.16b, v8.16b, v10.16b
add v9.16b, v9.16b, v10.16b
tbl v5.16b, {v0.16b-v3.16b}, v8.16b
tbx v21.16b, {v16.16b-v19.16b}, v9.16b
add v8.16b, v8.16b, v10.16b
add v9.16b, v9.16b, v10.16b
tbl v6.16b, {v0.16b-v3.16b}, v8.16b
tbx v22.16b, {v16.16b-v19.16b}, v9.16b
add v8.16b, v8.16b, v10.16b
add v9.16b, v9.16b, v10.16b
tbl v7.16b, {v0.16b-v3.16b}, v8.16b
tbx v23.16b, {v16.16b-v19.16b}, v9.16b
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v6.16b
eor v23.16b, v23.16b, v7.16b
st1 {v20.16b-v23.16b}, [x1]
b .Lout
// fewer than 192 bytes of in/output
1: ld1 {v8.16b}, [x10]
ld1 {v9.16b}, [x11]
movi v10.16b, #16
add x1, x1, x6
tbl v0.16b, {v4.16b-v7.16b}, v8.16b
tbx v20.16b, {v16.16b-v19.16b}, v9.16b
add v8.16b, v8.16b, v10.16b
add v9.16b, v9.16b, v10.16b
tbl v1.16b, {v4.16b-v7.16b}, v8.16b
tbx v21.16b, {v16.16b-v19.16b}, v9.16b
add v8.16b, v8.16b, v10.16b
add v9.16b, v9.16b, v10.16b
tbl v2.16b, {v4.16b-v7.16b}, v8.16b
tbx v22.16b, {v16.16b-v19.16b}, v9.16b
add v8.16b, v8.16b, v10.16b
add v9.16b, v9.16b, v10.16b
tbl v3.16b, {v4.16b-v7.16b}, v8.16b
tbx v23.16b, {v16.16b-v19.16b}, v9.16b
.Lt192: cbz x5, 1f // exactly 128 bytes?
ld1 {v28.16b-v31.16b}, [x10]
add x5, x5, x1
tbl v28.16b, {v4.16b-v7.16b}, v28.16b
tbl v29.16b, {v4.16b-v7.16b}, v29.16b
tbl v30.16b, {v4.16b-v7.16b}, v30.16b
tbl v31.16b, {v4.16b-v7.16b}, v31.16b
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v1.16b
eor v22.16b, v22.16b, v2.16b
eor v23.16b, v23.16b, v3.16b
st1 {v20.16b-v23.16b}, [x1]
0: eor v20.16b, v20.16b, v28.16b
eor v21.16b, v21.16b, v29.16b
eor v22.16b, v22.16b, v30.16b
eor v23.16b, v23.16b, v31.16b
st1 {v20.16b-v23.16b}, [x5] // overlapping stores
1: st1 {v16.16b-v19.16b}, [x1]
b .Lout
// fewer than 128 bytes of in/output
.Lt128: ld1 {v28.16b-v31.16b}, [x10]
add x5, x5, x1
sub x1, x1, #64
tbl v28.16b, {v0.16b-v3.16b}, v28.16b
tbl v29.16b, {v0.16b-v3.16b}, v29.16b
tbl v30.16b, {v0.16b-v3.16b}, v30.16b
tbl v31.16b, {v0.16b-v3.16b}, v31.16b
ld1 {v16.16b-v19.16b}, [x1] // reload first output block
b 0b
// fewer than 256 bytes of in/output
2: ld1 {v4.16b}, [x10]
ld1 {v5.16b}, [x11]
movi v6.16b, #16
add x1, x1, x7
.Lt256: cbz x6, 2f // exactly 192 bytes?
ld1 {v4.16b-v7.16b}, [x10]
add x6, x6, x1
tbl v0.16b, {v8.16b-v11.16b}, v4.16b
tbx v24.16b, {v20.16b-v23.16b}, v5.16b
add v4.16b, v4.16b, v6.16b
add v5.16b, v5.16b, v6.16b
tbl v1.16b, {v8.16b-v11.16b}, v4.16b
tbx v25.16b, {v20.16b-v23.16b}, v5.16b
add v4.16b, v4.16b, v6.16b
add v5.16b, v5.16b, v6.16b
tbl v2.16b, {v8.16b-v11.16b}, v4.16b
tbx v26.16b, {v20.16b-v23.16b}, v5.16b
add v4.16b, v4.16b, v6.16b
add v5.16b, v5.16b, v6.16b
tbl v3.16b, {v8.16b-v11.16b}, v4.16b
tbx v27.16b, {v20.16b-v23.16b}, v5.16b
eor v24.16b, v24.16b, v0.16b
eor v25.16b, v25.16b, v1.16b
eor v26.16b, v26.16b, v2.16b
eor v27.16b, v27.16b, v3.16b
st1 {v24.16b-v27.16b}, [x1]
b .Lout
// fewer than 320 bytes of in/output
3: ld1 {v4.16b}, [x10]
ld1 {v5.16b}, [x11]
movi v6.16b, #16
add x1, x1, x8
tbl v0.16b, {v12.16b-v15.16b}, v4.16b
tbx v28.16b, {v24.16b-v27.16b}, v5.16b
add v4.16b, v4.16b, v6.16b
add v5.16b, v5.16b, v6.16b
tbl v1.16b, {v12.16b-v15.16b}, v4.16b
tbx v29.16b, {v24.16b-v27.16b}, v5.16b
add v4.16b, v4.16b, v6.16b
add v5.16b, v5.16b, v6.16b
tbl v2.16b, {v12.16b-v15.16b}, v4.16b
tbx v30.16b, {v24.16b-v27.16b}, v5.16b
add v4.16b, v4.16b, v6.16b
add v5.16b, v5.16b, v6.16b
tbl v3.16b, {v12.16b-v15.16b}, v4.16b
tbx v31.16b, {v24.16b-v27.16b}, v5.16b
tbl v1.16b, {v8.16b-v11.16b}, v5.16b
tbl v2.16b, {v8.16b-v11.16b}, v6.16b
tbl v3.16b, {v8.16b-v11.16b}, v7.16b
eor v28.16b, v28.16b, v0.16b
eor v29.16b, v29.16b, v1.16b
eor v30.16b, v30.16b, v2.16b
eor v31.16b, v31.16b, v3.16b
st1 {v28.16b-v31.16b}, [x1]
st1 {v28.16b-v31.16b}, [x6] // overlapping stores
2: st1 {v20.16b-v23.16b}, [x1]
b .Lout
// fewer than 320 bytes of in/output
.Lt320: cbz x7, 3f // exactly 256 bytes?
ld1 {v4.16b-v7.16b}, [x10]
add x7, x7, x1
tbl v0.16b, {v12.16b-v15.16b}, v4.16b
tbl v1.16b, {v12.16b-v15.16b}, v5.16b
tbl v2.16b, {v12.16b-v15.16b}, v6.16b
tbl v3.16b, {v12.16b-v15.16b}, v7.16b
eor v28.16b, v28.16b, v0.16b
eor v29.16b, v29.16b, v1.16b
eor v30.16b, v30.16b, v2.16b
eor v31.16b, v31.16b, v3.16b
st1 {v28.16b-v31.16b}, [x7] // overlapping stores
3: st1 {v24.16b-v27.16b}, [x1]
b .Lout
SYM_FUNC_END(chacha_4block_xor_neon)
@ -851,7 +796,7 @@ SYM_FUNC_END(chacha_4block_xor_neon)
.align L1_CACHE_SHIFT
.Lpermute:
.set .Li, 0
.rept 192
.rept 128
.byte (.Li - 64)
.set .Li, .Li + 1
.endr


@ -544,7 +544,22 @@ CPU_LE( rev w8, w8 )
ext XL.16b, XL.16b, XL.16b, #8
rev64 XL.16b, XL.16b
eor XL.16b, XL.16b, KS0.16b
.if \enc == 1
st1 {XL.16b}, [x10] // store tag
.else
ldp x11, x12, [sp, #40] // load tag pointer and authsize
adr_l x17, .Lpermute_table
ld1 {KS0.16b}, [x11] // load supplied tag
add x17, x17, x12
ld1 {KS1.16b}, [x17] // load permute vector
cmeq XL.16b, XL.16b, KS0.16b // compare tags
mvn XL.16b, XL.16b // -1 for fail, 0 for pass
tbl XL.16b, {XL.16b}, KS1.16b // keep authsize bytes only
sminv b0, XL.16b // signed minimum across XL
smov w0, v0.b[0] // return b0
.endif
4: ldp x29, x30, [sp], #32
ret


@ -55,10 +55,10 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
u64 const h[][2], u64 dg[], u8 ctr[],
u32 const rk[], int rounds, u8 tag[]);
asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
u64 const h[][2], u64 dg[], u8 ctr[],
u32 const rk[], int rounds, u8 tag[]);
asmlinkage int pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
u64 const h[][2], u64 dg[], u8 ctr[],
u32 const rk[], int rounds, const u8 l[],
const u8 tag[], u64 authsize);
static int ghash_init(struct shash_desc *desc)
{
@ -168,7 +168,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
put_unaligned_be64(ctx->digest[1], dst);
put_unaligned_be64(ctx->digest[0], dst + 8);
*ctx = (struct ghash_desc_ctx){};
memzero_explicit(ctx, sizeof(*ctx));
return 0;
}
@ -458,6 +458,7 @@ static int gcm_decrypt(struct aead_request *req)
unsigned int authsize = crypto_aead_authsize(aead);
int nrounds = num_rounds(&ctx->aes_key);
struct skcipher_walk walk;
u8 otag[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u8 iv[AES_BLOCK_SIZE];
u64 dg[2] = {};
@ -474,9 +475,15 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
scatterwalk_map_and_copy(otag, req->src,
req->assoclen + req->cryptlen - authsize,
authsize, 0);
err = skcipher_walk_aead_decrypt(&walk, req, false);
if (likely(crypto_simd_usable())) {
int ret;
do {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
@ -493,9 +500,10 @@ static int gcm_decrypt(struct aead_request *req)
}
kernel_neon_begin();
pmull_gcm_decrypt(nbytes, dst, src, ctx->ghash_key.h,
dg, iv, ctx->aes_key.key_enc, nrounds,
tag);
ret = pmull_gcm_decrypt(nbytes, dst, src,
ctx->ghash_key.h,
dg, iv, ctx->aes_key.key_enc,
nrounds, tag, otag, authsize);
kernel_neon_end();
if (unlikely(!nbytes))
@ -507,6 +515,11 @@ static int gcm_decrypt(struct aead_request *req)
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
} while (walk.nbytes);
if (err)
return err;
if (ret)
return -EBADMSG;
} else {
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
@ -548,23 +561,20 @@ static int gcm_decrypt(struct aead_request *req)
err = skcipher_walk_done(&walk, 0);
}
if (err)
return err;
put_unaligned_be64(dg[1], tag);
put_unaligned_be64(dg[0], tag + 8);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
aes_encrypt(&ctx->aes_key, iv, iv);
crypto_xor(tag, iv, AES_BLOCK_SIZE);
if (crypto_memneq(tag, otag, authsize)) {
memzero_explicit(tag, AES_BLOCK_SIZE);
return -EBADMSG;
}
}
if (err)
return err;
/* compare calculated auth tag with the stored one */
scatterwalk_map_and_copy(buf, req->src,
req->assoclen + req->cryptlen - authsize,
authsize, 0);
if (crypto_memneq(tag, buf, authsize))
return -EBADMSG;
return 0;
}
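
Both the SIMD and the scalar paths now refuse to hand back
unauthenticated plaintext: the NEON helper verifies the tag itself and
returns a pass/fail value, while the fallback keeps the comparison in C.
A minimal sketch of the scalar pattern (hypothetical helper, not the
driver function):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/algapi.h>      /* crypto_memneq() */

static int check_tag(u8 *computed, const u8 *stored, unsigned int authsize,
                     unsigned int tag_size)
{
        /* constant-time compare; wipe the computed tag on mismatch */
        if (crypto_memneq(computed, stored, authsize)) {
                memzero_explicit(computed, tag_size);
                return -EBADMSG;
        }
        return 0;
}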


@ -840,7 +840,6 @@ poly1305_blocks_neon:
ldp d14,d15,[sp,#64]
addp $ACC2,$ACC2,$ACC2
ldr x30,[sp,#8]
.inst 0xd50323bf // autiasp
////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
@ -882,6 +881,7 @@ poly1305_blocks_neon:
str x4,[$ctx,#8] // set is_base2_26
ldr x29,[sp],#80
.inst 0xd50323bf // autiasp
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon


@ -779,7 +779,6 @@ poly1305_blocks_neon:
ldp d14,d15,[sp,#64]
addp v21.2d,v21.2d,v21.2d
ldr x30,[sp,#8]
.inst 0xd50323bf // autiasp
////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
@ -821,6 +820,7 @@ poly1305_blocks_neon:
str x4,[x0,#8] // set is_base2_26
ldr x29,[sp],#80
.inst 0xd50323bf // autiasp
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon


@ -177,7 +177,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
}
poly1305_emit(&dctx->h, dst, dctx->s);
*dctx = (struct poly1305_desc_ctx){};
memzero_explicit(dctx, sizeof(*dctx));
}
EXPORT_SYMBOL(poly1305_final_arch);


@ -10,7 +10,7 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>


@ -10,7 +10,7 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>


@ -10,7 +10,7 @@
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/types.h>
#include <linux/string.h>


@ -94,7 +94,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
if (digest_size & 4)
put_unaligned_le32(sctx->st[i], (__le32 *)digest);
*sctx = (struct sha3_state){};
memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
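
Several hunks in this pull replace the idiom "*ctx = (struct foo){};"
with memzero_explicit(). The struct assignment is an ordinary dead store
at the end of the function, so the compiler is free to elide it and
leave key material behind; memzero_explicit() contains a barrier that
forces the write. A minimal illustration (the struct is made up):

#include <linux/string.h>
#include <linux/types.h>

struct secret_ctx { u8 key[32]; };

static void wipe_maybe(struct secret_ctx *ctx)
{
        *ctx = (struct secret_ctx){};           /* may be optimized away */
}

static void wipe_always(struct secret_ctx *ctx)
{
        memzero_explicit(ctx, sizeof(*ctx));    /* guaranteed to happen */
}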


@ -14,7 +14,7 @@
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>


@ -8,7 +8,7 @@
#include <crypto/internal/hash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <asm/neon.h>


@ -41,7 +41,7 @@ do { \
*/
#define read_octeon_64bit_hash_dword(index) \
({ \
u64 __value; \
__be64 __value; \
\
__asm__ __volatile__ ( \
"dmfc2 %[rt],0x0048+" STR(index) \


@ -68,10 +68,11 @@ static int octeon_md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = cpu_to_le32(MD5_H0);
mctx->hash[1] = cpu_to_le32(MD5_H1);
mctx->hash[2] = cpu_to_le32(MD5_H2);
mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->hash[0] = MD5_H0;
mctx->hash[1] = MD5_H1;
mctx->hash[2] = MD5_H2;
mctx->hash[3] = MD5_H3;
cpu_to_le32_array(mctx->hash, 4);
mctx->byte_count = 0;
return 0;
@ -139,8 +140,9 @@ static int octeon_md5_final(struct shash_desc *desc, u8 *out)
}
memset(p, 0, padding);
mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
mctx->block[14] = mctx->byte_count << 3;
mctx->block[15] = mctx->byte_count >> 29;
cpu_to_le32_array(mctx->block + 14, 2);
octeon_md5_transform(mctx->block);
octeon_md5_read_hash(mctx);
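
The conversion helper used above, cpu_to_le32_array() (and its inverse
le32_to_cpu_array(), used in the sparc64 md5 hunk further down), swaps a
u32 array in place and compiles to nothing on little-endian hosts; doing
the stores in native byte order first keeps sparse happy about the plain
u32 fields. In isolation (hypothetical wrapper around the same calls):

#include <linux/byteorder/generic.h>
#include <linux/types.h>

static void store_bit_count(u32 *block, u64 byte_count)
{
        block[14] = byte_count << 3;            /* low word of bit count */
        block[15] = byte_count >> 29;           /* high word of bit count */
        cpu_to_le32_array(block + 14, 2);       /* in place; no-op on LE */
}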


@ -14,7 +14,7 @@
*/
#include <linux/mm.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>


@ -15,7 +15,7 @@
*/
#include <linux/mm.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>


@ -14,7 +14,7 @@
*/
#include <linux/mm.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>


@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>


@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <asm/byteorder.h>
void powerpc_sha_transform(u32 *state, const u8 *src);


@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>
@ -177,7 +177,7 @@ static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
{
u32 D[SHA256_DIGEST_SIZE >> 2];
__be32 D[SHA256_DIGEST_SIZE >> 2];
__be32 *dst = (__be32 *)out;
ppc_spe_sha256_final(desc, (u8 *)D);


@ -11,7 +11,8 @@
#define _CRYPTO_ARCH_S390_SHA_H
#include <linux/crypto.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
/* must be big enough for the largest SHA variant */


@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <asm/cpacf.h>
#include "sha.h"


@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <asm/cpacf.h>
#include "sha.h"


@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
#include <asm/cpacf.h>


@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
#include <asm/cpacf.h>


@ -8,7 +8,7 @@
* Author(s): Jan Glauber (jang@de.ibm.com)
*/
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>


@ -9,7 +9,7 @@
#include <linux/kexec.h>
#include <linux/string.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <asm/purgatory.h>
int verify_sha256_digest(void)


@ -35,7 +35,7 @@ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key,
if (keylen != sizeof(u32))
return -EINVAL;
*(__le32 *)mctx = le32_to_cpup((__le32 *)key);
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}


@ -33,10 +33,11 @@ static int md5_sparc64_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = cpu_to_le32(MD5_H0);
mctx->hash[1] = cpu_to_le32(MD5_H1);
mctx->hash[2] = cpu_to_le32(MD5_H2);
mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->hash[0] = MD5_H0;
mctx->hash[1] = MD5_H1;
mctx->hash[2] = MD5_H2;
mctx->hash[3] = MD5_H3;
le32_to_cpu_array(mctx->hash, 4);
mctx->byte_count = 0;
return 0;


@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <asm/pstate.h>
#include <asm/elf.h>


@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <asm/pstate.h>
#include <asm/elf.h>


@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <asm/pstate.h>
#include <asm/elf.h>


@ -1 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only


@ -318,7 +318,7 @@ _initial_blocks_\@:
# Main loop - Encrypt/Decrypt remaining blocks
cmp $0, %r13
test %r13, %r13
je _zero_cipher_left_\@
sub $64, %r13
je _four_cipher_left_\@
@ -437,7 +437,7 @@ _multiple_of_16_bytes_\@:
mov PBlockLen(%arg2), %r12
cmp $0, %r12
test %r12, %r12
je _partial_done\@
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
@ -474,7 +474,7 @@ _T_8_\@:
add $8, %r10
sub $8, %r11
psrldq $8, %xmm0
cmp $0, %r11
test %r11, %r11
je _return_T_done_\@
_T_4_\@:
movd %xmm0, %eax
@ -482,7 +482,7 @@ _T_4_\@:
add $4, %r10
sub $4, %r11
psrldq $4, %xmm0
cmp $0, %r11
test %r11, %r11
je _return_T_done_\@
_T_123_\@:
movd %xmm0, %eax
@ -619,7 +619,7 @@ _get_AAD_blocks\@:
/* read the last <16B of AAD */
_get_AAD_rest\@:
cmp $0, %r11
test %r11, %r11
je _get_AAD_done\@
READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
@ -640,7 +640,7 @@ _get_AAD_done\@:
.macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
AAD_HASH operation
mov PBlockLen(%arg2), %r13
cmp $0, %r13
test %r13, %r13
je _partial_block_done_\@ # Leave Macro if no partial blocks
# Read in input data without over reading
cmp $16, \PLAIN_CYPH_LEN
@ -692,7 +692,7 @@ _no_extra_mask_1_\@:
pshufb %xmm2, %xmm3
pxor %xmm3, \AAD_HASH
cmp $0, %r10
test %r10, %r10
jl _partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
@ -727,7 +727,7 @@ _no_extra_mask_2_\@:
pshufb %xmm2, %xmm9
pxor %xmm9, \AAD_HASH
cmp $0, %r10
test %r10, %r10
jl _partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
@ -747,7 +747,7 @@ _encode_done_\@:
pshufb %xmm2, %xmm9
.endif
# output encrypted Bytes
cmp $0, %r10
test %r10, %r10
jl _partial_fill_\@
mov %r13, %r12
mov $16, %r13
@ -2720,7 +2720,7 @@ SYM_FUNC_END(aesni_ctr_enc)
*/
SYM_FUNC_START(aesni_xts_crypt8)
FRAME_BEGIN
cmpb $0, %cl
testb %cl, %cl
movl $0, %ecx
movl $240, %r10d
leaq _aesni_enc4, %r11


@ -369,7 +369,7 @@ _initial_num_blocks_is_0\@:
_initial_blocks_encrypted\@:
cmp $0, %r13
test %r13, %r13
je _zero_cipher_left\@
sub $128, %r13
@ -528,7 +528,7 @@ _multiple_of_16_bytes\@:
vmovdqu HashKey(arg2), %xmm13
mov PBlockLen(arg2), %r12
cmp $0, %r12
test %r12, %r12
je _partial_done\@
#GHASH computation for the last <16 Byte block
@ -573,7 +573,7 @@ _T_8\@:
add $8, %r10
sub $8, %r11
vpsrldq $8, %xmm9, %xmm9
cmp $0, %r11
test %r11, %r11
je _return_T_done\@
_T_4\@:
vmovd %xmm9, %eax
@ -581,7 +581,7 @@ _T_4\@:
add $4, %r10
sub $4, %r11
vpsrldq $4, %xmm9, %xmm9
cmp $0, %r11
test %r11, %r11
je _return_T_done\@
_T_123\@:
vmovd %xmm9, %eax
@ -625,7 +625,7 @@ _get_AAD_blocks\@:
cmp $16, %r11
jge _get_AAD_blocks\@
vmovdqu \T8, \T7
cmp $0, %r11
test %r11, %r11
je _get_AAD_done\@
vpxor \T7, \T7, \T7
@ -644,7 +644,7 @@ _get_AAD_rest8\@:
vpxor \T1, \T7, \T7
jmp _get_AAD_rest8\@
_get_AAD_rest4\@:
cmp $0, %r11
test %r11, %r11
jle _get_AAD_rest0\@
mov (%r10), %eax
movq %rax, \T1
@ -749,7 +749,7 @@ _done_read_partial_block_\@:
.macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
AAD_HASH ENC_DEC
mov PBlockLen(arg2), %r13
cmp $0, %r13
test %r13, %r13
je _partial_block_done_\@ # Leave Macro if no partial blocks
# Read in input data without over reading
cmp $16, \PLAIN_CYPH_LEN
@ -801,7 +801,7 @@ _no_extra_mask_1_\@:
vpshufb %xmm2, %xmm3, %xmm3
vpxor %xmm3, \AAD_HASH, \AAD_HASH
cmp $0, %r10
test %r10, %r10
jl _partial_incomplete_1_\@
# GHASH computation for the last <16 Byte block
@ -836,7 +836,7 @@ _no_extra_mask_2_\@:
vpshufb %xmm2, %xmm9, %xmm9
vpxor %xmm9, \AAD_HASH, \AAD_HASH
cmp $0, %r10
test %r10, %r10
jl _partial_incomplete_2_\@
# GHASH computation for the last <16 Byte block
@ -856,7 +856,7 @@ _encode_done_\@:
vpshufb %xmm2, %xmm9, %xmm9
.endif
# output encrypted Bytes
cmp $0, %r10
test %r10, %r10
jl _partial_fill_\@
mov %r13, %r12
mov $16, %r13


@ -251,7 +251,7 @@ $code.=<<___;
mov %rax,8($ctx)
mov %rax,16($ctx)
cmp \$0,$inp
test $inp,$inp
je .Lno_key
___
$code.=<<___ if (!$kernel);


@ -210,7 +210,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
}
poly1305_simd_emit(&dctx->h, dst, dctx->s);
*dctx = (struct poly1305_desc_ctx){};
memzero_explicit(dctx, sizeof(*dctx));
}
EXPORT_SYMBOL(poly1305_final_arch);


@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/simd.h>


@ -35,7 +35,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/string.h>
#include <asm/simd.h>


@ -278,7 +278,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
# "blocks" is the message length in SHA512 blocks
########################################################################
SYM_FUNC_START(sha512_transform_avx)
cmp $0, msglen
test msglen, msglen
je nowork
# Allocate Stack Space


@ -280,7 +280,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
########################################################################
SYM_FUNC_START(sha512_transform_ssse3)
cmp $0, msglen
test msglen, msglen
je nowork
# Allocate Stack Space


@ -34,7 +34,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <asm/simd.h>


@ -9,7 +9,7 @@
*/
#include <linux/bug.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <asm/purgatory.h>
#include "../boot/string.h"


@ -145,7 +145,7 @@ config CRYPTO_MANAGER_DISABLE_TESTS
config CRYPTO_MANAGER_EXTRA_TESTS
bool "Enable extra run-time crypto self tests"
depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS
depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER
help
Enable extra run-time self tests of registered crypto algorithms,
including randomized fuzz tests.
@ -201,7 +201,7 @@ config CRYPTO_AUTHENC
config CRYPTO_TEST
tristate "Testing module"
depends on m
depends on m || EXPERT
select CRYPTO_MANAGER
help
Quick & dirty crypto test module.


@ -67,9 +67,11 @@ void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size);
void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size);
void crypto_aegis128_final_simd(struct aegis_state *state,
union aegis_block *tag_xor,
u64 assoclen, u64 cryptlen);
int crypto_aegis128_final_simd(struct aegis_state *state,
union aegis_block *tag_xor,
unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize);
static void crypto_aegis128_update(struct aegis_state *state)
{
@ -84,9 +86,10 @@ static void crypto_aegis128_update(struct aegis_state *state)
}
static void crypto_aegis128_update_a(struct aegis_state *state,
const union aegis_block *msg)
const union aegis_block *msg,
bool do_simd)
{
if (aegis128_do_simd()) {
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && do_simd) {
crypto_aegis128_update_simd(state, msg);
return;
}
@ -95,9 +98,10 @@ static void crypto_aegis128_update_a(struct aegis_state *state,
crypto_aegis_block_xor(&state->blocks[0], msg);
}
static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg)
static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg,
bool do_simd)
{
if (aegis128_do_simd()) {
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && do_simd) {
crypto_aegis128_update_simd(state, msg);
return;
}
@ -126,27 +130,28 @@ static void crypto_aegis128_init(struct aegis_state *state,
crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]);
for (i = 0; i < 5; i++) {
crypto_aegis128_update_a(state, key);
crypto_aegis128_update_a(state, &key_iv);
crypto_aegis128_update_a(state, key, false);
crypto_aegis128_update_a(state, &key_iv, false);
}
}
static void crypto_aegis128_ad(struct aegis_state *state,
const u8 *src, unsigned int size)
const u8 *src, unsigned int size,
bool do_simd)
{
if (AEGIS_ALIGNED(src)) {
const union aegis_block *src_blk =
(const union aegis_block *)src;
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis128_update_a(state, src_blk);
crypto_aegis128_update_a(state, src_blk, do_simd);
size -= AEGIS_BLOCK_SIZE;
src_blk++;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis128_update_u(state, src);
crypto_aegis128_update_u(state, src, do_simd);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
@ -154,6 +159,12 @@ static void crypto_aegis128_ad(struct aegis_state *state,
}
}
static void crypto_aegis128_wipe_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
memzero_explicit(dst, size);
}
static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
@ -172,7 +183,7 @@ static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst,
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis128_update_a(state, src_blk);
crypto_aegis128_update_a(state, src_blk, false);
*dst_blk = tmp;
@ -188,7 +199,7 @@ static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst,
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis128_update_u(state, src);
crypto_aegis128_update_u(state, src, false);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
@ -207,7 +218,7 @@ static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst,
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis128_update_a(state, &msg);
crypto_aegis128_update_a(state, &msg, false);
crypto_aegis_block_xor(&msg, &tmp);
@ -233,7 +244,7 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst,
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis128_update_a(state, &tmp);
crypto_aegis128_update_a(state, &tmp, false);
*dst_blk = tmp;
@ -249,7 +260,7 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst,
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis128_update_a(state, &tmp);
crypto_aegis128_update_a(state, &tmp, false);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
@ -271,7 +282,7 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst,
memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size);
crypto_aegis128_update_a(state, &msg);
crypto_aegis128_update_a(state, &msg, false);
memcpy(dst, msg.bytes, size);
}
@ -279,7 +290,8 @@ static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst,
static void crypto_aegis128_process_ad(struct aegis_state *state,
struct scatterlist *sg_src,
unsigned int assoclen)
unsigned int assoclen,
bool do_simd)
{
struct scatter_walk walk;
union aegis_block buf;
@ -296,13 +308,13 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
if (pos > 0) {
unsigned int fill = AEGIS_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_aegis128_update_a(state, &buf);
crypto_aegis128_update_a(state, &buf, do_simd);
pos = 0;
left -= fill;
src += fill;
}
crypto_aegis128_ad(state, src, left);
crypto_aegis128_ad(state, src, left, do_simd);
src += left & ~(AEGIS_BLOCK_SIZE - 1);
left &= AEGIS_BLOCK_SIZE - 1;
}
@ -318,13 +330,12 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos);
crypto_aegis128_update_a(state, &buf);
crypto_aegis128_update_a(state, &buf, do_simd);
}
}
static __always_inline
int crypto_aegis128_process_crypt(struct aegis_state *state,
struct aead_request *req,
struct skcipher_walk *walk,
void (*crypt)(struct aegis_state *state,
u8 *dst, const u8 *src,
@ -361,7 +372,7 @@ static void crypto_aegis128_final(struct aegis_state *state,
crypto_aegis_block_xor(&tmp, &state->blocks[3]);
for (i = 0; i < 7; i++)
crypto_aegis128_update_a(state, &tmp);
crypto_aegis128_update_a(state, &tmp, false);
for (i = 0; i < AEGIS128_STATE_BLOCKS; i++)
crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
@ -389,7 +400,7 @@ static int crypto_aegis128_setauthsize(struct crypto_aead *tfm,
return 0;
}
static int crypto_aegis128_encrypt(struct aead_request *req)
static int crypto_aegis128_encrypt_generic(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
@ -400,27 +411,18 @@ static int crypto_aegis128_encrypt(struct aead_request *req)
struct aegis_state state;
skcipher_walk_aead_encrypt(&walk, req, false);
if (aegis128_do_simd()) {
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_process_crypt(&state, req, &walk,
crypto_aegis128_encrypt_chunk_simd);
crypto_aegis128_final_simd(&state, &tag, req->assoclen,
cryptlen);
} else {
crypto_aegis128_init(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_process_crypt(&state, req, &walk,
crypto_aegis128_encrypt_chunk);
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
}
crypto_aegis128_init(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, false);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_encrypt_chunk);
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
return 0;
}
static int crypto_aegis128_decrypt(struct aead_request *req)
static int crypto_aegis128_decrypt_generic(struct aead_request *req)
{
static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@ -435,60 +437,152 @@ static int crypto_aegis128_decrypt(struct aead_request *req)
authsize, 0);
skcipher_walk_aead_decrypt(&walk, req, false);
if (aegis128_do_simd()) {
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_process_crypt(&state, req, &walk,
crypto_aegis128_decrypt_chunk_simd);
crypto_aegis128_final_simd(&state, &tag, req->assoclen,
cryptlen);
} else {
crypto_aegis128_init(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_process_crypt(&state, req, &walk,
crypto_aegis128_decrypt_chunk);
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
}
crypto_aegis128_init(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, false);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_decrypt_chunk);
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
if (unlikely(crypto_memneq(tag.bytes, zeros, authsize))) {
/*
* From Chapter 4. 'Security Analysis' of the AEGIS spec [0]
*
* "3. If verification fails, the decrypted plaintext and the
* wrong authentication tag should not be given as output."
*
* [0] https://competitions.cr.yp.to/round3/aegisv11.pdf
*/
skcipher_walk_aead_decrypt(&walk, req, false);
crypto_aegis128_process_crypt(NULL, &walk,
crypto_aegis128_wipe_chunk);
memzero_explicit(&tag, sizeof(tag));
return -EBADMSG;
}
return 0;
}
static struct aead_alg crypto_aegis128_alg = {
.setkey = crypto_aegis128_setkey,
.setauthsize = crypto_aegis128_setauthsize,
.encrypt = crypto_aegis128_encrypt,
.decrypt = crypto_aegis128_decrypt,
static int crypto_aegis128_encrypt_simd(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int cryptlen = req->cryptlen;
struct skcipher_walk walk;
struct aegis_state state;
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
if (!aegis128_do_simd())
return crypto_aegis128_encrypt_generic(req);
.base = {
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
skcipher_walk_aead_encrypt(&walk, req, false);
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, true);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_encrypt_chunk_simd);
crypto_aegis128_final_simd(&state, &tag, req->assoclen, cryptlen, 0);
.cra_priority = 100,
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
return 0;
}
.cra_name = "aegis128",
.cra_driver_name = "aegis128-generic",
static int crypto_aegis128_decrypt_simd(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
struct skcipher_walk walk;
struct aegis_state state;
.cra_module = THIS_MODULE,
if (!aegis128_do_simd())
return crypto_aegis128_decrypt_generic(req);
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
authsize, 0);
skcipher_walk_aead_decrypt(&walk, req, false);
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, true);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_decrypt_chunk_simd);
if (unlikely(crypto_aegis128_final_simd(&state, &tag, req->assoclen,
cryptlen, authsize))) {
skcipher_walk_aead_decrypt(&walk, req, false);
crypto_aegis128_process_crypt(NULL, &walk,
crypto_aegis128_wipe_chunk);
return -EBADMSG;
}
return 0;
}
static struct aead_alg crypto_aegis128_alg_generic = {
.setkey = crypto_aegis128_setkey,
.setauthsize = crypto_aegis128_setauthsize,
.encrypt = crypto_aegis128_encrypt_generic,
.decrypt = crypto_aegis128_decrypt_generic,
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
.base.cra_alignmask = 0,
.base.cra_priority = 100,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-generic",
.base.cra_module = THIS_MODULE,
};
static struct aead_alg crypto_aegis128_alg_simd = {
.setkey = crypto_aegis128_setkey,
.setauthsize = crypto_aegis128_setauthsize,
.encrypt = crypto_aegis128_encrypt_simd,
.decrypt = crypto_aegis128_decrypt_simd,
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
.base.cra_alignmask = 0,
.base.cra_priority = 200,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-simd",
.base.cra_module = THIS_MODULE,
};
static int __init crypto_aegis128_module_init(void)
{
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
crypto_aegis128_have_simd())
static_branch_enable(&have_simd);
int ret;
return crypto_register_aead(&crypto_aegis128_alg);
ret = crypto_register_aead(&crypto_aegis128_alg_generic);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
crypto_aegis128_have_simd()) {
ret = crypto_register_aead(&crypto_aegis128_alg_simd);
if (ret) {
crypto_unregister_aead(&crypto_aegis128_alg_generic);
return ret;
}
static_branch_enable(&have_simd);
}
return 0;
}
static void __exit crypto_aegis128_module_exit(void)
{
crypto_unregister_aead(&crypto_aegis128_alg);
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
crypto_aegis128_have_simd())
crypto_unregister_aead(&crypto_aegis128_alg_simd);
crypto_unregister_aead(&crypto_aegis128_alg_generic);
}
subsys_initcall(crypto_aegis128_module_init);
@ -499,3 +593,4 @@ MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm");
MODULE_ALIAS_CRYPTO("aegis128");
MODULE_ALIAS_CRYPTO("aegis128-generic");
MODULE_ALIAS_CRYPTO("aegis128-simd");
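
Splitting aegis128 into two registrations with one cra_name and
different priorities is what makes the fallback transparent: lookups by
algorithm name resolve to the highest-priority implementation, while the
driver name pins a specific one. A hedged sketch of the caller-side
effect (assuming both algs got registered):

#include <linux/types.h>
#include <crypto/aead.h>

static struct crypto_aead *pick_aegis(bool force_generic)
{
        /* "aegis128" -> highest cra_priority wins (simd, 200);
         * "aegis128-generic" -> pins the portable implementation */
        return crypto_alloc_aead(force_generic ? "aegis128-generic"
                                               : "aegis128", 0, 0);
}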


@ -20,7 +20,6 @@
extern int aegis128_have_aes_insn;
void *memcpy(void *dest, const void *src, size_t n);
void *memset(void *s, int c, size_t n);
struct aegis128_state {
uint8x16_t v[5];
@ -173,10 +172,57 @@ void crypto_aegis128_update_neon(void *state, const void *msg)
aegis128_save_state_neon(st, state);
}
#ifdef CONFIG_ARM
/*
* AArch32 does not provide these intrinsics natively because it does not
* implement the underlying instructions. AArch32 only provides 64-bit
* wide vtbl.8/vtbx.8 instructions, so use those instead.
*/
static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
union {
uint8x16_t val;
uint8x8x2_t pair;
} __a = { a };
return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
vtbl2_u8(__a.pair, vget_high_u8(b)));
}
static uint8x16_t vqtbx1q_u8(uint8x16_t v, uint8x16_t a, uint8x16_t b)
{
union {
uint8x16_t val;
uint8x8x2_t pair;
} __a = { a };
return vcombine_u8(vtbx2_u8(vget_low_u8(v), __a.pair, vget_low_u8(b)),
vtbx2_u8(vget_high_u8(v), __a.pair, vget_high_u8(b)));
}
static int8_t vminvq_s8(int8x16_t v)
{
int8x8_t s = vpmin_s8(vget_low_s8(v), vget_high_s8(v));
s = vpmin_s8(s, s);
s = vpmin_s8(s, s);
s = vpmin_s8(s, s);
return vget_lane_s8(s, 0);
}
#endif
static const uint8_t permute[] __aligned(64) = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
};
void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size)
{
struct aegis128_state st = aegis128_load_state_neon(state);
const int short_input = size < AEGIS_BLOCK_SIZE;
uint8x16_t msg;
preload_sbox();
@ -186,7 +232,8 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
msg = vld1q_u8(src);
st = aegis128_update_neon(st, msg);
vst1q_u8(dst, msg ^ s);
msg ^= s;
vst1q_u8(dst, msg);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
@ -195,13 +242,26 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
if (size > 0) {
uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
uint8_t buf[AEGIS_BLOCK_SIZE] = {};
uint8_t buf[AEGIS_BLOCK_SIZE];
const void *in = src;
void *out = dst;
uint8x16_t m;
memcpy(buf, src, size);
msg = vld1q_u8(buf);
st = aegis128_update_neon(st, msg);
vst1q_u8(buf, msg ^ s);
memcpy(dst, buf, size);
if (__builtin_expect(short_input, 0))
in = out = memcpy(buf + AEGIS_BLOCK_SIZE - size, src, size);
m = vqtbl1q_u8(vld1q_u8(in + size - AEGIS_BLOCK_SIZE),
vld1q_u8(permute + 32 - size));
st = aegis128_update_neon(st, m);
vst1q_u8(out + size - AEGIS_BLOCK_SIZE,
vqtbl1q_u8(m ^ s, vld1q_u8(permute + size)));
if (__builtin_expect(short_input, 0))
memcpy(dst, out, size);
else
vst1q_u8(out - AEGIS_BLOCK_SIZE, msg);
}
aegis128_save_state_neon(st, state);
@ -211,6 +271,7 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size)
{
struct aegis128_state st = aegis128_load_state_neon(state);
const int short_input = size < AEGIS_BLOCK_SIZE;
uint8x16_t msg;
preload_sbox();
@ -228,21 +289,34 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
if (size > 0) {
uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
uint8_t buf[AEGIS_BLOCK_SIZE];
const void *in = src;
void *out = dst;
uint8x16_t m;
vst1q_u8(buf, s);
memcpy(buf, src, size);
msg = vld1q_u8(buf) ^ s;
vst1q_u8(buf, msg);
memcpy(dst, buf, size);
if (__builtin_expect(short_input, 0))
in = out = memcpy(buf + AEGIS_BLOCK_SIZE - size, src, size);
st = aegis128_update_neon(st, msg);
m = s ^ vqtbx1q_u8(s, vld1q_u8(in + size - AEGIS_BLOCK_SIZE),
vld1q_u8(permute + 32 - size));
st = aegis128_update_neon(st, m);
vst1q_u8(out + size - AEGIS_BLOCK_SIZE,
vqtbl1q_u8(m, vld1q_u8(permute + size)));
if (__builtin_expect(short_input, 0))
memcpy(dst, out, size);
else
vst1q_u8(out - AEGIS_BLOCK_SIZE, msg);
}
aegis128_save_state_neon(st, state);
}
void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
uint64_t cryptlen)
int crypto_aegis128_final_neon(void *state, void *tag_xor,
unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize)
{
struct aegis128_state st = aegis128_load_state_neon(state);
uint8x16_t v;
@ -250,13 +324,21 @@ void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
preload_sbox();
v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8 * assoclen),
vmov_n_u64(8 * cryptlen));
v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8ULL * assoclen),
vmov_n_u64(8ULL * cryptlen));
for (i = 0; i < 7; i++)
st = aegis128_update_neon(st, v);
v = vld1q_u8(tag_xor);
v ^= st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
v = st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
if (authsize > 0) {
v = vqtbl1q_u8(~vceqq_u8(v, vld1q_u8(tag_xor)),
vld1q_u8(permute + authsize));
return vminvq_s8((int8x16_t)v);
}
vst1q_u8(tag_xor, v);
return 0;
}
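
The finalization now verifies the tag without ever storing the computed value to memory: ~vceqq_u8(v, tag) is 0x00 per matching byte and 0xff per differing byte, the permute + authsize lookup keeps only the first authsize of those bytes, and vminvq_s8() is negative if and only if some retained byte differs, which is what the new int return reports. A plain-C sketch of that predicate (illustration only, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* Scalar model of the NEON tag check: 0 on match, negative on mismatch.
 * Bytes past authsize are masked to zero, exactly like the
 * vqtbl1q_u8()/vminvq_s8() pair above. */
static int tag_check(const uint8_t computed[16], const uint8_t expected[16],
		     unsigned int authsize)
{
	int min = 0;

	for (unsigned int i = 0; i < authsize; i++) {
		int8_t d = (computed[i] == expected[i]) ? 0 : -1;
		if (d < min)
			min = d;
	}
	return min;
}

int main(void)
{
	uint8_t a[16] = { 1, 2, 3 }, b[16] = { 1, 2, 4 };

	printf("%d\n", tag_check(a, b, 2));  /* 0: first two bytes match */
	printf("%d\n", tag_check(a, b, 3));  /* -1: byte 2 differs       */
	return 0;
}

Keeping the comparison in SIMD registers avoids spilling the computed tag to the stack, and the caller can turn the negative result into -EBADMSG.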


@ -14,8 +14,10 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size);
void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size);
void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
uint64_t cryptlen);
int crypto_aegis128_final_neon(void *state, void *tag_xor,
unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize);
int aegis128_have_aes_insn __ro_after_init;
@ -60,11 +62,18 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
kernel_neon_end();
}
void crypto_aegis128_final_simd(union aegis_block *state,
union aegis_block *tag_xor,
u64 assoclen, u64 cryptlen)
int crypto_aegis128_final_simd(union aegis_block *state,
union aegis_block *tag_xor,
unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize)
{
int ret;
kernel_neon_begin();
crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen);
ret = crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen,
authsize);
kernel_neon_end();
return ret;
}


@ -147,7 +147,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
struct sockaddr_alg_new *sa = (void *)uaddr;
const struct af_alg_type *type;
void *private;
int err;
@ -155,7 +155,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (sock->state == SS_CONNECTED)
return -EINVAL;
if (addr_len < sizeof(*sa))
BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
offsetof(struct sockaddr_alg, salg_name));
BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));
if (addr_len < sizeof(*sa) + 1)
return -EINVAL;
/* If caller uses non-allowed flag, return error. */
@ -163,7 +167,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EINVAL;
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;
type = alg_get_type(sa->salg_type);
if (PTR_ERR(type) == -ENOENT) {
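
Background for this hunk: struct sockaddr_alg fixes salg_name at 64 bytes, but binds can legitimately carry longer algorithm names, and indexing past a declared array bound is undefined behavior. Reading the address through sockaddr_alg_new, whose salg_name is a flexible array, makes any addr_len-sized name well-defined, and addr_len < sizeof(*sa) + 1 guarantees at least one name byte before the NUL is written at addr_len - sizeof(*sa) - 1. A userspace sketch of the two layouts (field types mirrored from the if_alg.h UAPI header, abbreviated):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sockaddr_alg {
	uint16_t salg_family;
	uint8_t  salg_type[14];
	uint32_t salg_feat;
	uint32_t salg_mask;
	uint8_t  salg_name[64];   /* fixed bound: sizeof() includes it */
};

struct sockaddr_alg_new {
	uint16_t salg_family;
	uint8_t  salg_type[14];
	uint32_t salg_feat;
	uint32_t salg_mask;
	uint8_t  salg_name[];     /* flexible: sizeof() stops here */
};

int main(void)
{
	/* The BUILD_BUG_ON pair in the hunk encodes exactly these facts. */
	printf("name offset old/new: %zu %zu\n",
	       offsetof(struct sockaddr_alg, salg_name),
	       offsetof(struct sockaddr_alg_new, salg_name));
	printf("sizeof(new) equals name offset: %zu\n",
	       sizeof(struct sockaddr_alg_new));
	return 0;
}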


@ -10,7 +10,7 @@
#include <linux/tpm_command.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <asm/unaligned.h>
#include <keys/asymmetric-subtype.h>
#include <keys/trusted_tpm.h>


@ -53,12 +53,13 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
(const u64 *)params.key, params.key_size) < 0)
return -EINVAL;
memcpy(ctx->private_key, params.key, params.key_size);
if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
ctx->private_key, params.key_size) < 0) {
memzero_explicit(ctx->private_key, params.key_size);
return -EINVAL;
}
return 0;
}
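
The reorder matters on strict-alignment architectures: params.key points into a caller buffer with no alignment guarantee, while ctx->private_key is a properly aligned u64 array, so copying first lets ecc_is_key_valid() read aligned words, and memzero_explicit() keeps a rejected key from lingering in the context. The copy-then-validate shape, reduced to a minimal standalone sketch (names and the toy validator are hypothetical):

#include <stdint.h>
#include <string.h>

#define NDIGITS 4                       /* e.g. P-256: 4 x 64-bit digits */

/* Hypothetical validator that is only safe on aligned u64 data. */
static int key_is_valid(const uint64_t key[NDIGITS])
{
	for (int i = 0; i < NDIGITS; i++)
		if (key[i])
			return 0;       /* nonzero somewhere: accept */
	return -1;                      /* all-zero key: reject */
}

/* Copy into aligned storage first, validate second, wipe on failure. */
static int set_key(uint64_t priv[NDIGITS], const void *buf, size_t len)
{
	if (len != sizeof(uint64_t) * NDIGITS)
		return -1;
	memcpy(priv, buf, len);         /* 'buf' may be unaligned */
	if (key_is_valid(priv) < 0) {
		memset(priv, 0, len);   /* the kernel uses memzero_explicit() */
		return -1;
	}
	return 0;
}

int main(void)
{
	uint64_t priv[NDIGITS];
	unsigned char raw[32] = { 1 }; /* arbitrary, possibly unaligned bytes */

	return set_key(priv, raw, sizeof(raw));
}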


@ -322,7 +322,7 @@ static const u32 KC[SEED_NUM_KCONSTANTS] = {
SS2[byte(t1, 2)] ^ SS3[byte(t1, 3)]; \
t0 += t1; \
X1 ^= t0; \
X2 ^= t1;
X2 ^= t1
static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
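
The dropped semicolon above is the classic multi-statement-macro hazard: with a semicolon baked into the body, an invocation written naturally as MACRO(x); expands to an extra empty statement, which becomes a hard syntax error the moment the macro is the sole body of an if followed by an else. A minimal demonstration (do/while form rather than SEED's statement-sequence macro, same failure mode):

#define BUMP_BAD(x)  do { (x)++; } while (0);   /* trailing ';' is the bug */
#define BUMP_GOOD(x) do { (x)++; } while (0)

static int f(int c, int x)
{
	if (c)
		BUMP_GOOD(x);   /* expands cleanly before the 'else' */
	else
		x--;
	/* Swapping in BUMP_BAD above would leave a stray empty statement
	 * between the if body and the 'else': a compile error. */
	return x;
}

int main(void)
{
	return f(1, 0) == 1 ? 0 : 1;
}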


@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>


@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>


@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/percpu.h>
#include <asm/byteorder.h>


@ -119,12 +119,6 @@ static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec)
memset(ec, 0, sizeof(*ec));
}
static int sm2_ec_ctx_reset(struct mpi_ec_ctx *ec)
{
sm2_ec_ctx_deinit(ec);
return sm2_ec_ctx_init(ec);
}
/* RESULT must have been initialized and is set on success to the
* point given by VALUE.
*/
@ -132,55 +126,48 @@ static int sm2_ecc_os2ec(MPI_POINT result, MPI value)
{
int rc;
size_t n;
const unsigned char *buf;
unsigned char *buf_memory;
unsigned char *buf;
MPI x, y;
n = (mpi_get_nbits(value)+7)/8;
buf_memory = kmalloc(n, GFP_KERNEL);
rc = mpi_print(GCRYMPI_FMT_USG, buf_memory, n, &n, value);
if (rc) {
kfree(buf_memory);
return rc;
}
buf = buf_memory;
n = MPI_NBYTES(value);
buf = kmalloc(n, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (n < 1) {
kfree(buf_memory);
return -EINVAL;
}
if (*buf != 4) {
kfree(buf_memory);
return -EINVAL; /* No support for point compression. */
}
if (((n-1)%2)) {
kfree(buf_memory);
return -EINVAL;
}
n = (n-1)/2;
rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value);
if (rc)
goto err_freebuf;
rc = -EINVAL;
if (n < 1 || ((n - 1) % 2))
goto err_freebuf;
/* No support for point compression */
if (*buf != 0x4)
goto err_freebuf;
rc = -ENOMEM;
n = (n - 1) / 2;
x = mpi_read_raw_data(buf + 1, n);
if (!x) {
kfree(buf_memory);
return -ENOMEM;
}
if (!x)
goto err_freebuf;
y = mpi_read_raw_data(buf + 1 + n, n);
kfree(buf_memory);
if (!y) {
mpi_free(x);
return -ENOMEM;
}
if (!y)
goto err_freex;
mpi_normalize(x);
mpi_normalize(y);
mpi_set(result->x, x);
mpi_set(result->y, y);
mpi_set_ui(result->z, 1);
mpi_free(x);
mpi_free(y);
rc = 0;
return 0;
mpi_free(y);
err_freex:
mpi_free(x);
err_freebuf:
kfree(buf);
return rc;
}
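
The rewrite above also tightens the parsing itself: the input must be the standard uncompressed-point encoding 0x04 || X || Y with equal-length coordinates, and every failure now funnels through one cleanup chain so buf, x and y are each freed exactly once. The wire-format checks in isolation (plain C sketch, hypothetical helper name):

#include <stddef.h>
#include <stdint.h>

/* Split an uncompressed EC point, 0x04 || X || Y, into its halves.
 * Returns the coordinate size in bytes, or 0 on malformed input. */
static size_t os2ec_split(const uint8_t *buf, size_t n,
			  const uint8_t **x, const uint8_t **y)
{
	if (n < 3 || buf[0] != 0x04)    /* no point-compression support */
		return 0;
	if ((n - 1) % 2)                /* X and Y must be the same length */
		return 0;
	*x = buf + 1;
	*y = buf + 1 + (n - 1) / 2;
	return (n - 1) / 2;
}

int main(void)
{
	uint8_t p[1 + 4 + 4] = { 0x04, 1, 2, 3, 4, 5, 6, 7, 8 };
	const uint8_t *x, *y;

	return os2ec_split(p, sizeof(p), &x, &y) == 4 ? 0 : 1;
}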
struct sm2_signature_ctx {
@ -399,10 +386,6 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
MPI a;
int rc;
rc = sm2_ec_ctx_reset(ec);
if (rc)
return rc;
ec->Q = mpi_point_new(0);
if (!ec->Q)
return -ENOMEM;


@ -77,8 +77,8 @@ static const char *check[] = {
NULL
};
static u32 block_sizes[] = { 16, 64, 256, 1024, 1472, 8192, 0 };
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 };
static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
#define XBUFSIZE 8
#define MAX_IVLEN 32
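
Two changes meet in the tables above: 1420-byte entries replace some 8192-byte ones, a length presumably chosen to approximate a full-MTU network packet payload, and the later hunks round each size up to the transform's block size so block ciphers are still driven with legal lengths (stream ciphers and AEADs with a block size of 1 keep 1420 as-is). For power-of-two alignments the kernel's round_up() reduces to the familiar mask trick:

#include <stdio.h>

/* Equivalent to the kernel's round_up() when n is a power of two. */
#define round_up(x, n)  (((x) + (n) - 1) & ~((n) - 1))

int main(void)
{
	printf("%d\n", round_up(1420, 16)); /* 1424: whole AES blocks     */
	printf("%d\n", round_up(1420, 1));  /* 1420: stream ciphers as-is */
	return 0;
}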
@ -256,10 +256,10 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
struct test_mb_aead_data *data;
struct crypto_aead *tfm;
unsigned int i, j, iv_len;
const int *b_size;
const char *key;
const char *e;
void *assoc;
u32 *b_size;
char *iv;
int ret;
@ -337,15 +337,17 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
do {
b_size = aead_sizes;
do {
if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
int bs = round_up(*b_size, crypto_aead_blocksize(tfm));
if (bs + authsize > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for buffer (%lu)\n",
authsize + *b_size,
authsize + bs,
XBUFSIZE * PAGE_SIZE);
goto out;
}
pr_info("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, *b_size);
*keysize * 8, bs);
/* Set up tfm global state, i.e. the key */
@ -380,11 +382,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
memset(assoc, 0xff, aad_size);
sg_init_aead(cur->sg, cur->xbuf,
*b_size + (enc ? 0 : authsize),
bs + (enc ? 0 : authsize),
assoc, aad_size);
sg_init_aead(cur->sgout, cur->xoutbuf,
*b_size + (enc ? authsize : 0),
bs + (enc ? authsize : 0),
assoc, aad_size);
aead_request_set_ad(cur->req, aad_size);
@ -394,7 +396,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
aead_request_set_crypt(cur->req,
cur->sgout,
cur->sg,
*b_size, iv);
bs, iv);
ret = crypto_aead_encrypt(cur->req);
ret = do_one_aead_op(cur->req, ret);
@ -406,18 +408,18 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
}
aead_request_set_crypt(cur->req, cur->sg,
cur->sgout, *b_size +
cur->sgout, bs +
(enc ? 0 : authsize),
iv);
}
if (secs) {
ret = test_mb_aead_jiffies(data, enc, *b_size,
ret = test_mb_aead_jiffies(data, enc, bs,
secs, num_mb);
cond_resched();
} else {
ret = test_mb_aead_cycles(data, enc, *b_size,
ret = test_mb_aead_cycles(data, enc, bs,
num_mb);
}
@ -534,7 +536,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
unsigned int *b_size;
const int *b_size;
unsigned int iv_len;
struct crypto_wait wait;
@ -590,12 +592,14 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
do {
b_size = aead_sizes;
do {
u32 bs = round_up(*b_size, crypto_aead_blocksize(tfm));
assoc = axbuf[0];
memset(assoc, 0xff, aad_size);
if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
*keysize + *b_size,
*keysize + bs,
TVMEMSIZE * PAGE_SIZE);
goto out;
}
@ -616,7 +620,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
crypto_aead_clear_flags(tfm, ~0);
printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
i, *keysize * 8, *b_size);
i, *keysize * 8, bs);
memset(tvmem[0], 0xff, PAGE_SIZE);
@ -627,11 +631,11 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out;
}
sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
assoc, aad_size);
sg_init_aead(sgout, xoutbuf,
*b_size + (enc ? authsize : 0), assoc,
bs + (enc ? authsize : 0), assoc,
aad_size);
aead_request_set_ad(req, aad_size);
@ -644,7 +648,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
* reversed (input <-> output) to calculate it
*/
aead_request_set_crypt(req, sgout, sg,
*b_size, iv);
bs, iv);
ret = do_one_aead_op(req,
crypto_aead_encrypt(req));
@ -656,15 +660,15 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
aead_request_set_crypt(req, sg, sgout,
*b_size + (enc ? 0 : authsize),
bs + (enc ? 0 : authsize),
iv);
if (secs) {
ret = test_aead_jiffies(req, enc, *b_size,
ret = test_aead_jiffies(req, enc, bs,
secs);
cond_resched();
} else {
ret = test_aead_cycles(req, enc, *b_size);
ret = test_aead_cycles(req, enc, bs);
}
if (ret) {
@ -1253,9 +1257,9 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
struct test_mb_skcipher_data *data;
struct crypto_skcipher *tfm;
unsigned int i, j, iv_len;
const int *b_size;
const char *key;
const char *e;
u32 *b_size;
char iv[128];
int ret;
@ -1316,14 +1320,16 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
do {
b_size = block_sizes;
do {
if (*b_size > XBUFSIZE * PAGE_SIZE) {
u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm));
if (bs > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for buffer (%lu)\n",
*b_size, XBUFSIZE * PAGE_SIZE);
goto out;
}
pr_info("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, *b_size);
*keysize * 8, bs);
/* Set up tfm global state, i.e. the key */
@ -1353,7 +1359,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
for (j = 0; j < num_mb; ++j) {
struct test_mb_skcipher_data *cur = &data[j];
unsigned int k = *b_size;
unsigned int k = bs;
unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
unsigned int p = 0;
@ -1377,12 +1383,12 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
if (secs) {
ret = test_mb_acipher_jiffies(data, enc,
*b_size, secs,
bs, secs,
num_mb);
cond_resched();
} else {
ret = test_mb_acipher_cycles(data, enc,
*b_size, num_mb);
bs, num_mb);
}
if (ret) {
@ -1497,8 +1503,8 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
char iv[128];
struct skcipher_request *req;
struct crypto_skcipher *tfm;
const int *b_size;
const char *e;
u32 *b_size;
if (enc == ENCRYPT)
e = "encryption";
@ -1533,17 +1539,18 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
b_size = block_sizes;
do {
u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm));
struct scatterlist sg[TVMEMSIZE];
if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for "
"tvmem (%lu)\n", *keysize + *b_size,
"tvmem (%lu)\n", *keysize + bs,
TVMEMSIZE * PAGE_SIZE);
goto out_free_req;
}
pr_info("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, *b_size);
*keysize * 8, bs);
memset(tvmem[0], 0xff, PAGE_SIZE);
@ -1565,7 +1572,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
goto out_free_req;
}
k = *keysize + *b_size;
k = *keysize + bs;
sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));
if (k > PAGE_SIZE) {
@ -1582,22 +1589,22 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
sg_set_buf(sg + j, tvmem[j], k);
memset(tvmem[j], 0xff, k);
} else {
sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
sg_set_buf(sg, tvmem[0] + *keysize, bs);
}
iv_len = crypto_skcipher_ivsize(tfm);
if (iv_len)
memset(&iv, 0xff, iv_len);
skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
skcipher_request_set_crypt(req, sg, sg, bs, iv);
if (secs) {
ret = test_acipher_jiffies(req, enc,
*b_size, secs);
bs, secs);
cond_resched();
} else {
ret = test_acipher_cycles(req, enc,
*b_size);
bs);
}
if (ret) {
@ -3066,7 +3073,7 @@ err_free_tv:
*/
static void __exit tcrypt_mod_fini(void) { }
subsys_initcall(tcrypt_mod_init);
late_initcall(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
module_param(alg, charp, 0);


@ -1171,8 +1171,7 @@ static inline const void *sg_data(struct scatterlist *sg)
}
/* Test one hash test vector in one configuration, using the shash API */
static int test_shash_vec_cfg(const char *driver,
const struct hash_testvec *vec,
static int test_shash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct shash_desc *desc,
@ -1183,6 +1182,7 @@ static int test_shash_vec_cfg(const char *driver,
const unsigned int alignmask = crypto_shash_alignmask(tfm);
const unsigned int digestsize = crypto_shash_digestsize(tfm);
const unsigned int statesize = crypto_shash_statesize(tfm);
const char *driver = crypto_shash_driver_name(tfm);
const struct test_sg_division *divs[XBUFSIZE];
unsigned int i;
u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
@ -1355,8 +1355,7 @@ static int check_nonfinal_ahash_op(const char *op, int err,
}
/* Test one hash test vector in one configuration, using the ahash API */
static int test_ahash_vec_cfg(const char *driver,
const struct hash_testvec *vec,
static int test_ahash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct ahash_request *req,
@ -1367,6 +1366,7 @@ static int test_ahash_vec_cfg(const char *driver,
const unsigned int alignmask = crypto_ahash_alignmask(tfm);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int statesize = crypto_ahash_statesize(tfm);
const char *driver = crypto_ahash_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const struct test_sg_division *divs[XBUFSIZE];
DECLARE_CRYPTO_WAIT(wait);
@ -1521,8 +1521,7 @@ result_ready:
driver, cfg);
}
static int test_hash_vec_cfg(const char *driver,
const struct hash_testvec *vec,
static int test_hash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct ahash_request *req,
@ -1539,20 +1538,18 @@ static int test_hash_vec_cfg(const char *driver,
*/
if (desc) {
err = test_shash_vec_cfg(driver, vec, vec_name, cfg, desc, tsgl,
err = test_shash_vec_cfg(vec, vec_name, cfg, desc, tsgl,
hashstate);
if (err)
return err;
}
return test_ahash_vec_cfg(driver, vec, vec_name, cfg, req, tsgl,
hashstate);
return test_ahash_vec_cfg(vec, vec_name, cfg, req, tsgl, hashstate);
}
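
Every testmgr hunk in this group follows the same recipe: instead of threading the driver string through each helper, the helpers rederive it from the tfm they already hold, so failure logs name the implementation that was actually instantiated, which can be a fallback rather than the requested driver. The accessors are thin wrappers along these lines (simplified sketch, not the exact kernel definitions):

#include <stdio.h>

/* Simplified stand-ins for struct crypto_alg / struct crypto_shash. */
struct crypto_alg { const char *cra_name, *cra_driver_name; };
struct crypto_shash { struct crypto_alg *alg; };

/* Sketch of crypto_shash_driver_name(): read the name off the tfm. */
static const char *crypto_shash_driver_name(struct crypto_shash *tfm)
{
	return tfm->alg->cra_driver_name;
}

int main(void)
{
	struct crypto_alg alg = { "sha256", "sha256-generic" };
	struct crypto_shash tfm = { &alg };

	/* One argument fewer for every test helper, same information. */
	printf("testing %s\n", crypto_shash_driver_name(&tfm));
	return 0;
}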
static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
unsigned int vec_num, struct ahash_request *req,
struct shash_desc *desc, struct test_sglist *tsgl,
u8 *hashstate)
static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
struct ahash_request *req, struct shash_desc *desc,
struct test_sglist *tsgl, u8 *hashstate)
{
char vec_name[16];
unsigned int i;
@ -1561,7 +1558,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
sprintf(vec_name, "%u", vec_num);
for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
err = test_hash_vec_cfg(driver, vec, vec_name,
err = test_hash_vec_cfg(vec, vec_name,
&default_hash_testvec_configs[i],
req, desc, tsgl, hashstate);
if (err)
@ -1576,7 +1573,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(driver, vec, vec_name, &cfg,
err = test_hash_vec_cfg(vec, vec_name, &cfg,
req, desc, tsgl, hashstate);
if (err)
return err;
@ -1633,8 +1630,7 @@ done:
* Test the hash algorithm represented by @req against the corresponding generic
* implementation, if one is available.
*/
static int test_hash_vs_generic_impl(const char *driver,
const char *generic_driver,
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
struct ahash_request *req,
struct shash_desc *desc,
@ -1646,6 +1642,7 @@ static int test_hash_vs_generic_impl(const char *driver,
const unsigned int blocksize = crypto_ahash_blocksize(tfm);
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
const char *driver = crypto_ahash_driver_name(tfm);
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_shash *generic_tfm = NULL;
struct shash_desc *generic_desc = NULL;
@ -1732,7 +1729,7 @@ static int test_hash_vs_generic_impl(const char *driver,
vec_name, sizeof(vec_name));
generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
err = test_hash_vec_cfg(driver, &vec, vec_name, cfg,
err = test_hash_vec_cfg(&vec, vec_name, cfg,
req, desc, tsgl, hashstate);
if (err)
goto out;
@ -1749,8 +1746,7 @@ out:
return err;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_hash_vs_generic_impl(const char *driver,
const char *generic_driver,
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
struct ahash_request *req,
struct shash_desc *desc,
@ -1820,6 +1816,7 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
driver, PTR_ERR(atfm));
return PTR_ERR(atfm);
}
driver = crypto_ahash_driver_name(atfm);
req = ahash_request_alloc(atfm, GFP_KERNEL);
if (!req) {
@ -1859,13 +1856,12 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
}
for (i = 0; i < num_vecs; i++) {
err = test_hash_vec(driver, &vecs[i], i, req, desc, tsgl,
hashstate);
err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate);
if (err)
goto out;
cond_resched();
}
err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
err = test_hash_vs_generic_impl(generic_driver, maxkeysize, req,
desc, tsgl, hashstate);
out:
kfree(hashstate);
@ -1923,8 +1919,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
return err;
}
static int test_aead_vec_cfg(const char *driver, int enc,
const struct aead_testvec *vec,
static int test_aead_vec_cfg(int enc, const struct aead_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct aead_request *req,
@ -1934,6 +1929,7 @@ static int test_aead_vec_cfg(const char *driver, int enc,
const unsigned int alignmask = crypto_aead_alignmask(tfm);
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const char *driver = crypto_aead_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const char *op = enc ? "encryption" : "decryption";
DECLARE_CRYPTO_WAIT(wait);
@ -2106,9 +2102,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
return 0;
}
static int test_aead_vec(const char *driver, int enc,
const struct aead_testvec *vec, unsigned int vec_num,
struct aead_request *req,
static int test_aead_vec(int enc, const struct aead_testvec *vec,
unsigned int vec_num, struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
char vec_name[16];
@ -2121,7 +2116,7 @@ static int test_aead_vec(const char *driver, int enc,
sprintf(vec_name, "%u", vec_num);
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
err = test_aead_vec_cfg(driver, enc, vec, vec_name,
err = test_aead_vec_cfg(enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
@ -2136,7 +2131,7 @@ static int test_aead_vec(const char *driver, int enc,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
err = test_aead_vec_cfg(driver, enc, vec, vec_name,
err = test_aead_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
@ -2152,7 +2147,6 @@ static int test_aead_vec(const char *driver, int enc,
struct aead_extra_tests_ctx {
struct aead_request *req;
struct crypto_aead *tfm;
const char *driver;
const struct alg_test_desc *test_desc;
struct cipher_test_sglists *tsgls;
unsigned int maxdatasize;
@ -2358,7 +2352,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
if (ctx->vec.novrfy) {
generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
sizeof(ctx->cfgname));
err = test_aead_vec_cfg(ctx->driver, DECRYPT, &ctx->vec,
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
ctx->req, ctx->tsgls);
if (err)
@ -2377,7 +2371,7 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
{
struct crypto_aead *tfm = ctx->tfm;
const char *algname = crypto_aead_alg(tfm)->base.cra_name;
const char *driver = ctx->driver;
const char *driver = crypto_aead_driver_name(tfm);
const char *generic_driver = ctx->test_desc->generic_driver;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *generic_tfm = NULL;
@ -2454,14 +2448,14 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
sizeof(ctx->cfgname));
if (!ctx->vec.novrfy) {
err = test_aead_vec_cfg(driver, ENCRYPT, &ctx->vec,
err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
ctx->req, ctx->tsgls);
if (err)
goto out;
}
if (ctx->vec.crypt_error == 0 || ctx->vec.novrfy) {
err = test_aead_vec_cfg(driver, DECRYPT, &ctx->vec,
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
ctx->req, ctx->tsgls);
if (err)
@ -2476,8 +2470,7 @@ out:
return err;
}
static int test_aead_extra(const char *driver,
const struct alg_test_desc *test_desc,
static int test_aead_extra(const struct alg_test_desc *test_desc,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
@ -2493,7 +2486,6 @@ static int test_aead_extra(const char *driver,
return -ENOMEM;
ctx->req = req;
ctx->tfm = crypto_aead_reqtfm(req);
ctx->driver = driver;
ctx->test_desc = test_desc;
ctx->tsgls = tsgls;
ctx->maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
@ -2528,8 +2520,7 @@ out:
return err;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_aead_extra(const char *driver,
const struct alg_test_desc *test_desc,
static int test_aead_extra(const struct alg_test_desc *test_desc,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
@ -2537,8 +2528,7 @@ static int test_aead_extra(const char *driver,
}
#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_aead(const char *driver, int enc,
const struct aead_test_suite *suite,
static int test_aead(int enc, const struct aead_test_suite *suite,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
@ -2546,8 +2536,7 @@ static int test_aead(const char *driver, int enc,
int err;
for (i = 0; i < suite->count; i++) {
err = test_aead_vec(driver, enc, &suite->vecs[i], i, req,
tsgls);
err = test_aead_vec(enc, &suite->vecs[i], i, req, tsgls);
if (err)
return err;
cond_resched();
@ -2575,6 +2564,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
driver = crypto_aead_driver_name(tfm);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@ -2592,15 +2582,15 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
goto out;
}
err = test_aead(driver, ENCRYPT, suite, req, tsgls);
err = test_aead(ENCRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_aead(driver, DECRYPT, suite, req, tsgls);
err = test_aead(DECRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_aead_extra(driver, desc, req, tsgls);
err = test_aead_extra(desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@ -2695,8 +2685,7 @@ out_nobuf:
return ret;
}
static int test_skcipher_vec_cfg(const char *driver, int enc,
const struct cipher_testvec *vec,
static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct skcipher_request *req,
@ -2705,6 +2694,7 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const unsigned int alignmask = crypto_skcipher_alignmask(tfm);
const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
const char *driver = crypto_skcipher_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const char *op = enc ? "encryption" : "decryption";
DECLARE_CRYPTO_WAIT(wait);
@ -2859,8 +2849,7 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
return 0;
}
static int test_skcipher_vec(const char *driver, int enc,
const struct cipher_testvec *vec,
static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
unsigned int vec_num,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
@ -2875,7 +2864,7 @@ static int test_skcipher_vec(const char *driver, int enc,
sprintf(vec_name, "%u", vec_num);
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
@ -2890,7 +2879,7 @@ static int test_skcipher_vec(const char *driver, int enc,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
@ -2961,8 +2950,7 @@ done:
* Test the skcipher algorithm represented by @req against the corresponding
* generic implementation, if one is available.
*/
static int test_skcipher_vs_generic_impl(const char *driver,
const char *generic_driver,
static int test_skcipher_vs_generic_impl(const char *generic_driver,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
@ -2972,6 +2960,7 @@ static int test_skcipher_vs_generic_impl(const char *driver,
const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
const char *driver = crypto_skcipher_driver_name(tfm);
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_skcipher *generic_tfm = NULL;
struct skcipher_request *generic_req = NULL;
@ -3077,11 +3066,11 @@ static int test_skcipher_vs_generic_impl(const char *driver,
vec_name, sizeof(vec_name));
generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name,
err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
cfg, req, tsgls);
if (err)
goto out;
err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name,
err = test_skcipher_vec_cfg(DECRYPT, &vec, vec_name,
cfg, req, tsgls);
if (err)
goto out;
@ -3099,8 +3088,7 @@ out:
return err;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher_vs_generic_impl(const char *driver,
const char *generic_driver,
static int test_skcipher_vs_generic_impl(const char *generic_driver,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
@ -3108,8 +3096,7 @@ static int test_skcipher_vs_generic_impl(const char *driver,
}
#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher(const char *driver, int enc,
const struct cipher_test_suite *suite,
static int test_skcipher(int enc, const struct cipher_test_suite *suite,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
@ -3117,8 +3104,7 @@ static int test_skcipher(const char *driver, int enc,
int err;
for (i = 0; i < suite->count; i++) {
err = test_skcipher_vec(driver, enc, &suite->vecs[i], i, req,
tsgls);
err = test_skcipher_vec(enc, &suite->vecs[i], i, req, tsgls);
if (err)
return err;
cond_resched();
@ -3146,6 +3132,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
driver = crypto_skcipher_driver_name(tfm);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@ -3163,16 +3150,15 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
goto out;
}
err = test_skcipher(driver, ENCRYPT, suite, req, tsgls);
err = test_skcipher(ENCRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
err = test_skcipher(DECRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_skcipher_vs_generic_impl(driver, desc->generic_driver, req,
tsgls);
err = test_skcipher_vs_generic_impl(desc->generic_driver, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
skcipher_request_free(req);
@ -3602,6 +3588,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
"%ld\n", driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
driver = crypto_shash_driver_name(tfm);
do {
SHASH_DESC_ON_STACK(shash, tfm);
@ -5677,15 +5664,21 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
type, mask);
test_done:
if (rc && (fips_enabled || panic_on_fail)) {
fips_fail_notify();
panic("alg: self-tests for %s (%s) failed in %s mode!\n",
driver, alg, fips_enabled ? "fips" : "panic_on_fail");
if (rc) {
if (fips_enabled || panic_on_fail) {
fips_fail_notify();
panic("alg: self-tests for %s (%s) failed in %s mode!\n",
driver, alg,
fips_enabled ? "fips" : "panic_on_fail");
}
WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)",
driver, alg, rc);
} else {
if (fips_enabled)
pr_info("alg: self-tests for %s (%s) passed\n",
driver, alg);
}
if (fips_enabled && !rc)
pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
return rc;
notest:


@ -348,19 +348,6 @@ config HW_RANDOM_HISI
If unsure, say Y.
config HW_RANDOM_HISI_V2
tristate "HiSilicon True Random Number Generator V2 support"
depends on HW_RANDOM && ARM64 && ACPI
default HW_RANDOM
help
This driver provides kernel-side support for the True Random Number
Generator V2 hardware found on HiSilicon Hi1620 SoC.
To compile this driver as a module, choose M here: the
module will be called hisi-trng-v2.
If unsure, say Y.
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
@ -508,6 +495,7 @@ config HW_RANDOM_NPCM
config HW_RANDOM_KEYSTONE
depends on ARCH_KEYSTONE || COMPILE_TEST
depends on HAS_IOMEM && OF
default HW_RANDOM
tristate "TI Keystone NETCP SA Hardware random number generator"
help


@ -30,7 +30,6 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_HISI_V2) += hisi-trng-v2.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o


@ -1,99 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#define HISI_TRNG_REG 0x00F0
#define HISI_TRNG_BYTES 4
#define HISI_TRNG_QUALITY 512
#define SLEEP_US 10
#define TIMEOUT_US 10000
struct hisi_trng {
void __iomem *base;
struct hwrng rng;
};
static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct hisi_trng *trng;
int currsize = 0;
u32 val = 0;
u32 ret;
trng = container_of(rng, struct hisi_trng, rng);
do {
ret = readl_poll_timeout(trng->base + HISI_TRNG_REG, val,
val, SLEEP_US, TIMEOUT_US);
if (ret)
return currsize;
if (max - currsize >= HISI_TRNG_BYTES) {
memcpy(buf + currsize, &val, HISI_TRNG_BYTES);
currsize += HISI_TRNG_BYTES;
if (currsize == max)
return currsize;
continue;
}
/* copy remaining bytes */
memcpy(buf + currsize, &val, max - currsize);
currsize = max;
} while (currsize < max);
return currsize;
}
static int hisi_trng_probe(struct platform_device *pdev)
{
struct hisi_trng *trng;
int ret;
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
trng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->base))
return PTR_ERR(trng->base);
trng->rng.name = pdev->name;
trng->rng.read = hisi_trng_read;
trng->rng.quality = HISI_TRNG_QUALITY;
ret = devm_hwrng_register(&pdev->dev, &trng->rng);
if (ret)
dev_err(&pdev->dev, "failed to register hwrng!\n");
return ret;
}
static const struct acpi_device_id hisi_trng_acpi_match[] = {
{ "HISI02B3", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
static struct platform_driver hisi_trng_driver = {
.probe = hisi_trng_probe,
.driver = {
.name = "hisi-trng-v2",
.acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
},
};
module_platform_driver(hisi_trng_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Weili Qian <qianweili@huawei.com>");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_DESCRIPTION("HiSilicon true random number generator V2 driver");


@ -252,10 +252,8 @@ static int imx_rngc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev, "Couldn't get irq %d\n", irq);
if (irq < 0)
return irq;
}
ret = clk_prepare_enable(rngc->clk);
if (ret)


@ -336,7 +336,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <asm/processor.h>
#include <linux/uaccess.h>


@ -548,6 +548,7 @@ config CRYPTO_DEV_ATMEL_SHA
config CRYPTO_DEV_ATMEL_I2C
tristate
select BITREVERSE
config CRYPTO_DEV_ATMEL_ECC
tristate "Support for Microchip / Atmel ECC hw accelerator"
@ -648,7 +649,7 @@ choice
default CRYPTO_DEV_QCE_ENABLE_ALL
depends on CRYPTO_DEV_QCE
help
This option allows to choose whether to build support for all algorihtms
This option allows to choose whether to build support for all algorithms
(default), hashes-only, or skciphers-only.
The QCE engine does not appear to scale as well as the CPU to handle
@ -900,4 +901,6 @@ config CRYPTO_DEV_SA2UL
used for crypto offload. Select this if you want to use hardware
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/keembay/Kconfig"
endif # CRYPTO_HW


@ -51,3 +51,4 @@ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += keembay/


@ -43,7 +43,7 @@ config CRYPTO_DEV_SUN8I_CE
depends on CRYPTO_DEV_ALLWINNER
depends on PM
help
Select y here to have support for the crypto Engine availlable on
Select y here to have support for the crypto Engine available on
Allwinner SoC H2+, H3, H5, H6, R40 and A64.
The Crypto Engine handle AES/3DES ciphers in ECB/CBC mode.


@ -25,7 +25,7 @@
#include <linux/pm_runtime.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>


@ -13,7 +13,8 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ce.h"
@ -262,13 +263,13 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
u32 common;
u64 byte_count;
__le32 *bf;
void *buf;
void *buf = NULL;
int j, i, todo;
int nbw = 0;
u64 fill, min_fill;
__be64 *bebits;
__le64 *lebits;
void *result;
void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
@ -285,13 +286,17 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
/* the padding could be up to two block. */
buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
if (!buf)
return -ENOMEM;
if (!buf) {
err = -ENOMEM;
goto theend;
}
bf = (__le32 *)buf;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
if (!result)
return -ENOMEM;
if (!result) {
err = -ENOMEM;
goto theend;
}
flow = rctx->flow;
chan = &ce->chanlist[flow];
@ -403,11 +408,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
kfree(buf);
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
kfree(result);
theend:
kfree(buf);
kfree(result);
crypto_finalize_hash_request(engine, breq, err);
return 0;
}
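
The fix converts two early returns into jumps to the existing theend label: with buf and result starting as NULL and kfree(NULL) being a defined no-op, the one exit path can free both unconditionally and, more importantly, still call crypto_finalize_hash_request(), so an allocation failure no longer strands the request on the crypto-engine queue. The idiom in miniature (userspace sketch with free() standing in for kfree()):

#include <stdio.h>
#include <stdlib.h>

/* Single-exit cleanup: pointers start as NULL, the exit path frees them
 * unconditionally, and the request is always finalized. */
static int run_request(void (*finalize)(int err))
{
	void *buf = NULL, *result = NULL;
	int err = 0;

	buf = malloc(128);
	if (!buf) {
		err = -1;
		goto theend;
	}
	result = malloc(32);
	if (!result) {
		err = -1;
		goto theend;
	}
	/* ... do the work ... */
theend:
	free(buf);
	free(result);
	finalize(err);          /* never skipped, even on allocation failure */
	return 0;
}

static void done(int err)
{
	printf("finalized with %d\n", err);
}

int main(void)
{
	return run_request(done);
}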


@ -16,7 +16,8 @@
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/rng.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
/* CE Registers */
#define CE_TDQ 0x00


@ -13,7 +13,8 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"


@ -15,7 +15,8 @@
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#define SS_START 1


@ -20,7 +20,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/ctr.h>
#include <crypto/skcipher.h>
#include "crypto4xx_reg_def.h"


@ -30,7 +30,7 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
@ -917,7 +917,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
pd->pd_ctl.w = PD_CTL_HOST_READY |
((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
(crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
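
Since both operands here are 0-or-1 comparison results and ?: binds more loosely than either operator, the old | happened to compute the same value; the switch to || states the intent (either an AHASH or an AEAD request wants PD_CTL_HASH_FINAL) and gains short-circuit evaluation. On general integers the two operators part ways, as a quick check shows:

#include <stdio.h>

int main(void)
{
	int a = 2, b = 1;

	/* On comparison results (always 0 or 1) the operators agree: */
	printf("%d %d\n", (a == 2) | (b == 1), (a == 2) || (b == 1));
	/* On plain integers they do not: 2 | 1 == 3 but 2 || 1 == 1,
	 * and | always evaluates both sides. */
	printf("%d %d\n", a | b, a || b);
	return 0;
}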


@ -16,7 +16,8 @@
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include "atmel-sha-regs.h"
struct atmel_aes_dev;


@ -33,7 +33,8 @@
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include "atmel-sha-regs.h"
@ -459,7 +460,6 @@ static int atmel_sha_init(struct ahash_request *req)
break;
default:
return -EINVAL;
break;
}
ctx->bufcnt = 0;


@ -28,7 +28,8 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
/* Max length of a line in all cache levels for Artpec SoCs. */


@ -26,11 +26,12 @@
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
#include "util.h"


@ -16,7 +16,8 @@
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
#include "spu.h"


@ -17,7 +17,8 @@
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
enum spu_cipher_alg {
CIPHER_ALG_NONE = 0x0,


@ -3404,8 +3404,8 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
fallback = crypto_alloc_skcipher(tfm_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
tfm_name, PTR_ERR(fallback));
pr_err("Failed to allocate %s fallback: %ld\n",
tfm_name, PTR_ERR(fallback));
return PTR_ERR(fallback);
}


@ -852,7 +852,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
cpu = smp_processor_id();
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
if (!IS_ERR_OR_NULL(drv_ctx))
if (!IS_ERR(drv_ctx))
drv_ctx->op_type = type;
ctx->drv_ctx[type] = drv_ctx;
@ -955,7 +955,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct caam_drv_ctx *drv_ctx;
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR_OR_NULL(drv_ctx))
if (IS_ERR(drv_ctx))
return (struct aead_edesc *)drv_ctx;
/* allocate space for base edesc and hw desc commands, link tables */
@ -1165,7 +1165,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, encrypt);
if (IS_ERR_OR_NULL(edesc))
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor */
@ -1259,7 +1259,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
struct caam_drv_ctx *drv_ctx;
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR_OR_NULL(drv_ctx))
if (IS_ERR(drv_ctx))
return (struct skcipher_edesc *)drv_ctx;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
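
IS_ERR_OR_NULL() is dropped throughout because caam_drv_ctx_init() reports every failure as an ERR_PTR()-encoded pointer and never returns NULL, so the extra NULL test only blurred the contract. The encoding reserves the top 4095 pointer values for negative errnos; a userspace model of the idiom (the real macros live in include/linux/err.h):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Model of ERR_PTR()/IS_ERR()/PTR_ERR() from linux/err.h. */
static void *ERR_PTR(long err)       { return (void *)err; }
static long  PTR_ERR(const void *p)  { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for caam_drv_ctx_init(): failure is always an ERR_PTR. */
static void *drv_ctx_init(int fail)
{
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
	void *ctx = drv_ctx_init(1);

	if (IS_ERR(ctx))        /* no NULL case left to consider */
		printf("init failed: %ld\n", PTR_ERR(ctx));
	return 0;
}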
@ -2502,8 +2502,8 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
fallback = crypto_alloc_skcipher(tfm_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
tfm_name, PTR_ERR(fallback));
pr_err("Failed to allocate %s fallback: %ld\n",
tfm_name, PTR_ERR(fallback));
return PTR_ERR(fallback);
}


@ -1611,7 +1611,8 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
fallback = crypto_alloc_skcipher(tfm_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n",
dev_err(caam_alg->caam.dev,
"Failed to allocate %s fallback: %ld\n",
tfm_name, PTR_ERR(fallback));
return PTR_ERR(fallback);
}


@ -34,7 +34,8 @@
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/chacha.h>
#include <crypto/poly1305.h>


@ -16,6 +16,14 @@
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
/*
* Maximum size for crypto-engine software queue based on Job Ring
* size (JOBR_DEPTH) and a THRESHOLD (reserved for the non-crypto-API
* requests that are not passed through crypto-engine)
*/
#define THRESHOLD 15
#define CRYPTO_ENGINE_MAX_QLEN (JOBR_DEPTH - THRESHOLD)
/* Kconfig params for interrupt coalescing if selected (else zero) */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
#define JOBR_INTC JRCFG_ICEN
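
The new constant sizes the crypto-engine software queue from the Job Ring depth while holding THRESHOLD slots back for requests that bypass the crypto API. Assuming the default CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE of 9 (an assumption; the Kconfig value is configurable), the arithmetic comes out as:

#include <stdio.h>

#define CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE 9   /* assumed default */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
#define THRESHOLD 15
#define CRYPTO_ENGINE_MAX_QLEN (JOBR_DEPTH - THRESHOLD)

int main(void)
{
	/* 512-slot ring minus 15 reserved slots = 497 queueable requests */
	printf("%d\n", CRYPTO_ENGINE_MAX_QLEN);
	return 0;
}

Reserving the headroom keeps non-crypto-API jobs from being starved once the engine's queue is allowed to fill the ring.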

Some files were not shown because too many files have changed in this diff Show More