Merge branch 'random-5.17-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random
Pull random number generator updates from Jason Donenfeld:
 "These are a bit more numerous than usual for the RNG, due to folks
  resubmitting patches that had been pending prior and generally renewed
  interest. There are a few categories of patches in here:

   1) Dominik Brodowski and I traded a series back and forth for some
      weeks that fixed numerous issues related to seeds being provided
      at extremely early boot by the firmware, before other parts of the
      kernel or of the RNG have been initialized, both fixing some
      crashes and addressing correctness around early boot randomness.
      One of these is marked for stable.

   2) I replaced the RNG's usage of SHA-1 with BLAKE2s in the entropy
      extractor, and made the construction a bit safer and more
      standard. This was sort of long-overdue low-hanging fruit, as we
      were supposed to have phased out SHA-1 usage quite some time ago
      (even if all we needed here was non-invertibility). Along the way
      it also made extraction 131% faster. This required a bit of
      Kconfig and symbol plumbing to make things work well with the
      crypto libraries, which is one of the reasons why I'm sending you
      this pull early in the cycle.

   3) I got rid of a truly superfluous call to RDRAND in the hot path,
      which resulted in a whopping 370% increase in performance.

   4) Sebastian Andrzej Siewior sent some patches regarding PREEMPT_RT,
      the full series of which wasn't ready yet, but the first two
      preparatory cleanups were good on their own. One of them touches
      files in kernel/irq/, which is the other reason why I'm sending
      you this pull early in the cycle.

   5) Other assorted correctness fixes from Eric Biggers, Jann Horn,
      Mark Brown, Dominik Brodowski, and myself"

* 'random-5.17-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  random: don't reset crng_init_cnt on urandom_read()
  random: avoid superfluous call to RDRAND in CRNG extraction
  random: early initialization of ChaCha constants
  random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs
  random: harmonize "crng init done" messages
  random: mix bootloader randomness into pool
  random: do not throw away excess input to crng_fast_load
  random: do not re-init if crng_reseed completes before primary init
  random: fix crash on multiple early calls to add_bootloader_randomness()
  random: do not sign extend bytes for rotation when mixing
  random: use BLAKE2s instead of SHA1 in extraction
  lib/crypto: blake2s: include as built-in
  random: fix data race on crng init time
  random: fix data race on crng_node_pool
  irq: remove unused flags argument from __handle_irq_event_percpu()
  random: remove unused irq_flags argument from add_interrupt_randomness()
  random: document add_hwgenerator_randomness() with other input functions
  MAINTAINERS: add git tree for random.c
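For orientation, the BLAKE2s-based extraction described in item 2 roughly takes the following shape: hash the whole input pool, mix the full digest back into the pool (anti-backtracking), and emit only half of it. This is an editorial sketch of the idea under those assumptions, not the exact code from this commit (the real change is in the extract_buf() hunk further down); mix_back_into_pool() is a hypothetical stand-in for random.c's __mix_pool_bytes().

    #include <crypto/blake2s.h>
    #include <linux/string.h>

    /* hypothetical stand-in for random.c's __mix_pool_bytes() */
    static void mix_back_into_pool(const u8 *buf, size_t len) { }

    /* Sketch: BLAKE2s over the pool, feed the full 32-byte digest back,
     * then hand out only BLAKE2S_HASH_SIZE / 2 bytes for forward secrecy. */
    static void extract_sketch(const u8 *pool, size_t poolbytes,
                               u8 out[BLAKE2S_HASH_SIZE / 2])
    {
            struct blake2s_state state;
            u8 hash[BLAKE2S_HASH_SIZE];

            blake2s_init(&state, sizeof(hash));
            blake2s_update(&state, pool, poolbytes);
            blake2s_final(&state, hash);            /* final zeros out state */

            mix_back_into_pool(hash, sizeof(hash)); /* anti-backtracking step */
            memcpy(out, hash, BLAKE2S_HASH_SIZE / 2);
            memzero_explicit(hash, sizeof(hash));
    }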
@@ -15998,6 +15998,7 @@ F: arch/mips/generic/board-ranchu.c
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
M: Jason A. Donenfeld <Jason@zx2c4.com>
T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
S: Maintained
F: drivers/char/random.c

@@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o
obj-$(if $(CONFIG_CRYPTO_BLAKE2S_ARM),y) += libblake2s-arm.o
obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o

@@ -31,7 +32,8 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
blake2s-arm-y := blake2s-core.o blake2s-glue.o
blake2s-arm-y := blake2s-shash.o
libblake2s-arm-y:= blake2s-core.o blake2s-glue.o
blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o

@@ -167,8 +167,8 @@
.endm

//
// void blake2s_compress_arch(struct blake2s_state *state,
// const u8 *block, size_t nblocks, u32 inc);
// void blake2s_compress(struct blake2s_state *state,
// const u8 *block, size_t nblocks, u32 inc);
//
// Only the first three fields of struct blake2s_state are used:
// u32 h[8]; (inout)

@@ -176,7 +176,7 @@
// u32 f[2]; (in)
//
.align 5
ENTRY(blake2s_compress_arch)
ENTRY(blake2s_compress)
push {r0-r2,r4-r11,lr} // keep this an even number

.Lnext_block:

@@ -303,4 +303,4 @@ ENTRY(blake2s_compress_arch)
str r3, [r12], #4
bne 1b
b .Lcopy_block_done
ENDPROC(blake2s_compress_arch)
ENDPROC(blake2s_compress)

@@ -1,78 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BLAKE2s digest algorithm, ARM scalar implementation
*
* Copyright 2020 Google LLC
*/

#include <crypto/internal/blake2s.h>
#include <crypto/internal/hash.h>

#include <linux/module.h>

/* defined in blake2s-core.S */
EXPORT_SYMBOL(blake2s_compress_arch);

static int crypto_blake2s_update_arm(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
}

static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, blake2s_compress_arch);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_arm, \
.final = crypto_blake2s_final_arm, \
.descsize = sizeof(struct blake2s_state), \
}

static struct shash_alg blake2s_arm_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
};

static int __init blake2s_arm_mod_init(void)
{
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
crypto_register_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs)) : 0;
}

static void __exit blake2s_arm_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
crypto_unregister_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs));
}

module_init(blake2s_arm_mod_init);
module_exit(blake2s_arm_mod_exit);

MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-arm");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-arm");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-arm");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-arm");
EXPORT_SYMBOL(blake2s_compress);

@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BLAKE2s digest algorithm, ARM scalar implementation
*
* Copyright 2020 Google LLC
*/

#include <crypto/internal/blake2s.h>
#include <crypto/internal/hash.h>

#include <linux/module.h>

static int crypto_blake2s_update_arm(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
}

static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, blake2s_compress);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_arm, \
.final = crypto_blake2s_final_arm, \
.descsize = sizeof(struct blake2s_state), \
}

static struct shash_alg blake2s_arm_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE),
};

static int __init blake2s_arm_mod_init(void)
{
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
crypto_register_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs)) : 0;
}

static void __exit blake2s_arm_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
crypto_unregister_shashes(blake2s_arm_algs,
ARRAY_SIZE(blake2s_arm_algs));
}

module_init(blake2s_arm_mod_init);
module_exit(blake2s_arm_mod_exit);

MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-arm");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-arm");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-arm");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-arm");

@@ -62,7 +62,9 @@ obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o

obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o
blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
blake2s-x86_64-y := blake2s-shash.o
obj-$(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) += libblake2s-x86_64.o
libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o

obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o

@@ -5,7 +5,6 @@

#include <crypto/internal/blake2s.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/hash.h>

#include <linux/types.h>
#include <linux/jump_label.h>

@@ -28,9 +27,8 @@ asmlinkage void blake2s_compress_avx512(struct blake2s_state *state,
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);

void blake2s_compress_arch(struct blake2s_state *state,
const u8 *block, size_t nblocks,
const u32 inc)
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
/* SIMD disables preemption, so relax after processing each page. */
BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);

@@ -56,49 +54,12 @@ void blake2s_compress_arch(struct blake2s_state *state,
block += blocks * BLAKE2S_BLOCK_SIZE;
} while (nblocks);
}
EXPORT_SYMBOL(blake2s_compress_arch);

static int crypto_blake2s_update_x86(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch);
}

static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, blake2s_compress_arch);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_x86, \
.final = crypto_blake2s_final_x86, \
.descsize = sizeof(struct blake2s_state), \
}

static struct shash_alg blake2s_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
};
EXPORT_SYMBOL(blake2s_compress);

static int __init blake2s_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_SSSE3))
return 0;

static_branch_enable(&blake2s_use_ssse3);
if (boot_cpu_has(X86_FEATURE_SSSE3))
static_branch_enable(&blake2s_use_ssse3);

if (IS_ENABLED(CONFIG_AS_AVX512) &&
boot_cpu_has(X86_FEATURE_AVX) &&

@@ -109,26 +70,9 @@ static int __init blake2s_mod_init(void)
XFEATURE_MASK_AVX512, NULL))
static_branch_enable(&blake2s_use_avx512);

return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
crypto_register_shashes(blake2s_algs,
ARRAY_SIZE(blake2s_algs)) : 0;
}

static void __exit blake2s_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
return 0;
}

module_init(blake2s_mod_init);
module_exit(blake2s_mod_exit);

MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-x86");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-x86");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-x86");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-x86");
MODULE_LICENSE("GPL v2");

@@ -0,0 +1,77 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/

#include <crypto/internal/blake2s.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/hash.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <asm/cpufeature.h>
#include <asm/processor.h>

static int crypto_blake2s_update_x86(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
}

static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
return crypto_blake2s_final(desc, out, blake2s_compress);
}

#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2s_setkey, \
.init = crypto_blake2s_init, \
.update = crypto_blake2s_update_x86, \
.final = crypto_blake2s_final_x86, \
.descsize = sizeof(struct blake2s_state), \
}

static struct shash_alg blake2s_algs[] = {
BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE),
BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE),
BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE),
BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE),
};

static int __init blake2s_mod_init(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
return 0;
}

static void __exit blake2s_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
}

module_init(blake2s_mod_init);
module_exit(blake2s_mod_exit);

MODULE_ALIAS_CRYPTO("blake2s-128");
MODULE_ALIAS_CRYPTO("blake2s-128-x86");
MODULE_ALIAS_CRYPTO("blake2s-160");
MODULE_ALIAS_CRYPTO("blake2s-160-x86");
MODULE_ALIAS_CRYPTO("blake2s-224");
MODULE_ALIAS_CRYPTO("blake2s-224-x86");
MODULE_ALIAS_CRYPTO("blake2s-256");
MODULE_ALIAS_CRYPTO("blake2s-256-x86");
MODULE_LICENSE("GPL v2");

@@ -79,7 +79,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
inc_irq_stat(hyperv_stimer0_count);
if (hv_stimer0_handler)
hv_stimer0_handler();
add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
ack_APIC_irq();

set_irq_regs(old_regs);

@@ -1919,9 +1919,10 @@ config CRYPTO_STATS
config CRYPTO_HASH_INFO
bool

source "lib/crypto/Kconfig"
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"

endif # if CRYPTO

source "lib/crypto/Kconfig"

@@ -1,8 +1,7 @@
/*
* random.c -- A strong random number generator
*
* Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
* Rights Reserved.
* Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
*

@@ -78,12 +77,12 @@
* an *estimate* of how many bits of randomness have been stored into
* the random number generator's internal state.
*
* When random bytes are desired, they are obtained by taking the SHA
* hash of the contents of the "entropy pool". The SHA hash avoids
* When random bytes are desired, they are obtained by taking the BLAKE2s
* hash of the contents of the "entropy pool". The BLAKE2s hash avoids
* exposing the internal state of the entropy pool. It is believed to
* be computationally infeasible to derive any useful information
* about the input of SHA from its output. Even if it is possible to
* analyze SHA in some clever way, as long as the amount of data
* about the input of BLAKE2s from its output. Even if it is possible to
* analyze BLAKE2s in some clever way, as long as the amount of data
* returned from the generator is less than the inherent entropy in
* the pool, the output data is totally unpredictable. For this
* reason, the routine decreases its internal estimate of how many

@@ -93,7 +92,7 @@
* If this estimate goes to zero, the routine can still generate
* random numbers; however, an attacker may (at least in theory) be
* able to infer the future output of the generator from prior
* outputs. This requires successful cryptanalysis of SHA, which is
* outputs. This requires successful cryptanalysis of BLAKE2s, which is
* not believed to be feasible, but there is a remote possibility.
* Nonetheless, these numbers should be useful for the vast majority
* of purposes.

@@ -200,8 +199,11 @@
* void add_device_randomness(const void *buf, unsigned int size);
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
* void add_interrupt_randomness(int irq, int irq_flags);
* void add_interrupt_randomness(int irq);
* void add_disk_randomness(struct gendisk *disk);
* void add_hwgenerator_randomness(const char *buffer, size_t count,
* size_t entropy);
* void add_bootloader_randomness(const void *buf, unsigned int size);
*
* add_device_randomness() is for adding data to the random pool that
* is likely to differ between two devices (or possibly even per boot).

@@ -228,6 +230,14 @@
* particular randomness source. They do this by keeping track of the
* first and second order deltas of the event timings.
*
* add_hwgenerator_randomness() is for true hardware RNGs, and will credit
* entropy as specified by the caller. If the entropy pool is full it will
* block until more entropy is needed.
*
* add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
* add_device_randomness(), depending on whether or not the configuration
* option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
*
* Ensuring unpredictability at system startup
* ============================================
*
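As a usage illustration of the interface documented in the hunk above, a hardware RNG driver might feed the pool roughly like this. This is a hypothetical sketch: only the add_hwgenerator_randomness() signature comes from the comment block; struct my_trng and my_trng_read() are invented, and crediting count * 8 bits assumes the device output is full-entropy.

    #include <linux/random.h>

    /* Hypothetical: pull 32 bytes from a TRNG and credit it as entropy.
     * add_hwgenerator_randomness() may block while the pool is full, so
     * call it from a kthread, never from an interrupt handler. */
    static int feed_pool_once(struct my_trng *trng)
    {
            u8 buf[32];
            int n = my_trng_read(trng, buf, sizeof(buf));   /* invented helper */

            if (n <= 0)
                    return n;
            add_hwgenerator_randomness((const char *)buf, n, n * 8);
            return n;
    }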
@@ -336,7 +346,7 @@
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/sha1.h>
#include <crypto/blake2s.h>

#include <asm/processor.h>
#include <linux/uaccess.h>

@@ -356,10 +366,7 @@
#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT 10
#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
#define EXTRACT_SIZE 10


#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
#define EXTRACT_SIZE (BLAKE2S_HASH_SIZE / 2)

/*
* To allow fractional bits to be tracked, the entropy_count field is

@@ -395,7 +402,7 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* Thanks to Colin Plumb for suggesting this.
*
* The mixing operation is much less sensitive than the output hash,
* where we use SHA-1. All that we want of mixing operation is that
* where we use BLAKE2s. All that we want of mixing operation is that
* it be a good non-cryptographic hash; i.e. it not produce collisions
* when fed "random" data of the sort we expect to see. As long as
* the pool state differs for different inputs, we have preserved the

@@ -450,6 +457,10 @@ struct crng_state {

static struct crng_state primary_crng = {
.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
.state[0] = CHACHA_CONSTANT_EXPA,
.state[1] = CHACHA_CONSTANT_ND_3,
.state[2] = CHACHA_CONSTANT_2_BY,
.state[3] = CHACHA_CONSTANT_TE_K,
};

/*

@@ -461,6 +472,7 @@ static struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;

@@ -539,7 +551,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
unsigned long i, tap1, tap2, tap3, tap4, tap5;
int input_rotate;
int wordmask = r->poolinfo->poolwords - 1;
const char *bytes = in;
const unsigned char *bytes = in;
__u32 w;

tap1 = r->poolinfo->tap1;

@@ -751,7 +763,6 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
* Hack to deal with crazy userspace progams when they are all trying
* to access /dev/urandom in parallel. The programs are almost

@@ -759,7 +770,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
* their brain damage.
*/
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);
static void numa_crng_init(void);

@@ -807,7 +817,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
return arch_init;
}

static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
static void crng_initialize_secondary(struct crng_state *crng)
{
chacha_init_consts(crng->state);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);

@@ -817,18 +827,46 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)

static void __init crng_initialize_primary(struct crng_state *crng)
{
chacha_init_consts(crng->state);
_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
if (crng_init_try_arch_early(crng) && trust_cpu) {
if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
pr_notice("crng done (trusting CPU's manufacturer)\n");
pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void crng_finalize_init(struct crng_state *crng)
{
if (crng != &primary_crng || crng_init >= 2)
return;
if (!system_wq) {
/* We can't call numa_crng_init until we have workqueues,
* so mark this for processing later. */
crng_need_final_init = true;
return;
}

invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
if (unseeded_warning.missed) {
pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
unseeded_warning.missed);
unseeded_warning.missed = 0;
}
if (urandom_warning.missed) {
pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
urandom_warning.missed);
urandom_warning.missed = 0;
}
}

static void do_numa_crng_init(struct work_struct *work)
{
int i;

@@ -843,8 +881,8 @@ static void do_numa_crng_init(struct work_struct *work)
crng_initialize_secondary(crng);
pool[i] = crng;
}
mb();
if (cmpxchg(&crng_node_pool, NULL, pool)) {
/* pairs with READ_ONCE() in select_crng() */
if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
for_each_node(i)
kfree(pool[i]);
kfree(pool);
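The cmpxchg_release()/READ_ONCE() pairing introduced above (and used by select_crng() in the next hunk) is a publish/consume pattern: the release ordering makes the node pool's initialization visible before the pointer is published, and readers only dereference what they observe through READ_ONCE(). A condensed sketch of the same pattern follows; the names (struct thing, init_thing, default_thing) are invented for illustration and are not part of this commit.

    #include <linux/slab.h>
    #include <linux/atomic.h>

    static struct thing *published;                 /* hypothetical global */

    static void publisher(void)
    {
            struct thing *t = kmalloc(sizeof(*t), GFP_KERNEL);

            init_thing(t);                          /* fully initialize first */
            /* release: the stores above are ordered before the pointer store */
            if (cmpxchg_release(&published, NULL, t) != NULL)
                    kfree(t);                       /* somebody else won the race */
    }

    static struct thing *reader(void)
    {
            /* pairs with the cmpxchg_release() in publisher() */
            struct thing *t = READ_ONCE(published);

            return t ? t : &default_thing;          /* hypothetical fallback */
    }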
@@ -855,20 +893,35 @@ static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
schedule_work(&numa_crng_init_work);
if (IS_ENABLED(CONFIG_NUMA))
schedule_work(&numa_crng_init_work);
}

static struct crng_state *select_crng(void)
{
if (IS_ENABLED(CONFIG_NUMA)) {
struct crng_state **pool;
int nid = numa_node_id();

/* pairs with cmpxchg_release() in do_numa_crng_init() */
pool = READ_ONCE(crng_node_pool);
if (pool && pool[nid])
return pool[nid];
}

return &primary_crng;
}
#else
static void numa_crng_init(void) {}
#endif

/*
* crng_fast_load() can be called by code in the interrupt service
* path. So we can't afford to dilly-dally.
* path. So we can't afford to dilly-dally. Returns the number of
* bytes processed from cp.
*/
static int crng_fast_load(const char *cp, size_t len)
static size_t crng_fast_load(const char *cp, size_t len)
{
unsigned long flags;
char *p;
size_t ret = 0;

if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;

@@ -879,7 +932,7 @@ static int crng_fast_load(const char *cp, size_t len)
p = (unsigned char *) &primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--;
cp++; crng_init_cnt++; len--; ret++;
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {

@@ -887,7 +940,7 @@ static int crng_fast_load(const char *cp, size_t len)
crng_init = 1;
pr_notice("fast init done\n");
}
return 1;
return ret;
}

/*

@@ -962,41 +1015,24 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
crng->state[i+4] ^= buf.key[i] ^ rv;
}
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
if (crng == &primary_crng && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
if (unseeded_warning.missed) {
pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
unseeded_warning.missed);
unseeded_warning.missed = 0;
}
if (urandom_warning.missed) {
pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
urandom_warning.missed);
urandom_warning.missed = 0;
}
}
crng_finalize_init(crng);
}

static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA_BLOCK_SIZE])
{
unsigned long v, flags;
unsigned long flags, init_time;

if (crng_ready() &&
(time_after(crng_global_init_time, crng->init_time) ||
time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
if (crng_ready()) {
init_time = READ_ONCE(crng->init_time);
if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
crng_reseed(crng, crng == &primary_crng ?
&input_pool : NULL);
}
spin_lock_irqsave(&crng->lock, flags);
if (arch_get_random_long(&v))
crng->state[14] ^= v;
chacha20_block(&crng->state[0], out);
if (crng->state[12] == 0)
crng->state[13]++;

@@ -1005,15 +1041,7 @@ static void _extract_crng(struct crng_state *crng,

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
if (crng_node_pool)
crng = crng_node_pool[numa_node_id()];
if (crng == NULL)
#endif
crng = &primary_crng;
_extract_crng(crng, out);
_extract_crng(select_crng(), out);
}

/*

@@ -1042,15 +1070,7 @@ static void _crng_backtrack_protect(struct crng_state *crng,

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
if (crng_node_pool)
crng = crng_node_pool[numa_node_id()];
if (crng == NULL)
#endif
crng = &primary_crng;
_crng_backtrack_protect(crng, tmp, used);
_crng_backtrack_protect(select_crng(), tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)

@@ -1242,7 +1262,7 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
void add_interrupt_randomness(int irq)
{
struct entropy_store *r;
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);

@@ -1269,7 +1289,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
sizeof(fast_pool->pool))) {
sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0;
fast_pool->last = now;
}

@@ -1368,56 +1388,49 @@ retry:
*/
static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i;
union {
__u32 w[5];
unsigned long l[LONGS(20)];
} hash;
__u32 workspace[SHA1_WORKSPACE_WORDS];
struct blake2s_state state __aligned(__alignof__(unsigned long));
u8 hash[BLAKE2S_HASH_SIZE];
unsigned long *salt;
unsigned long flags;

blake2s_init(&state, sizeof(hash));

/*
* If we have an architectural hardware random number
* generator, use it for SHA's initial vector
* generator, use it for BLAKE2's salt & personal fields.
*/
sha1_init(hash.w);
for (i = 0; i < LONGS(20); i++) {
for (salt = (unsigned long *)&state.h[4];
salt < (unsigned long *)&state.h[8]; ++salt) {
unsigned long v;
if (!arch_get_random_long(&v))
break;
hash.l[i] = v;
*salt ^= v;
}

/* Generate a hash across the pool, 16 words (512 bits) at a time */
/* Generate a hash across the pool */
spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
blake2s_update(&state, (const u8 *)r->pool,
r->poolinfo->poolwords * sizeof(*r->pool));
blake2s_final(&state, hash); /* final zeros out state */

/*
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
* plus the current outputs, and attempts to find previous
* ouputs), unless the hash function can be inverted. By
* mixing at least a SHA1 worth of hash data back, we make
* outputs), unless the hash function can be inverted. By
* mixing at least a hash worth of hash data back, we make
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
__mix_pool_bytes(r, hash.w, sizeof(hash.w));
__mix_pool_bytes(r, hash, sizeof(hash));
spin_unlock_irqrestore(&r->lock, flags);

memzero_explicit(workspace, sizeof(workspace));

/*
* In case the hash function has some recognizable output
* pattern, we fold it in half. Thus, we always feed back
* twice as much data as we output.
/* Note that EXTRACT_SIZE is half of hash size here, because above
* we've dumped the full length back into mixer. By reducing the
* amount that we emit, we retain a level of forward secrecy.
*/
hash.w[0] ^= hash.w[3];
hash.w[1] ^= hash.w[4];
hash.w[2] ^= rol32(hash.w[2], 16);

memcpy(out, &hash, EXTRACT_SIZE);
memzero_explicit(&hash, sizeof(hash));
memcpy(out, hash, EXTRACT_SIZE);
memzero_explicit(hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,

@@ -1775,6 +1788,8 @@ static void __init init_std_data(struct entropy_store *r)
int __init rand_initialize(void)
{
init_std_data(&input_pool);
if (crng_need_final_init)
crng_finalize_init(&primary_crng);
crng_initialize_primary(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {

@@ -1816,7 +1831,6 @@ urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
unsigned long flags;
static int maxwarn = 10;

if (!crng_ready() && maxwarn > 0) {

@@ -1824,9 +1838,6 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (__ratelimit(&urandom_warning))
pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
current->comm, nbytes);
spin_lock_irqsave(&primary_crng.lock, flags);
crng_init_cnt = 0;
spin_unlock_irqrestore(&primary_crng.lock, flags);
}

return urandom_read_nowarn(file, buf, nbytes, ppos);

@@ -1949,7 +1960,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
if (crng_init < 2)
return -ENODATA;
crng_reseed(&primary_crng, &input_pool);
crng_global_init_time = jiffies - 1;
WRITE_ONCE(crng_global_init_time, jiffies - 1);
return 0;
default:
return -EINVAL;

@@ -2275,15 +2286,20 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
struct entropy_store *poolp = &input_pool;

if (unlikely(crng_init == 0)) {
crng_fast_load(buffer, count);
return;
size_t ret = crng_fast_load(buffer, count);
mix_pool_bytes(poolp, buffer, ret);
count -= ret;
buffer += ret;
if (!count || crng_init == 0)
return;
}

/* Suspend writing if we're above the trickle threshold.
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
wait_event_interruptible(random_write_wait, kthread_should_stop() ||
wait_event_interruptible(random_write_wait,
!system_wq || kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);

@@ -1381,7 +1381,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}

add_interrupt_randomness(vmbus_interrupt, 0);
add_interrupt_randomness(vmbus_interrupt);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)

@@ -81,7 +81,6 @@ config WIREGUARD
select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
select CRYPTO_LIB_BLAKE2S
select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT

@@ -47,12 +47,19 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
hchacha_block_generic(state, out, nrounds);
}

enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_EXPA = 0x61707865U,
CHACHA_CONSTANT_ND_3 = 0x3320646eU,
CHACHA_CONSTANT_2_BY = 0x79622d32U,
CHACHA_CONSTANT_TE_K = 0x6b206574U
};

static inline void chacha_init_consts(u32 *state)
{
state[0] = 0x61707865; /* "expa" */
state[1] = 0x3320646e; /* "nd 3" */
state[2] = 0x79622d32; /* "2-by" */
state[3] = 0x6b206574; /* "te k" */
state[0] = CHACHA_CONSTANT_EXPA;
state[1] = CHACHA_CONSTANT_ND_3;
state[2] = CHACHA_CONSTANT_2_BY;
state[3] = CHACHA_CONSTANT_TE_K;
}

void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
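The four constants above are simply the little-endian 32-bit words of the ChaCha sigma string "expand 32-byte k". A quick user-space check (an illustrative sketch, not kernel code; assumes a little-endian machine):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t c[4] = { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 };
            char s[17];

            memcpy(s, c, 16);       /* little-endian byte order assumed */
            s[16] = '\0';
            printf("%s\n", s);      /* prints: expand 32-byte k */
            return 0;
    }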
@@ -11,11 +11,11 @@
#include <crypto/internal/hash.h>
#include <linux/string.h>

void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc);

void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
size_t nblocks, const u32 inc);
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc);

bool blake2s_selftest(void);

@@ -35,7 +35,7 @@ static inline void add_latent_entropy(void) {}

extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void add_interrupt_randomness(int irq) __latent_entropy;

extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);

@@ -575,8 +575,6 @@ EXPORT_SYMBOL_GPL(handle_simple_irq);
*/
void handle_untracked_irq(struct irq_desc *desc)
{
unsigned int flags = 0;

raw_spin_lock(&desc->lock);

if (!irq_may_run(desc))

@@ -593,7 +591,7 @@ void handle_untracked_irq(struct irq_desc *desc)
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock(&desc->lock);

__handle_irq_event_percpu(desc, &flags);
__handle_irq_event_percpu(desc);

raw_spin_lock(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

@@ -136,7 +136,7 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
wake_up_process(action->thread);
}

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval = IRQ_NONE;
unsigned int irq = desc->irq_data.irq;

@@ -174,10 +174,6 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
}

__irq_wake_thread(desc, action);

fallthrough; /* to add to randomness */
case IRQ_HANDLED:
*flags |= action->flags;
break;

default:

@@ -193,11 +189,10 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval;
unsigned int flags = 0;

retval = __handle_irq_event_percpu(desc, &flags);
retval = __handle_irq_event_percpu(desc);

add_interrupt_randomness(desc->irq_data.irq, flags);
add_interrupt_randomness(desc->irq_data.irq);

if (!irq_settings_no_debug(desc))
note_interrupt(desc, retval);

@@ -103,7 +103,7 @@ extern int __irq_get_irqchip_state(struct irq_data *data,

extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);

@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0

comment "Crypto library routines"

config CRYPTO_LIB_AES
tristate

@@ -9,14 +7,14 @@ config CRYPTO_LIB_ARC4
tristate

config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
tristate
bool
help
Declares whether the architecture provides an arch-specific
accelerated implementation of the Blake2s library interface,
either builtin or as a module.

config CRYPTO_LIB_BLAKE2S_GENERIC
tristate
def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
help
This symbol can be depended upon by arch implementations of the
Blake2s library interface that require the generic code as a

@@ -24,15 +22,6 @@ config CRYPTO_LIB_BLAKE2S_GENERIC
implementation is enabled, this implementation serves the users
of CRYPTO_LIB_BLAKE2S.

config CRYPTO_LIB_BLAKE2S
tristate "BLAKE2s hash function library"
depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n
help
Enable the Blake2s library interface. This interface may be fulfilled
by either the generic implementation or an arch-specific one, if one
is available and enabled.

config CRYPTO_ARCH_HAVE_LIB_CHACHA
tristate
help

@@ -51,7 +40,7 @@ config CRYPTO_LIB_CHACHA_GENERIC
of CRYPTO_LIB_CHACHA.

config CRYPTO_LIB_CHACHA
tristate "ChaCha library interface"
tristate
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
help

@@ -76,7 +65,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
of CRYPTO_LIB_CURVE25519.

config CRYPTO_LIB_CURVE25519
tristate "Curve25519 scalar multiplication library"
tristate
depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
help

@@ -111,7 +100,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
of CRYPTO_LIB_POLY1305.

config CRYPTO_LIB_POLY1305
tristate "Poly1305 library interface"
tristate
depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
help

@@ -120,7 +109,7 @@ config CRYPTO_LIB_POLY1305
is available and enabled.

config CRYPTO_LIB_CHACHA20POLY1305
tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
tristate
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
select CRYPTO_LIB_CHACHA

@@ -10,11 +10,10 @@ libaes-y := aes.o
obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
libarc4-y := arc4.o

obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o
libblake2s-generic-y += blake2s-generic.o

obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o
libblake2s-y += blake2s.o
# blake2s is used by the /dev/random driver which is always builtin
obj-y += libblake2s.o
libblake2s-y := blake2s.o
libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o

obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
libchacha20poly1305-y += chacha20poly1305.o

@@ -37,7 +37,11 @@ static inline void blake2s_increment_counter(struct blake2s_state *state,
state->t[1] += (state->t[0] < inc);
}

void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
__weak __alias(blake2s_compress_generic);

void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
u32 m[16];
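The __weak __alias() above is what lets the library expose a single blake2s_compress() symbol: the generic code provides it as a weak alias of blake2s_compress_generic(), and an architecture that links in its own strong blake2s_compress() (as the ARM and x86 files earlier in this diff now do) overrides it at link time. A minimal stand-alone illustration of the same pattern, with hypothetical function names:

    /* default, portable implementation */
    void do_compress_generic(void *state, const void *block)
    {
            /* ... portable code ... */
    }

    /* the public symbol is only a weak alias of the generic version ... */
    void do_compress(void *state, const void *block)
            __attribute__((weak, alias("do_compress_generic")));

    /* ... so any object file that defines a strong do_compress()
     * (for example, SIMD-accelerated code) wins at link time. */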
@@ -16,12 +16,6 @@
#include <linux/init.h>
#include <linux/bug.h>

#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
# define blake2s_compress blake2s_compress_arch
#else
# define blake2s_compress blake2s_compress_generic
#endif

void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
__blake2s_update(state, in, inlen, blake2s_compress);