Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "Here is the crypto update for 3.20:

  - Added 192/256-bit key support to aesni GCM.
  - Added MIPS OCTEON MD5 support.
  - Fixed hwrng starvation and race conditions.
  - Added note that memzero_explicit is not a substitute for memset.
  - Added user-space interface for crypto_rng.
  - Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (71 commits)
  crypto: tcrypt - do not allocate iv on stack for aead speed tests
  crypto: testmgr - limit IV copy length in aead tests
  crypto: tcrypt - fix buflen reminder calculation
  crypto: testmgr - mark rfc4106(gcm(aes)) as fips_allowed
  crypto: caam - fix resource clean-up on error path for caam_jr_init
  crypto: caam - pair irq map and dispose in the same function
  crypto: ccp - terminate ccp_support array with empty element
  crypto: caam - remove unused local variable
  crypto: caam - remove dead code
  crypto: caam - don't emit ICV check failures to dmesg
  hwrng: virtio - drop extra empty line
  crypto: replace scatterwalk_sg_next with sg_next
  crypto: atmel - Free memory in error path
  crypto: doc - remove colons in comments
  crypto: seqiv - Ensure that IV size is at least 8 bytes
  crypto: cts - Weed out non-CBC algorithms
  MAINTAINERS: add linux-crypto to hw random
  crypto: cts - Remove bogus use of seqiv
  crypto: qat - don't need qat_auth_state struct
  crypto: algif_rng - fix sparse non static symbol warning
  ...
commit fee5429e02
@@ -4434,6 +4434,7 @@ F:	include/linux/hwmon*.h
 HARDWARE RANDOM NUMBER GENERATOR CORE
 M:	Matt Mackall <mpm@selenic.com>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
+L:	linux-crypto@vger.kernel.org
 S:	Odd fixes
 F:	Documentation/hw_random.txt
 F:	drivers/char/hw_random/

@@ -16,6 +16,7 @@ obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o
 obj-y += dma-octeon.o
 obj-y += octeon-memcpy.o
 obj-y += executive/
+obj-y += crypto/
 
 obj-$(CONFIG_MTD) += flash_setup.o
 obj-$(CONFIG_SMP) += smp.o

@@ -0,0 +1,7 @@
+#
+# OCTEON-specific crypto modules.
+#
+
+obj-y += octeon-crypto.o
+
+obj-$(CONFIG_CRYPTO_MD5_OCTEON) += octeon-md5.o

@@ -0,0 +1,66 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2012 Cavium Networks
+ */
+
+#include <asm/cop2.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include "octeon-crypto.h"
+
+/**
+ * Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any
+ * crypto operations in calls to octeon_crypto_enable/disable in order to make
+ * sure the state of COP2 isn't corrupted if userspace is also performing
+ * hardware crypto operations. Allocate the state parameter on the stack.
+ * Preemption must be disabled to prevent context switches.
+ *
+ * @state: Pointer to state structure to store current COP2 state in.
+ *
+ * Returns: Flags to be passed to octeon_crypto_disable()
+ */
+unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
+{
+	int status;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	status = read_c0_status();
+	write_c0_status(status | ST0_CU2);
+	if (KSTK_STATUS(current) & ST0_CU2) {
+		octeon_cop2_save(&(current->thread.cp2));
+		KSTK_STATUS(current) &= ~ST0_CU2;
+		status &= ~ST0_CU2;
+	} else if (status & ST0_CU2) {
+		octeon_cop2_save(state);
+	}
+	local_irq_restore(flags);
+	return status & ST0_CU2;
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_enable);
+
+/**
+ * Disable access to Octeon's COP2 crypto hardware in the kernel. This must be
+ * called after an octeon_crypto_enable() before any context switch or return to
+ * userspace.
+ *
+ * @state: Pointer to COP2 state to restore
+ * @flags: Return value from octeon_crypto_enable()
+ */
+void octeon_crypto_disable(struct octeon_cop2_state *state,
+			   unsigned long crypto_flags)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (crypto_flags & ST0_CU2)
+		octeon_cop2_restore(state);
+	else
+		write_c0_status(read_c0_status() & ~ST0_CU2);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(octeon_crypto_disable);
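The enable/disable pair above is meant to bracket every use of the COP2 crypto unit. As a reading aid, here is a minimal sketch of a caller, modeled on the octeon-md5 glue code added later in this series; do_cop2_work() is a hypothetical stand-in for the actual hardware operation, and preemption and softirqs are disabled around the critical section exactly as the comments above require:

	#include "octeon-crypto.h"

	static void octeon_crypto_example(void)
	{
		struct octeon_cop2_state state;	/* on the stack, as documented */
		unsigned long flags;

		local_bh_disable();		/* keep softirqs off COP2 */
		preempt_disable();		/* no context switch allowed */
		flags = octeon_crypto_enable(&state);

		do_cop2_work();			/* hypothetical COP2 operation */

		octeon_crypto_disable(&state, flags);
		preempt_enable();
		local_bh_enable();
	}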
@@ -0,0 +1,75 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2013 Cavium Inc., All Rights Reserved.
+ *
+ * MD5 instruction definitions added by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ */
+#ifndef __LINUX_OCTEON_CRYPTO_H
+#define __LINUX_OCTEON_CRYPTO_H
+
+#include <linux/sched.h>
+#include <asm/mipsregs.h>
+
+#define OCTEON_CR_OPCODE_PRIORITY 300
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+				  unsigned long flags);
+
+/*
+ * Macros needed to implement MD5:
+ */
+
+/*
+ * The index can be 0-1.
+ */
+#define write_octeon_64bit_hash_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0048+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The index can be 0-1.
+ */
+#define read_octeon_64bit_hash_dword(index)		\
+({							\
+	u64 __value;					\
+							\
+	__asm__ __volatile__ (				\
+	"dmfc2 %[rt],0x0048+" STR(index)		\
+	: [rt] "=d" (__value)				\
+	: );						\
+							\
+	__value;					\
+})
+
+/*
+ * The index can be 0-6.
+ */
+#define write_octeon_64bit_block_dword(value, index)	\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x0040+" STR(index)		\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+/*
+ * The value is the final block dword (64-bit).
+ */
+#define octeon_md5_start(value)				\
+do {							\
+	__asm__ __volatile__ (				\
+	"dmtc2 %[rt],0x4047"				\
+	:						\
+	: [rt] "d" (value));				\
+} while (0)
+
+#endif /* __LINUX_OCTEON_CRYPTO_H */
@@ -0,0 +1,216 @@
+/*
+ * Cryptographic API.
+ *
+ * MD5 Message Digest Algorithm (RFC1321).
+ *
+ * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
+ * Based on crypto/md5.c, which is:
+ *
+ * Derived from cryptoapi implementation, originally based on the
+ * public domain implementation written by Colin Plumb in 1993.
+ *
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/md5.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <linux/cryptohash.h>
+#include <asm/octeon/octeon.h>
+#include <crypto/internal/hash.h>
+
+#include "octeon-crypto.h"
+
+/*
+ * We pass everything as 64-bit. OCTEON can handle misaligned data.
+ */
+
+static void octeon_md5_store_hash(struct md5_state *ctx)
+{
+	u64 *hash = (u64 *)ctx->hash;
+
+	write_octeon_64bit_hash_dword(hash[0], 0);
+	write_octeon_64bit_hash_dword(hash[1], 1);
+}
+
+static void octeon_md5_read_hash(struct md5_state *ctx)
+{
+	u64 *hash = (u64 *)ctx->hash;
+
+	hash[0] = read_octeon_64bit_hash_dword(0);
+	hash[1] = read_octeon_64bit_hash_dword(1);
+}
+
+static void octeon_md5_transform(const void *_block)
+{
+	const u64 *block = _block;
+
+	write_octeon_64bit_block_dword(block[0], 0);
+	write_octeon_64bit_block_dword(block[1], 1);
+	write_octeon_64bit_block_dword(block[2], 2);
+	write_octeon_64bit_block_dword(block[3], 3);
+	write_octeon_64bit_block_dword(block[4], 4);
+	write_octeon_64bit_block_dword(block[5], 5);
+	write_octeon_64bit_block_dword(block[6], 6);
+	octeon_md5_start(block[7]);
+}
+
+static int octeon_md5_init(struct shash_desc *desc)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+
+	mctx->hash[0] = cpu_to_le32(0x67452301);
+	mctx->hash[1] = cpu_to_le32(0xefcdab89);
+	mctx->hash[2] = cpu_to_le32(0x98badcfe);
+	mctx->hash[3] = cpu_to_le32(0x10325476);
+	mctx->byte_count = 0;
+
+	return 0;
+}
+
+static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
+			     unsigned int len)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+	const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	mctx->byte_count += len;
+
+	if (avail > len) {
+		memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+		       data, len);
+		return 0;
+	}
+
+	memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
+	       avail);
+
+	local_bh_disable();
+	preempt_disable();
+	flags = octeon_crypto_enable(&state);
+	octeon_md5_store_hash(mctx);
+
+	octeon_md5_transform(mctx->block);
+	data += avail;
+	len -= avail;
+
+	while (len >= sizeof(mctx->block)) {
+		octeon_md5_transform(data);
+		data += sizeof(mctx->block);
+		len -= sizeof(mctx->block);
+	}
+
+	octeon_md5_read_hash(mctx);
+	octeon_crypto_disable(&state, flags);
+	preempt_enable();
+	local_bh_enable();
+
+	memcpy(mctx->block, data, len);
+
+	return 0;
+}
+
+static int octeon_md5_final(struct shash_desc *desc, u8 *out)
+{
+	struct md5_state *mctx = shash_desc_ctx(desc);
+	const unsigned int offset = mctx->byte_count & 0x3f;
+	char *p = (char *)mctx->block + offset;
+	int padding = 56 - (offset + 1);
+	struct octeon_cop2_state state;
+	unsigned long flags;
+
+	*p++ = 0x80;
+
+	local_bh_disable();
+	preempt_disable();
+	flags = octeon_crypto_enable(&state);
+	octeon_md5_store_hash(mctx);
+
+	if (padding < 0) {
+		memset(p, 0x00, padding + sizeof(u64));
+		octeon_md5_transform(mctx->block);
+		p = (char *)mctx->block;
+		padding = 56;
+	}
+
+	memset(p, 0, padding);
+	mctx->block[14] = cpu_to_le32(mctx->byte_count << 3);
+	mctx->block[15] = cpu_to_le32(mctx->byte_count >> 29);
+	octeon_md5_transform(mctx->block);
+
+	octeon_md5_read_hash(mctx);
+	octeon_crypto_disable(&state, flags);
+	preempt_enable();
+	local_bh_enable();
+
+	memcpy(out, mctx->hash, sizeof(mctx->hash));
+	memset(mctx, 0, sizeof(*mctx));
+
+	return 0;
+}
+
+static int octeon_md5_export(struct shash_desc *desc, void *out)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int octeon_md5_import(struct shash_desc *desc, const void *in)
+{
+	struct md5_state *ctx = shash_desc_ctx(desc);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.digestsize	= MD5_DIGEST_SIZE,
+	.init		= octeon_md5_init,
+	.update		= octeon_md5_update,
+	.final		= octeon_md5_final,
+	.export		= octeon_md5_export,
+	.import		= octeon_md5_import,
+	.descsize	= sizeof(struct md5_state),
+	.statesize	= sizeof(struct md5_state),
+	.base		= {
+		.cra_name	 = "md5",
+		.cra_driver_name = "octeon-md5",
+		.cra_priority	 = OCTEON_CR_OPCODE_PRIORITY,
+		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	 = MD5_HMAC_BLOCK_SIZE,
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
+static int __init md5_mod_init(void)
+{
+	if (!octeon_has_crypto())
+		return -ENOTSUPP;
+	return crypto_register_shash(&alg);
+}
+
+static void __exit md5_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(md5_mod_init);
+module_exit(md5_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
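Nothing else in the kernel needs to know about this driver: it registers under the generic "md5" name with cra_priority 300, so any shash user picks it up automatically once it outranks the generic C implementation. A minimal sketch of such a caller, assuming the standard shash API of this era (the helper name is ours, not part of the patch):

	#include <linux/err.h>
	#include <crypto/hash.h>

	static int md5_digest_example(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		int err;

		/* Resolves to octeon-md5 when it is the highest-priority "md5". */
		tfm = crypto_alloc_shash("md5", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			err = crypto_shash_digest(desc, data, len, out);
		}

		crypto_free_shash(tfm);
		return err;
	}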
@@ -27,6 +27,9 @@
 
 #include <asm/octeon/octeon.h>
 
+enum octeon_feature_bits __octeon_feature_bits __read_mostly;
+EXPORT_SYMBOL_GPL(__octeon_feature_bits);
+
 /**
  * Read a byte of fuse data
  * @byte_addr: address to read

@@ -103,6 +106,9 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
 	else
 		suffix = "NSP";
 
+	if (!fus_dat2.s.nocrypto)
+		__octeon_feature_bits |= OCTEON_HAS_CRYPTO;
+
 	/*
 	 * Assume pass number is encoded using <5:3><2:0>. Exceptions
 	 * will be fixed later.

@@ -46,8 +46,6 @@ enum octeon_feature {
 	OCTEON_FEATURE_SAAD,
 	/* Does this Octeon support the ZIP offload engine? */
 	OCTEON_FEATURE_ZIP,
-	/* Does this Octeon support crypto acceleration using COP2? */
-	OCTEON_FEATURE_CRYPTO,
 	OCTEON_FEATURE_DORM_CRYPTO,
 	/* Does this Octeon support PCI express? */
 	OCTEON_FEATURE_PCIE,

@@ -86,6 +84,21 @@ enum octeon_feature {
 	OCTEON_MAX_FEATURE
 };
 
+enum octeon_feature_bits {
+	OCTEON_HAS_CRYPTO = 0x0001,	/* Crypto acceleration using COP2 */
+};
+extern enum octeon_feature_bits __octeon_feature_bits;
+
+/**
+ * octeon_has_crypto() - Check if this OCTEON has crypto acceleration support.
+ *
+ * Returns: Non-zero if the feature exists. Zero if the feature does not exist.
+ */
+static inline int octeon_has_crypto(void)
+{
+	return __octeon_feature_bits & OCTEON_HAS_CRYPTO;
+}
+
 /**
  * Determine if the current Octeon supports a specific feature. These
  * checks have been optimized to be fairly quick, but they should still
@@ -44,11 +44,6 @@ extern int octeon_get_boot_num_arguments(void);
 extern const char *octeon_get_boot_argument(int arg);
 extern void octeon_hal_setup_reserved32(void);
 extern void octeon_user_io_init(void);
-struct octeon_cop2_state;
-extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
-extern void octeon_crypto_disable(struct octeon_cop2_state *state,
-				  unsigned long flags);
-extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
 
 extern void octeon_init_cvmcount(void);
 extern void octeon_setup_delays(void);

@@ -497,7 +497,7 @@ module_init(aes_sparc64_mod_init);
 module_exit(aes_sparc64_mod_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, sparc64 aes opcode accelerated");
 
 MODULE_ALIAS_CRYPTO("aes");
 

@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
 
-MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("camellia");
 
 #include "crop_devid.c"

@@ -533,5 +533,6 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
 
 MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des3_ede");
 
 #include "crop_devid.c"

@@ -183,7 +183,7 @@ module_init(md5_sparc64_mod_init);
 module_exit(md5_sparc64_mod_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
+MODULE_DESCRIPTION("MD5 Message Digest Algorithm, sparc64 md5 opcode accelerated");
 
 MODULE_ALIAS_CRYPTO("md5");
 
@@ -32,12 +32,23 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 
+/*
+ * The following macros are used to move an (un)aligned 16 byte value to/from
+ * an XMM register. This can done for either FP or integer values, for FP use
+ * movaps (move aligned packed single) or integer use movdqa (move double quad
+ * aligned). It doesn't make a performance difference which instruction is used
+ * since Nehalem (original Core i7) was released. However, the movaps is a byte
+ * shorter, so that is the one we'll use for now. (same for unaligned).
+ */
+#define MOVADQ	movaps
+#define MOVUDQ	movups
+
 #ifdef __x86_64__
 
 .data
 .align 16
 .Lgf128mul_x_ble_mask:
 	.octa 0x00000000000000010000000000000087
 
 POLY:   .octa 0xC2000000000000000000000000000001
 TWOONE: .octa 0x00000001000000000000000000000001

@@ -89,6 +100,7 @@ enc:        .octa 0x2
 #define arg8 STACK_OFFSET+16(%r14)
 #define arg9 STACK_OFFSET+24(%r14)
 #define arg10 STACK_OFFSET+32(%r14)
+#define keysize 2*15*16(%arg1)
 #endif
 
 

@@ -213,10 +225,12 @@ enc:        .octa 0x2
 
 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	MOVADQ	   SHUF_MASK(%rip), %xmm14
 	mov	   arg7, %r10			# %r10 = AAD
 	mov	   arg8, %r12			# %r12 = aadLen
 	mov	   %r12, %r11
 	pxor	   %xmm\i, %xmm\i
 
 _get_AAD_loop\num_initial_blocks\operation:
 	movd	   (%r10), \TMP1
 	pslldq	   $12, \TMP1

@@ -225,16 +239,18 @@ _get_AAD_loop\num_initial_blocks\operation:
 	add	   $4, %r10
 	sub	   $4, %r12
 	jne	   _get_AAD_loop\num_initial_blocks\operation
 
 	cmp	   $16, %r11
 	je	   _get_AAD_loop2_done\num_initial_blocks\operation
 
 	mov	   $16, %r12
 _get_AAD_loop2\num_initial_blocks\operation:
 	psrldq	   $4, %xmm\i
 	sub	   $4, %r12
 	cmp	   %r11, %r12
 	jne	   _get_AAD_loop2\num_initial_blocks\operation
 
 _get_AAD_loop2_done\num_initial_blocks\operation:
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
 
 	xor	   %r11, %r11 # initialise the data pointer offset as zero

@@ -243,59 +259,34 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 
 	mov	   %arg5, %rax			# %rax = *Y0
 	movdqu	   (%rax), \XMM0		# XMM0 = Y0
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM0
 
 .if (\i == 5) || (\i == 6) || (\i == 7)
+	MOVADQ	   ONE(%RIP),\TMP1
+	MOVADQ	   (%arg1),\TMP2
 .irpc index, \i_seq
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
+	paddd	   \TMP1, \XMM0			# INCR Y0
 	movdqa	   \XMM0, %xmm\index
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, %xmm\index		# perform a 16 byte swap
+	pxor	   \TMP2, %xmm\index
 .endr
+	lea	   0x10(%arg1),%r10
+	mov	   keysize,%eax
+	shr	   $2,%eax			# 128->4, 192->6, 256->8
+	add	   $5,%eax			# 128->9, 192->11, 256->13
+
+aes_loop_initial_dec\num_initial_blocks:
+	MOVADQ	   (%r10),\TMP1
+.irpc	index, \i_seq
+	AESENC	   \TMP1, %xmm\index
+.endr
+	add	   $16,%r10
+	sub	   $1,%eax
+	jnz	   aes_loop_initial_dec\num_initial_blocks
+
+	MOVADQ	   (%r10), \TMP1
 .irpc index, \i_seq
-	pxor	   16*0(%arg1), %xmm\index
-.endr
-.irpc index, \i_seq
-	movaps 0x10(%rdi), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 1
-.endr
-.irpc index, \i_seq
-	movaps 0x20(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x30(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x40(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x50(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x60(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x70(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x80(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x90(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0xa0(%arg1), \TMP1
-	AESENCLAST \TMP1, %xmm\index		# Round 10
+	AESENCLAST \TMP1, %xmm\index		# Last Round
 .endr
 .irpc index, \i_seq
 	movdqu	   (%arg3 , %r11, 1), \TMP1

@@ -305,10 +296,8 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 	add	   $16, %r11
 
 	movdqa	   \TMP1, %xmm\index
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, %xmm\index
-	# prepare plaintext/ciphertext for GHASH computation
+
+	# prepare plaintext/ciphertext for GHASH computation
 .endr
 .endif
 	GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1

@@ -338,30 +327,28 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 * Precomputations for HashKey parallel with encryption of first 4 blocks.
 * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
 */
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM1
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	MOVADQ	   ONE(%rip), \TMP1
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM1
 	PSHUFB_XMM %xmm14, \XMM1		# perform a 16 byte swap
 
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM2
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM2
 	PSHUFB_XMM %xmm14, \XMM2		# perform a 16 byte swap
 
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM3
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM3
 	PSHUFB_XMM %xmm14, \XMM3		# perform a 16 byte swap
 
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM4
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM4
 	PSHUFB_XMM %xmm14, \XMM4		# perform a 16 byte swap
 
-	pxor	   16*0(%arg1), \XMM1
-	pxor	   16*0(%arg1), \XMM2
-	pxor	   16*0(%arg1), \XMM3
-	pxor	   16*0(%arg1), \XMM4
+	MOVADQ	   0(%arg1),\TMP1
+	pxor	   \TMP1, \XMM1
+	pxor	   \TMP1, \XMM2
+	pxor	   \TMP1, \XMM3
+	pxor	   \TMP1, \XMM4
 	movdqa	   \TMP3, \TMP5
 	pshufd	   $78, \TMP3, \TMP1
 	pxor	   \TMP3, \TMP1

@@ -399,7 +386,23 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 	pshufd	   $78, \TMP5, \TMP1
 	pxor	   \TMP5, \TMP1
 	movdqa	   \TMP1, HashKey_4_k(%rsp)
-	movaps 0xa0(%arg1), \TMP2
+	lea	   0xa0(%arg1),%r10
+	mov	   keysize,%eax
+	shr	   $2,%eax			# 128->4, 192->6, 256->8
+	sub	   $4,%eax			# 128->0, 192->2, 256->4
+	jz	   aes_loop_pre_dec_done\num_initial_blocks
+
+aes_loop_pre_dec\num_initial_blocks:
+	MOVADQ	   (%r10),\TMP2
+.irpc	index, 1234
+	AESENC	   \TMP2, %xmm\index
+.endr
+	add	   $16,%r10
+	sub	   $1,%eax
+	jnz	   aes_loop_pre_dec\num_initial_blocks
+
+aes_loop_pre_dec_done\num_initial_blocks:
+	MOVADQ	   (%r10), \TMP2
 	AESENCLAST \TMP2, \XMM1
 	AESENCLAST \TMP2, \XMM2
 	AESENCLAST \TMP2, \XMM3

@@ -421,15 +424,11 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 	movdqu	   \XMM4, 16*3(%arg2 , %r11 , 1)
 	movdqa	   \TMP1, \XMM4
 	add	   $64, %r11
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM1		# perform a 16 byte swap
 	pxor	   \XMMDst, \XMM1
 	# combine GHASHed value with the corresponding ciphertext
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM2		# perform a 16 byte swap
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM3		# perform a 16 byte swap
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM4		# perform a 16 byte swap
 
 _initial_blocks_done\num_initial_blocks\operation:

@@ -451,6 +450,7 @@ _initial_blocks_done\num_initial_blocks\operation:
 
 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	MOVADQ	   SHUF_MASK(%rip), %xmm14
 	mov	   arg7, %r10			# %r10 = AAD
 	mov	   arg8, %r12			# %r12 = aadLen
 	mov	   %r12, %r11

@@ -472,7 +472,6 @@ _get_AAD_loop2\num_initial_blocks\operation:
 	cmp	   %r11, %r12
 	jne	   _get_AAD_loop2\num_initial_blocks\operation
 _get_AAD_loop2_done\num_initial_blocks\operation:
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
 
 	xor	   %r11, %r11 # initialise the data pointer offset as zero

@@ -481,59 +480,35 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 
 	mov	   %arg5, %rax			# %rax = *Y0
 	movdqu	   (%rax), \XMM0		# XMM0 = Y0
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM0
 
 .if (\i == 5) || (\i == 6) || (\i == 7)
-.irpc index, \i_seq
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, %xmm\index
-	movdqa	   SHUF_MASK(%rip), %xmm14
-	PSHUFB_XMM %xmm14, %xmm\index		# perform a 16 byte swap
-
-.endr
+	MOVADQ	   ONE(%RIP),\TMP1
+	MOVADQ	   0(%arg1),\TMP2
 .irpc index, \i_seq
-	pxor	   16*0(%arg1), %xmm\index
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, %xmm\index
+	PSHUFB_XMM %xmm14, %xmm\index		# perform a 16 byte swap
+	pxor	   \TMP2, %xmm\index
 .endr
-.irpc index, \i_seq
-	movaps 0x10(%rdi), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 1
-.endr
-.irpc index, \i_seq
-	movaps 0x20(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x30(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x40(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x50(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x60(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x70(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x80(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0x90(%arg1), \TMP1
-	AESENC	   \TMP1, %xmm\index		# Round 2
-.endr
-.irpc index, \i_seq
-	movaps 0xa0(%arg1), \TMP1
-	AESENCLAST \TMP1, %xmm\index		# Round 10
+	lea	   0x10(%arg1),%r10
+	mov	   keysize,%eax
+	shr	   $2,%eax			# 128->4, 192->6, 256->8
+	add	   $5,%eax			# 128->9, 192->11, 256->13
+
+aes_loop_initial_enc\num_initial_blocks:
+	MOVADQ	   (%r10),\TMP1
+.irpc	index, \i_seq
+	AESENC	   \TMP1, %xmm\index
+.endr
+	add	   $16,%r10
+	sub	   $1,%eax
+	jnz	   aes_loop_initial_enc\num_initial_blocks
+
+	MOVADQ	   (%r10), \TMP1
+.irpc index, \i_seq
+	AESENCLAST \TMP1, %xmm\index		# Last Round
 .endr
 .irpc index, \i_seq
 	movdqu	   (%arg3 , %r11, 1), \TMP1

@@ -541,8 +516,6 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 	movdqu %xmm\index, (%arg2 , %r11, 1)
 	# write back plaintext/ciphertext for num_initial_blocks
 	add	   $16, %r11
-
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, %xmm\index
 
 	# prepare plaintext/ciphertext for GHASH computation

@@ -575,30 +548,28 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 * Precomputations for HashKey parallel with encryption of first 4 blocks.
 * Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
 */
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM1
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	MOVADQ	   ONE(%RIP),\TMP1
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM1
 	PSHUFB_XMM %xmm14, \XMM1		# perform a 16 byte swap
 
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM2
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM2
 	PSHUFB_XMM %xmm14, \XMM2		# perform a 16 byte swap
 
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM3
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM3
 	PSHUFB_XMM %xmm14, \XMM3		# perform a 16 byte swap
 
-	paddd	   ONE(%rip), \XMM0		# INCR Y0
-	movdqa	   \XMM0, \XMM4
-	movdqa	   SHUF_MASK(%rip), %xmm14
+	paddd	   \TMP1, \XMM0			# INCR Y0
+	MOVADQ	   \XMM0, \XMM4
 	PSHUFB_XMM %xmm14, \XMM4		# perform a 16 byte swap
 
-	pxor	   16*0(%arg1), \XMM1
-	pxor	   16*0(%arg1), \XMM2
-	pxor	   16*0(%arg1), \XMM3
-	pxor	   16*0(%arg1), \XMM4
+	MOVADQ	   0(%arg1),\TMP1
+	pxor	   \TMP1, \XMM1
+	pxor	   \TMP1, \XMM2
+	pxor	   \TMP1, \XMM3
+	pxor	   \TMP1, \XMM4
 	movdqa	   \TMP3, \TMP5
 	pshufd	   $78, \TMP3, \TMP1
 	pxor	   \TMP3, \TMP1

@@ -636,7 +607,23 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 	pshufd	   $78, \TMP5, \TMP1
 	pxor	   \TMP5, \TMP1
 	movdqa	   \TMP1, HashKey_4_k(%rsp)
-	movaps 0xa0(%arg1), \TMP2
+	lea	   0xa0(%arg1),%r10
+	mov	   keysize,%eax
+	shr	   $2,%eax			# 128->4, 192->6, 256->8
+	sub	   $4,%eax			# 128->0, 192->2, 256->4
+	jz	   aes_loop_pre_enc_done\num_initial_blocks
+
+aes_loop_pre_enc\num_initial_blocks:
+	MOVADQ	   (%r10),\TMP2
+.irpc	index, 1234
+	AESENC	   \TMP2, %xmm\index
+.endr
+	add	   $16,%r10
+	sub	   $1,%eax
+	jnz	   aes_loop_pre_enc\num_initial_blocks
+
+aes_loop_pre_enc_done\num_initial_blocks:
+	MOVADQ	   (%r10), \TMP2
 	AESENCLAST \TMP2, \XMM1
 	AESENCLAST \TMP2, \XMM2
 	AESENCLAST \TMP2, \XMM3

@@ -655,15 +642,11 @@ _get_AAD_loop2_done\num_initial_blocks\operation:
 	movdqu	   \XMM4, 16*3(%arg2 , %r11 , 1)
 
 	add	   $64, %r11
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM1		# perform a 16 byte swap
 	pxor	   \XMMDst, \XMM1
 	# combine GHASHed value with the corresponding ciphertext
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM2		# perform a 16 byte swap
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM3		# perform a 16 byte swap
-	movdqa	   SHUF_MASK(%rip), %xmm14
 	PSHUFB_XMM %xmm14, \XMM4		# perform a 16 byte swap
 
 _initial_blocks_done\num_initial_blocks\operation:

@@ -794,7 +777,23 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
 	AESENC	   \TMP3, \XMM3
 	AESENC	   \TMP3, \XMM4
 	PCLMULQDQ 0x00, \TMP5, \XMM8		# XMM8 = a0*b0
-	movaps 0xa0(%arg1), \TMP3
+	lea	   0xa0(%arg1),%r10
+	mov	   keysize,%eax
+	shr	   $2,%eax			# 128->4, 192->6, 256->8
+	sub	   $4,%eax			# 128->0, 192->2, 256->4
+	jz	   aes_loop_par_enc_done
+
+aes_loop_par_enc:
+	MOVADQ	   (%r10),\TMP3
+.irpc	index, 1234
+	AESENC	   \TMP3, %xmm\index
+.endr
+	add	   $16,%r10
+	sub	   $1,%eax
+	jnz	   aes_loop_par_enc
+
+aes_loop_par_enc_done:
+	MOVADQ	   (%r10), \TMP3
 	AESENCLAST \TMP3, \XMM1			# Round 10
 	AESENCLAST \TMP3, \XMM2
 	AESENCLAST \TMP3, \XMM3

@@ -986,8 +985,24 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
 	AESENC	   \TMP3, \XMM3
 	AESENC	   \TMP3, \XMM4
 	PCLMULQDQ 0x00, \TMP5, \XMM8		# XMM8 = a0*b0
-	movaps 0xa0(%arg1), \TMP3
-	AESENCLAST \TMP3, \XMM1			# Round 10
+	lea	   0xa0(%arg1),%r10
+	mov	   keysize,%eax
+	shr	   $2,%eax			# 128->4, 192->6, 256->8
+	sub	   $4,%eax			# 128->0, 192->2, 256->4
+	jz	   aes_loop_par_dec_done
+
+aes_loop_par_dec:
+	MOVADQ	   (%r10),\TMP3
+.irpc	index, 1234
+	AESENC	   \TMP3, %xmm\index
+.endr
+	add	   $16,%r10
+	sub	   $1,%eax
+	jnz	   aes_loop_par_dec
+
+aes_loop_par_dec_done:
+	MOVADQ	   (%r10), \TMP3
+	AESENCLAST \TMP3, \XMM1			# last round
 	AESENCLAST \TMP3, \XMM2
 	AESENCLAST \TMP3, \XMM3
 	AESENCLAST \TMP3, \XMM4
@@ -1155,33 +1170,29 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
 	pxor      \TMP6, \XMMDst            # reduced result is in XMMDst
 .endm
 
-/* Encryption of a single block done*/
+
+/* Encryption of a single block
+ * uses eax & r10
+ */
 .macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
 
 	pxor		(%arg1), \XMM0
-	movaps 16(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 32(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 48(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 64(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 80(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 96(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 112(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 128(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 144(%arg1), \TMP1
-	AESENC \TMP1, \XMM0
-	movaps 160(%arg1), \TMP1
-	AESENCLAST \TMP1, \XMM0
+	mov		keysize,%eax
+	shr		$2,%eax			# 128->4, 192->6, 256->8
+	add		$5,%eax			# 128->9, 192->11, 256->13
+	lea		16(%arg1), %r10		# get first expanded key address
+
+_esb_loop_\@:
+	MOVADQ		(%r10),\TMP1
+	AESENC		\TMP1,\XMM0
+	add		$16,%r10
+	sub		$1,%eax
+	jnz		_esb_loop_\@
+
+	MOVADQ		(%r10),\TMP1
+	AESENCLAST	\TMP1,\XMM0
 .endm
 
 
 /*****************************************************************************
 * void aesni_gcm_dec(void *aes_ctx,      // AES Key schedule. Starts on a 16 byte boundary.
 *                   u8 *out,            // Plaintext output. Encrypt in-place is allowed.
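Every loop added in this file derives its iteration count the same way: the keysize operand defined earlier reads the key length in bytes out of the expanded-key context, and two arithmetic steps turn it into a round count. A hedged C restatement of that arithmetic (the helper is ours, purely illustrative):

	/* key_length is 16, 24 or 32 bytes for AES-128/192/256. */
	static inline int aes_enc_rounds(unsigned int key_length)
	{
		/*
		 * shr $2: 16->4, 24->6, 32->8
		 * add $5: -> 9, 11, 13, i.e. Nr - 1 AESENC rounds, with
		 * AESENCLAST supplying the final round (AES-128/192/256
		 * use 10/12/14 rounds in total).
		 */
		return (key_length >> 2) + 5;
	}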
@@ -43,6 +43,7 @@
 #include <asm/crypto/glue_helper.h>
 #endif
 
+
 /* This data is stored at the end of the crypto_tfm struct.
  * It's a type of per "session" data storage location.
  * This needs to be 16 byte aligned.

@@ -182,7 +183,8 @@ static void aesni_gcm_enc_avx(void *ctx, u8 *out,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	if (plaintext_len < AVX_GEN2_OPTSIZE) {
+	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
 		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
 				aad_len, auth_tag, auth_tag_len);
 	} else {

@@ -197,7 +199,8 @@ static void aesni_gcm_dec_avx(void *ctx, u8 *out,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
 		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
 				aad_len, auth_tag, auth_tag_len);
 	} else {

@@ -231,7 +234,8 @@ static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	if (plaintext_len < AVX_GEN2_OPTSIZE) {
+	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
 		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
 				aad_len, auth_tag, auth_tag_len);
 	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {

@@ -250,7 +254,8 @@ static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
+	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
 		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
 				aad, aad_len, auth_tag, auth_tag_len);
 	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
@@ -511,7 +516,7 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
@@ -902,7 +907,8 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
 	}
 	/*Account for 4 byte nonce at the end.*/
 	key_len -= 4;
-	if (key_len != AES_KEYSIZE_128) {
+	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
+	    key_len != AES_KEYSIZE_256) {
 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}

@@ -1013,6 +1019,7 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
 	__be32 counter = cpu_to_be32(1);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	u32 key_len = ctx->aes_key_expanded.key_length;
 	void *aes_ctx = &(ctx->aes_key_expanded);
 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 	u8 iv_tab[16+AESNI_ALIGN];

@@ -1027,6 +1034,13 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
 	/* to 8 or 12 bytes */
 	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
 		return -EINVAL;
+	if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
+	        return -EINVAL;
+	if (unlikely(key_len != AES_KEYSIZE_128 &&
+	             key_len != AES_KEYSIZE_192 &&
+	             key_len != AES_KEYSIZE_256))
+	        return -EINVAL;
+
 	/* IV below built */
 	for (i = 0; i < 4; i++)
 		*(iv+i) = ctx->nonce[i];

@@ -1091,6 +1105,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 	int retval = 0;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	u32 key_len = ctx->aes_key_expanded.key_length;
 	void *aes_ctx = &(ctx->aes_key_expanded);
 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 	u8 iv_and_authTag[32+AESNI_ALIGN];

@@ -1104,6 +1119,13 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 	if (unlikely((req->cryptlen < auth_tag_len) ||
 		(req->assoclen != 8 && req->assoclen != 12)))
 		return -EINVAL;
+	if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
+	        return -EINVAL;
+	if (unlikely(key_len != AES_KEYSIZE_128 &&
+	             key_len != AES_KEYSIZE_192 &&
+	             key_len != AES_KEYSIZE_256))
+	        return -EINVAL;
+
 	/* Assuming we are supporting rfc4106 64-bit extended */
 	/* sequence numbers We need to have the AAD length */
 	/* equal to 8 or 12 bytes */
@@ -504,6 +504,4 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
 MODULE_ALIAS_CRYPTO("des3_ede");
 MODULE_ALIAS_CRYPTO("des3_ede-asm");
-MODULE_ALIAS_CRYPTO("des");
-MODULE_ALIAS_CRYPTO("des-asm");
 MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
@@ -427,6 +427,15 @@ config CRYPTO_MD5
 	help
 	  MD5 message digest algorithm (RFC1321).
 
+config CRYPTO_MD5_OCTEON
+	tristate "MD5 digest algorithm (OCTEON)"
+	depends on CPU_CAVIUM_OCTEON
+	select CRYPTO_MD5
+	select CRYPTO_HASH
+	help
+	  MD5 message digest algorithm (RFC1321) implemented
+	  using OCTEON crypto instructions, when available.
+
 config CRYPTO_MD5_SPARC64
 	tristate "MD5 digest algorithm (SPARC64)"
 	depends on SPARC64

@@ -1505,6 +1514,15 @@ config CRYPTO_USER_API_SKCIPHER
 	  This option enables the user-spaces interface for symmetric
 	  key cipher algorithms.
 
+config CRYPTO_USER_API_RNG
+	tristate "User-space interface for random number generator algorithms"
+	depends on NET
+	select CRYPTO_RNG
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-spaces interface for random
+	  number generator algorithms.
+
 config CRYPTO_HASH_INFO
 	bool
 

@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
+obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
 
 #
 # generic algorithms and the async_tx api
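Together these two hunks expose crypto_rng through the existing AF_ALG socket family. A minimal user-space sketch of how the new interface is driven (error handling omitted; whether seeding via setsockopt(ALG_SET_KEY) is required first depends on the algorithm behind "stdrng"):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "rng",		/* the new algif_rng type */
			.salg_name   = "stdrng",	/* default kernel RNG */
		};
		unsigned char buf[16];
		int tfmfd, opfd;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		if (read(opfd, buf, sizeof(buf)) == (ssize_t)sizeof(buf))
			printf("got %zu random bytes\n", sizeof(buf));

		close(opfd);
		close(tfmfd);
		return 0;
	}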
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
@@ -69,6 +69,7 @@ static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
 static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
 {
 	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
 	return max(start, end_page);
 }
 
@@ -86,7 +87,7 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
 		if (n == len_this_page)
 			break;
 		n -= len_this_page;
-		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
 	}
 
 	return bsize;
@@ -284,6 +285,7 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
 	walk->iv = req->info;
 	if (unlikely(((unsigned long)walk->iv & alignmask))) {
 		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+
 		if (err)
 			return err;
 	}
@@ -589,7 +591,8 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
 	if (IS_ERR(inst))
 		goto put_tmpl;
 
-	if ((err = crypto_register_instance(tmpl, inst))) {
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
 		tmpl->free(inst);
 		goto put_tmpl;
 	}
diff --git a/crypto/aead.c b/crypto/aead.c
@@ -448,7 +448,8 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
 	if (IS_ERR(inst))
 		goto put_tmpl;
 
-	if ((err = crypto_register_instance(tmpl, inst))) {
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
 		tmpl->free(inst);
 		goto put_tmpl;
 	}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
@@ -188,7 +188,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey,
 	err = type->setkey(ask->private, key, keylen);
 
 out:
-	sock_kfree_s(sk, key, keylen);
+	sock_kzfree_s(sk, key, keylen);
 
 	return err;
 }
@@ -215,6 +215,13 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
 			goto unlock;
 
 		err = alg_setkey(sk, optval, optlen);
+		break;
+	case ALG_SET_AEAD_AUTHSIZE:
+		if (sock->state == SS_CONNECTED)
+			goto unlock;
+		if (!type->setauthsize)
+			goto unlock;
+		err = type->setauthsize(ask->private, optlen);
 	}
 
 unlock:
@@ -387,7 +394,7 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 		if (cmsg->cmsg_level != SOL_ALG)
 			continue;
 
-		switch(cmsg->cmsg_type) {
+		switch (cmsg->cmsg_type) {
 		case ALG_SET_IV:
 			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
 				return -EINVAL;
diff --git a/crypto/ahash.c b/crypto/ahash.c
@@ -55,6 +55,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 
 	if (offset & alignmask) {
 		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+
 		if (nbytes > unaligned)
 			nbytes = unaligned;
 	}
@@ -120,7 +121,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 	if (!walk->total)
 		return 0;
 
-	walk->sg = scatterwalk_sg_next(walk->sg);
+	walk->sg = sg_next(walk->sg);
 
 	return hash_walk_new_entry(walk);
 }
diff --git a/crypto/algapi.c b/crypto/algapi.c
@@ -473,6 +473,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 	list = &tmpl->instances;
 	hlist_for_each_entry(inst, list, list) {
 		int err = crypto_remove_alg(&inst->alg, &users);
+
 		BUG_ON(err);
 	}
 
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
new file mode 100644
@@ -0,0 +1,192 @@
+/*
+ * algif_rng: User-space interface for random number generators
+ *
+ * This file provides the user-space API for random number generators.
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL2
+ * are required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <crypto/rng.h>
+#include <linux/random.h>
+#include <crypto/if_alg.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("User-space interface for random number generators");
+
+struct rng_ctx {
+#define MAXSIZE 128
+	unsigned int len;
+	struct crypto_rng *drng;
+};
+
+static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
+		       struct msghdr *msg, size_t len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct rng_ctx *ctx = ask->private;
+	int err = -EFAULT;
+	int genlen = 0;
+	u8 result[MAXSIZE];
+
+	if (len == 0)
+		return 0;
+	if (len > MAXSIZE)
+		len = MAXSIZE;
+
+	/*
+	 * although not strictly needed, this is a precaution against coding
+	 * errors
+	 */
+	memset(result, 0, len);
+
+	/*
+	 * The enforcement of a proper seeding of an RNG is done within an
+	 * RNG implementation. Some RNGs (DRBG, krng) do not need specific
+	 * seeding as they automatically seed. The X9.31 DRNG will return
+	 * an error if it was not seeded properly.
+	 */
+	genlen = crypto_rng_get_bytes(ctx->drng, result, len);
+	if (genlen < 0)
+		return genlen;
+
+	err = memcpy_to_msg(msg, result, len);
+	memzero_explicit(result, genlen);
+
+	return err ? err : len;
+}
+
+static struct proto_ops algif_rng_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.accept		=	sock_no_accept,
+	.setsockopt	=	sock_no_setsockopt,
+	.poll		=	sock_no_poll,
+	.sendmsg	=	sock_no_sendmsg,
+	.sendpage	=	sock_no_sendpage,
+
+	.release	=	af_alg_release,
+	.recvmsg	=	rng_recvmsg,
+};
+
+static void *rng_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_rng(name, type, mask);
+}
+
+static void rng_release(void *private)
+{
+	crypto_free_rng(private);
+}
+
+static void rng_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct rng_ctx *ctx = ask->private;
+
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
+static int rng_accept_parent(void *private, struct sock *sk)
+{
+	struct rng_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned int len = sizeof(*ctx);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->len = len;
+
+	/*
+	 * No seeding done at that point -- if multiple accepts are
+	 * done on one RNG instance, each resulting FD points to the same
+	 * state of the RNG.
+	 */
+
+	ctx->drng = private;
+	ask->private = ctx;
+	sk->sk_destruct = rng_sock_destruct;
+
+	return 0;
+}
+
+static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
+{
+	/*
+	 * Check whether seedlen is of sufficient size is done in RNG
+	 * implementations.
+	 */
+	return crypto_rng_reset(private, (u8 *)seed, seedlen);
+}
+
+static const struct af_alg_type algif_type_rng = {
+	.bind		=	rng_bind,
+	.release	=	rng_release,
+	.accept		=	rng_accept_parent,
+	.setkey		=	rng_setkey,
+	.ops		=	&algif_rng_ops,
+	.name		=	"rng",
+	.owner		=	THIS_MODULE
+};
+
+static int __init rng_init(void)
+{
+	return af_alg_register_type(&algif_type_rng);
+}
+
+static void __exit rng_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_rng);
+	BUG_ON(err);
+}
+
+module_init(rng_init);
+module_exit(rng_exit);
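As a point of reference, the following user-space sketch shows how the new "rng" AF_ALG type above is intended to be exercised over the netlink-style socket interface. The DRBG name, the seed contents, and the omission of error handling are illustrative assumptions, not part of the patch.

/* Hypothetical user-space consumer of algif_rng; the algorithm name and
 * seed are examples only, and error checking is omitted for brevity. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "rng",			/* matches .name above */
		.salg_name   = "drbg_nopr_sha256",	/* example RNG */
	};
	unsigned char seed[48] = { 0 };			/* example seed material */
	unsigned char buf[64];
	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);

	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	/* ALG_SET_KEY is routed to rng_setkey() -> crypto_rng_reset() */
	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, seed, sizeof(seed));
	int op = accept(tfm, NULL, 0);			/* rng_accept_parent() */
	read(op, buf, sizeof(buf));			/* rng_recvmsg(), capped at MAXSIZE */
	close(op);
	close(tfm);
	return 0;
}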
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
@@ -330,6 +330,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 
 		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
 		sg = sgl->sg;
+		sg_unmark_end(sg + sgl->cur);
 		do {
 			i = sgl->cur;
 			plen = min_t(int, len, PAGE_SIZE);
@@ -355,6 +356,9 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 			sgl->cur++;
 		} while (len && sgl->cur < MAX_SGL_ENTS);
 
+		if (!size)
+			sg_mark_end(sg + sgl->cur - 1);
+
 		ctx->merge = plen & (PAGE_SIZE - 1);
 	}
 
@@ -401,6 +405,10 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 	ctx->merge = 0;
 	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
 
+	if (sgl->cur)
+		sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+	sg_mark_end(sgl->sg + sgl->cur);
 	get_page(page);
 	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
 	sgl->cur++;
diff --git a/crypto/cts.c b/crypto/cts.c
@@ -290,6 +290,9 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
 	if (!is_power_of_2(alg->cra_blocksize))
 		goto out_put_alg;
 
+	if (strncmp(alg->cra_name, "cbc(", 4))
+		goto out_put_alg;
+
 	inst = crypto_alloc_instance("cts", alg);
 	if (IS_ERR(inst))
 		goto out_put_alg;
@@ -307,8 +310,6 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb)
 	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
 	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
 
-	inst->alg.cra_blkcipher.geniv = "seqiv";
-
 	inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx);
 
 	inst->alg.cra_init = crypto_cts_init_tfm;
diff --git a/crypto/drbg.c b/crypto/drbg.c
@@ -98,7 +98,6 @@
  */
 
 #include <crypto/drbg.h>
-#include <linux/string.h>
 
 /***************************************************************
  * Backend cipher definitions available to DRBG
@@ -223,15 +222,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
  * function. Thus, the function implicitly knows the size of the
  * buffer.
  *
- * The FIPS test can be called in an endless loop until it returns
- * true. Although the code looks like a potential for a deadlock, it
- * is not the case, because returning a false cannot mathematically
- * occur (except once when a reseed took place and the updated state
- * would is now set up such that the generation of new value returns
- * an identical one -- this is most unlikely and would happen only once).
- * Thus, if this function repeatedly returns false and thus would cause
- * a deadlock, the integrity of the entire kernel is lost.
- *
  * @drbg DRBG handle
  * @buf output buffer of random data to be checked
  *
@@ -258,6 +248,8 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg,
 		return false;
 	}
 	ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
+	if (!ret)
+		panic("DRBG continuous self test failed\n");
 	memcpy(drbg->prev, buf, drbg_blocklen(drbg));
 	/* the test shall pass when the two compared values are not equal */
 	return ret != 0;
@@ -498,9 +490,9 @@ static int drbg_ctr_df(struct drbg_state *drbg,
 	ret = 0;
 
 out:
-	memzero_explicit(iv, drbg_blocklen(drbg));
-	memzero_explicit(temp, drbg_statelen(drbg));
-	memzero_explicit(pad, drbg_blocklen(drbg));
+	memset(iv, 0, drbg_blocklen(drbg));
+	memset(temp, 0, drbg_statelen(drbg));
+	memset(pad, 0, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -574,9 +566,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
 	ret = 0;
 
 out:
-	memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg));
+	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
 	if (2 != reseed)
-		memzero_explicit(df_data, drbg_statelen(drbg));
+		memset(df_data, 0, drbg_statelen(drbg));
 	return ret;
 }
 
@@ -634,7 +626,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 	len = ret;
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 	return len;
 }
 
@@ -872,7 +864,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
 	}
 
 out:
-	memzero_explicit(tmp, drbg_blocklen(drbg));
+	memset(tmp, 0, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -916,7 +908,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
 	ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_statelen(drbg));
+	memset(drbg->scratchpad, 0, drbg_statelen(drbg));
 	return ret;
 }
 
@@ -951,7 +943,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
 			  drbg->scratchpad, drbg_blocklen(drbg));
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -998,7 +990,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
 	}
 
 out:
-	memzero_explicit(drbg->scratchpad,
+	memset(drbg->scratchpad, 0,
 		     (drbg_statelen(drbg) + drbg_blocklen(drbg)));
 	return len;
 }
@@ -1047,7 +1039,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
 	drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
 
 out:
-	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
+	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 	return len;
 }
 
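The memzero_explicit() to memset() conversions above track the note in this update that memzero_explicit() is not a substitute for memset(): it is only required where a clearing store could be discarded as dead by the compiler. A minimal illustrative sketch, not taken from the patch; the function and buffer names are hypothetical:

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: 'scratch' outlives the function (like drbg->scratchpad
 * above), so a plain memset() cannot be elided; 'tmp' dies at function exit,
 * so clearing it needs memzero_explicit() to survive dead-store elimination. */
static void clear_example(u8 *scratch, size_t len)
{
	u8 tmp[64];

	/* ... tmp and scratch hold sensitive intermediate data ... */

	memset(scratch, 0, len);		/* buffer stays live: memset is enough */
	memzero_explicit(tmp, sizeof(tmp));	/* local about to go out of scope */
}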
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
@@ -62,7 +62,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
 		walk->offset += PAGE_SIZE - 1;
 		walk->offset &= PAGE_MASK;
 		if (walk->offset >= walk->sg->offset + walk->sg->length)
-			scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
+			scatterwalk_start(walk, sg_next(walk->sg));
 	}
 }
 
@@ -116,7 +116,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
 			break;
 
 		offset += sg->length;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	scatterwalk_advance(&walk, start - offset);
@@ -136,7 +136,7 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
 	do {
 		offset += sg->length;
 		n++;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 
 		/* num_bytes is too large */
 		if (unlikely(!sg && (num_bytes < offset)))
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
@@ -267,6 +267,12 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
 	if (IS_ERR(inst))
 		goto out;
 
+	if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) {
+		skcipher_geniv_free(inst);
+		inst = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
 	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
 
 	inst->alg.cra_init = seqiv_init;
@@ -287,6 +293,12 @@ static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
 	if (IS_ERR(inst))
 		goto out;
 
+	if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
+		aead_geniv_free(inst);
+		inst = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
 	inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
 
 	inst->alg.cra_init = seqiv_aead_init;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
@@ -250,19 +250,19 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
 	int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
 	int k, rem;
 
-	np = (np > XBUFSIZE) ? XBUFSIZE : np;
-	rem = buflen % PAGE_SIZE;
 	if (np > XBUFSIZE) {
 		rem = PAGE_SIZE;
 		np = XBUFSIZE;
+	} else {
+		rem = buflen % PAGE_SIZE;
 	}
 
 	sg_init_table(sg, np);
-	for (k = 0; k < np; ++k) {
-		if (k == (np-1))
-			sg_set_buf(&sg[k], xbuf[k], rem);
-		else
-			sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
-	}
+	np--;
+	for (k = 0; k < np; k++)
+		sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
+
+	sg_set_buf(&sg[k], xbuf[k], rem);
 }
 
 static void test_aead_speed(const char *algo, int enc, unsigned int secs,
@@ -280,16 +280,20 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	struct scatterlist *sgout;
 	const char *e;
 	void *assoc;
-	char iv[MAX_IVLEN];
+	char *iv;
 	char *xbuf[XBUFSIZE];
 	char *xoutbuf[XBUFSIZE];
 	char *axbuf[XBUFSIZE];
 	unsigned int *b_size;
 	unsigned int iv_len;
 
+	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+	if (!iv)
+		return;
+
 	if (aad_size >= PAGE_SIZE) {
 		pr_err("associate data length (%u) too big\n", aad_size);
-		return;
+		goto out_noxbuf;
 	}
 
 	if (enc == ENCRYPT)
@@ -355,7 +359,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 
 	iv_len = crypto_aead_ivsize(tfm);
 	if (iv_len)
-		memset(&iv, 0xff, iv_len);
+		memset(iv, 0xff, iv_len);
 
 	crypto_aead_clear_flags(tfm, ~0);
 	printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
@@ -408,6 +412,7 @@ out_nooutbuf:
 out_noaxbuf:
 	testmgr_free_buf(xbuf);
 out_noxbuf:
+	kfree(iv);
 	return;
 }
 
@@ -764,10 +769,9 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
 		struct tcrypt_result *tr = req->base.data;
 
-		ret = wait_for_completion_interruptible(&tr->completion);
-		if (!ret)
-			ret = tr->err;
+		wait_for_completion(&tr->completion);
 		reinit_completion(&tr->completion);
+		ret = tr->err;
 	}
 	return ret;
 }
@@ -993,10 +997,9 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
 		struct tcrypt_result *tr = req->base.data;
 
-		ret = wait_for_completion_interruptible(&tr->completion);
-		if (!ret)
-			ret = tr->err;
+		wait_for_completion(&tr->completion);
 		reinit_completion(&tr->completion);
+		ret = tr->err;
 	}
 
 	return ret;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
@@ -181,10 +181,9 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
 static int wait_async_op(struct tcrypt_result *tr, int ret)
 {
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
-		ret = wait_for_completion_interruptible(&tr->completion);
-		if (!ret)
-			ret = tr->err;
+		wait_for_completion(&tr->completion);
 		reinit_completion(&tr->completion);
+		ret = tr->err;
 	}
 	return ret;
 }
@@ -353,12 +352,11 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&tresult.completion);
-			if (!ret && !(ret = tresult.err)) {
-				reinit_completion(&tresult.completion);
+			wait_for_completion(&tresult.completion);
+			reinit_completion(&tresult.completion);
+			ret = tresult.err;
+			if (!ret)
 				break;
-			}
 			/* fall through */
 		default:
 			printk(KERN_ERR "alg: hash: digest failed "
@@ -431,7 +429,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 	struct scatterlist *sgout;
 	const char *e, *d;
 	struct tcrypt_result result;
-	unsigned int authsize;
+	unsigned int authsize, iv_len;
 	void *input;
 	void *output;
 	void *assoc;
@@ -502,10 +500,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 
 		memcpy(input, template[i].input, template[i].ilen);
 		memcpy(assoc, template[i].assoc, template[i].alen);
+		iv_len = crypto_aead_ivsize(tfm);
 		if (template[i].iv)
-			memcpy(iv, template[i].iv, MAX_IVLEN);
+			memcpy(iv, template[i].iv, iv_len);
 		else
-			memset(iv, 0, MAX_IVLEN);
+			memset(iv, 0, iv_len);
 
 		crypto_aead_clear_flags(tfm, ~0);
 		if (template[i].wk)
@@ -569,12 +568,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !(ret = result.err)) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 		case -EBADMSG:
 			if (template[i].novrfy)
 				/* verification failure was expected */
@@ -720,12 +718,11 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !(ret = result.err)) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 		case -EBADMSG:
 			if (template[i].novrfy)
 				/* verification failure was expected */
@@ -1002,12 +999,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !((ret = result.err))) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 			/* fall through */
 		default:
 			pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
@@ -1097,12 +1093,11 @@ static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&result.completion);
-			if (!ret && !((ret = result.err))) {
-				reinit_completion(&result.completion);
+			wait_for_completion(&result.completion);
+			reinit_completion(&result.completion);
+			ret = result.err;
+			if (!ret)
 				break;
-			}
 			/* fall through */
 		default:
 			pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
@@ -3299,6 +3294,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 	}, {
 		.alg = "rfc4106(gcm(aes))",
 		.test = alg_test_aead,
+		.fips_allowed = 1,
 		.suite = {
 			.aead = {
 				.enc = {
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
@@ -42,6 +42,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/random.h>
+#include <linux/err.h>
 #include <asm/uaccess.h>
 
 
@@ -53,7 +54,10 @@
 static struct hwrng *current_rng;
 static struct task_struct *hwrng_fill;
 static LIST_HEAD(rng_list);
+/* Protects rng_list and current_rng */
 static DEFINE_MUTEX(rng_mutex);
+/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
+static DEFINE_MUTEX(reading_mutex);
 static int data_avail;
 static u8 *rng_buffer, *rng_fillbuf;
 static unsigned short current_quality;
@@ -66,6 +70,8 @@ module_param(default_quality, ushort, 0644);
 MODULE_PARM_DESC(default_quality,
 		 "default entropy content of hwrng per mill");
 
+static void drop_current_rng(void);
+static int hwrng_init(struct hwrng *rng);
 static void start_khwrngd(void);
 
 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
@@ -81,13 +87,83 @@ static void add_early_randomness(struct hwrng *rng)
 	unsigned char bytes[16];
 	int bytes_read;
 
+	mutex_lock(&reading_mutex);
 	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+	mutex_unlock(&reading_mutex);
 	if (bytes_read > 0)
 		add_device_randomness(bytes, bytes_read);
 }
 
-static inline int hwrng_init(struct hwrng *rng)
+static inline void cleanup_rng(struct kref *kref)
 {
+	struct hwrng *rng = container_of(kref, struct hwrng, ref);
+
+	if (rng->cleanup)
+		rng->cleanup(rng);
+
+	complete(&rng->cleanup_done);
+}
+
+static int set_current_rng(struct hwrng *rng)
+{
+	int err;
+
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+
+	err = hwrng_init(rng);
+	if (err)
+		return err;
+
+	drop_current_rng();
+	current_rng = rng;
+
+	return 0;
+}
+
+static void drop_current_rng(void)
+{
+	BUG_ON(!mutex_is_locked(&rng_mutex));
+	if (!current_rng)
+		return;
+
+	/* decrease last reference for triggering the cleanup */
+	kref_put(&current_rng->ref, cleanup_rng);
+	current_rng = NULL;
+}
+
+/* Returns ERR_PTR(), NULL or refcounted hwrng */
+static struct hwrng *get_current_rng(void)
+{
+	struct hwrng *rng;
+
+	if (mutex_lock_interruptible(&rng_mutex))
+		return ERR_PTR(-ERESTARTSYS);
+
+	rng = current_rng;
+	if (rng)
+		kref_get(&rng->ref);
+
+	mutex_unlock(&rng_mutex);
+	return rng;
+}
+
+static void put_rng(struct hwrng *rng)
+{
+	/*
+	 * Hold rng_mutex here so we serialize in case they set_current_rng
+	 * on rng again immediately.
+	 */
+	mutex_lock(&rng_mutex);
+	if (rng)
+		kref_put(&rng->ref, cleanup_rng);
+	mutex_unlock(&rng_mutex);
+}
+
+static int hwrng_init(struct hwrng *rng)
+{
+	if (kref_get_unless_zero(&rng->ref))
+		goto skip_init;
+
 	if (rng->init) {
 		int ret;
 
@@ -95,6 +171,11 @@ static inline int hwrng_init(struct hwrng *rng)
 		if (ret)
 			return ret;
 	}
+
+	kref_init(&rng->ref);
+	reinit_completion(&rng->cleanup_done);
+
+skip_init:
 	add_early_randomness(rng);
 
 	current_quality = rng->quality ? : default_quality;
@@ -108,12 +189,6 @@ static inline int hwrng_init(struct hwrng *rng)
 	return 0;
 }
 
-static inline void hwrng_cleanup(struct hwrng *rng)
-{
-	if (rng && rng->cleanup)
-		rng->cleanup(rng);
-}
-
 static int rng_dev_open(struct inode *inode, struct file *filp)
 {
 	/* enforce read-only access to this chrdev */
@@ -128,6 +203,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
 			int wait) {
 	int present;
 
+	BUG_ON(!mutex_is_locked(&reading_mutex));
 	if (rng->read)
 		return rng->read(rng, (void *)buffer, size, wait);
 
@@ -148,25 +224,27 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 	ssize_t ret = 0;
 	int err = 0;
 	int bytes_read, len;
+	struct hwrng *rng;
 
 	while (size) {
-		if (mutex_lock_interruptible(&rng_mutex)) {
-			err = -ERESTARTSYS;
+		rng = get_current_rng();
+		if (IS_ERR(rng)) {
+			err = PTR_ERR(rng);
+			goto out;
+		}
+		if (!rng) {
+			err = -ENODEV;
 			goto out;
 		}
 
-		if (!current_rng) {
-			err = -ENODEV;
-			goto out_unlock;
-		}
-
+		mutex_lock(&reading_mutex);
 		if (!data_avail) {
-			bytes_read = rng_get_data(current_rng, rng_buffer,
+			bytes_read = rng_get_data(rng, rng_buffer,
 				rng_buffer_size(),
 				!(filp->f_flags & O_NONBLOCK));
 			if (bytes_read < 0) {
 				err = bytes_read;
-				goto out_unlock;
+				goto out_unlock_reading;
 			}
 			data_avail = bytes_read;
 		}
@@ -174,7 +252,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 		if (!data_avail) {
 			if (filp->f_flags & O_NONBLOCK) {
 				err = -EAGAIN;
-				goto out_unlock;
+				goto out_unlock_reading;
 			}
 		} else {
 			len = data_avail;
@@ -186,14 +264,15 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 			if (copy_to_user(buf + ret, rng_buffer + data_avail,
 								len)) {
 				err = -EFAULT;
-				goto out_unlock;
+				goto out_unlock_reading;
 			}
 
 			size -= len;
 			ret += len;
 		}
 
-		mutex_unlock(&rng_mutex);
+		mutex_unlock(&reading_mutex);
+		put_rng(rng);
 
 		if (need_resched())
 			schedule_timeout_interruptible(1);
@@ -205,8 +284,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 	}
 out:
 	return ret ? : err;
-out_unlock:
-	mutex_unlock(&rng_mutex);
+out_unlock_reading:
+	mutex_unlock(&reading_mutex);
+	put_rng(rng);
 	goto out;
 }
 
@@ -239,16 +320,9 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
 	err = -ENODEV;
 	list_for_each_entry(rng, &rng_list, list) {
 		if (strcmp(rng->name, buf) == 0) {
-			if (rng == current_rng) {
-				err = 0;
-				break;
-			}
-			err = hwrng_init(rng);
-			if (err)
-				break;
-			hwrng_cleanup(current_rng);
-			current_rng = rng;
 			err = 0;
+			if (rng != current_rng)
+				err = set_current_rng(rng);
 			break;
 		}
 	}
@@ -261,17 +335,15 @@ static ssize_t hwrng_attr_current_show(struct device *dev,
 				       struct device_attribute *attr,
 				       char *buf)
 {
-	int err;
 	ssize_t ret;
-	const char *name = "none";
+	struct hwrng *rng;
 
-	err = mutex_lock_interruptible(&rng_mutex);
-	if (err)
-		return -ERESTARTSYS;
-	if (current_rng)
-		name = current_rng->name;
-	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
-	mutex_unlock(&rng_mutex);
+	rng = get_current_rng();
+	if (IS_ERR(rng))
+		return PTR_ERR(rng);
+
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
+	put_rng(rng);
 
 	return ret;
 }
@@ -305,14 +377,14 @@ static DEVICE_ATTR(rng_available, S_IRUGO,
 		   NULL);
 
 
-static void unregister_miscdev(void)
+static void __exit unregister_miscdev(void)
 {
 	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
 	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
 	misc_deregister(&rng_miscdev);
 }
 
-static int register_miscdev(void)
+static int __init register_miscdev(void)
 {
 	int err;
 
@@ -342,15 +414,22 @@ static int hwrng_fillfn(void *unused)
 	long rc;
 
 	while (!kthread_should_stop()) {
-		if (!current_rng)
+		struct hwrng *rng;
+
+		rng = get_current_rng();
+		if (IS_ERR(rng) || !rng)
 			break;
-		rc = rng_get_data(current_rng, rng_fillbuf,
+		mutex_lock(&reading_mutex);
+		rc = rng_get_data(rng, rng_fillbuf,
 				  rng_buffer_size(), 1);
+		mutex_unlock(&reading_mutex);
+		put_rng(rng);
 		if (rc <= 0) {
 			pr_warn("hwrng: no data available\n");
 			msleep_interruptible(10000);
 			continue;
 		}
+		/* Outside lock, sure, but y'know: randomness. */
 		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
 					   rc * current_quality * 8 >> 10);
 	}
@@ -400,23 +479,16 @@ int hwrng_register(struct hwrng *rng)
 		goto out_unlock;
 	}
 
+	init_completion(&rng->cleanup_done);
+	complete(&rng->cleanup_done);
+
 	old_rng = current_rng;
-	if (!old_rng) {
-		err = hwrng_init(rng);
-		if (err)
-			goto out_unlock;
-		current_rng = rng;
-	}
 	err = 0;
 	if (!old_rng) {
-		err = register_miscdev();
-		if (err) {
-			hwrng_cleanup(rng);
-			current_rng = NULL;
+		err = set_current_rng(rng);
+		if (err)
 			goto out_unlock;
-		}
 	}
-	INIT_LIST_HEAD(&rng->list);
 	list_add_tail(&rng->list, &rng_list);
 
 	if (old_rng && !rng->init) {
@@ -439,42 +511,49 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
-	int err;
-
 	mutex_lock(&rng_mutex);
 
 	list_del(&rng->list);
 	if (current_rng == rng) {
-		hwrng_cleanup(rng);
-		if (list_empty(&rng_list)) {
-			current_rng = NULL;
-		} else {
-			current_rng = list_entry(rng_list.prev, struct hwrng, list);
-			err = hwrng_init(current_rng);
-			if (err)
-				current_rng = NULL;
+		drop_current_rng();
+		if (!list_empty(&rng_list)) {
+			struct hwrng *tail;
+
+			tail = list_entry(rng_list.prev, struct hwrng, list);
+
+			set_current_rng(tail);
 		}
 	}
 
 	if (list_empty(&rng_list)) {
-		unregister_miscdev();
+		mutex_unlock(&rng_mutex);
 		if (hwrng_fill)
 			kthread_stop(hwrng_fill);
-	}
+	} else
+		mutex_unlock(&rng_mutex);
 
-	mutex_unlock(&rng_mutex);
+	wait_for_completion(&rng->cleanup_done);
 }
 EXPORT_SYMBOL_GPL(hwrng_unregister);
 
-static void __exit hwrng_exit(void)
+static int __init hwrng_modinit(void)
+{
+	return register_miscdev();
+}
+
+static void __exit hwrng_modexit(void)
 {
 	mutex_lock(&rng_mutex);
 	BUG_ON(current_rng);
 	kfree(rng_buffer);
 	kfree(rng_fillbuf);
 	mutex_unlock(&rng_mutex);
+
+	unregister_miscdev();
 }
 
-module_exit(hwrng_exit);
+module_init(hwrng_modinit);
+module_exit(hwrng_modexit);
 
 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
 MODULE_LICENSE("GPL");
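For orientation, rng_dev_read() above backs the /dev/hwrng character device; after the rework, a reader pins the active generator through get_current_rng()/put_rng() and only reading_mutex is held across the hardware read. A small user-space consumer, included here purely as an illustration:

/* Illustrative only: reads a few bytes from the hwrng chrdev. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[32];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);	/* served by rng_dev_read() */

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));		/* takes reading_mutex, not rng_mutex */
	if (n > 0)
		printf("read %zd hardware random bytes\n", n);
	close(fd);
	return 0;
}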
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
@@ -39,7 +39,6 @@ struct virtrng_info {
 	bool hwrng_removed;
 };
 
-
 static void random_recv_done(struct virtqueue *vq)
 {
 	struct virtrng_info *vi = vq->vdev->priv;
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -34,29 +34,6 @@
 #include "crypto4xx_sa.h"
 #include "crypto4xx_core.h"
 
-u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx)
-{
-	u32 offset;
-	union dynamic_sa_contents cts;
-
-	if (ctx->direction == DIR_INBOUND)
-		cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
-	else
-		cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
-	offset = cts.bf.key_size
-		+ cts.bf.inner_size
-		+ cts.bf.outer_size
-		+ cts.bf.spi
-		+ cts.bf.seq_num0
-		+ cts.bf.seq_num1
-		+ cts.bf.seq_num_mask0
-		+ cts.bf.seq_num_mask1
-		+ cts.bf.seq_num_mask2
-		+ cts.bf.seq_num_mask3;
-
-	return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
 u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
 {
 	u32 offset;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
@@ -673,9 +673,9 @@ err_map_out:
 	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
 			DMA_TO_DEVICE);
 err_map_in:
+err_alloc:
 	free_page((unsigned long)dd->buf_out);
 	free_page((unsigned long)dd->buf_in);
-err_alloc:
 	if (err)
 		pr_err("error: %d\n", err);
 	return err;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
@@ -102,10 +102,6 @@ struct atmel_sha_ctx {
 	struct atmel_sha_dev	*dd;
 
 	unsigned long		flags;
-
-	/* fallback stuff */
-	struct crypto_shash	*fallback;
-
 };
 
 #define ATMEL_SHA_QUEUE_LENGTH	50
@@ -974,19 +970,8 @@ static int atmel_sha_digest(struct ahash_request *req)
 	return atmel_sha_init(req) ?: atmel_sha_finup(req);
 }
 
-static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+static int atmel_sha_cra_init(struct crypto_tfm *tfm)
 {
-	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
-	const char *alg_name = crypto_tfm_alg_name(tfm);
-
-	/* Allocate a fallback and abort if it failed. */
-	tctx->fallback = crypto_alloc_shash(alg_name, 0,
-					    CRYPTO_ALG_NEED_FALLBACK);
-	if (IS_ERR(tctx->fallback)) {
-		pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
-				alg_name);
-		return PTR_ERR(tctx->fallback);
-	}
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct atmel_sha_reqctx) +
 				 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
@@ -994,19 +979,6 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static int atmel_sha_cra_init(struct crypto_tfm *tfm)
-{
-	return atmel_sha_cra_init_alg(tfm, NULL);
-}
-
-static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
-{
-	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_shash(tctx->fallback);
-	tctx->fallback = NULL;
-}
-
 static struct ahash_alg sha_1_256_algs[] = {
 {
 	.init		= atmel_sha_init,
@@ -1020,14 +992,12 @@ static struct ahash_alg sha_1_256_algs[] = {
 		.cra_name		= "sha1",
 		.cra_driver_name	= "atmel-sha1",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
 		.cra_alignmask		= 0,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= atmel_sha_cra_init,
-		.cra_exit		= atmel_sha_cra_exit,
 	}
 }
 },
@@ -1043,14 +1013,12 @@ static struct ahash_alg sha_1_256_algs[] = {
 		.cra_name		= "sha256",
 		.cra_driver_name	= "atmel-sha256",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
 		.cra_alignmask		= 0,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= atmel_sha_cra_init,
-		.cra_exit		= atmel_sha_cra_exit,
 	}
 }
 },
@@ -1068,14 +1036,12 @@ static struct ahash_alg sha_224_alg = {
 		.cra_name		= "sha224",
 		.cra_driver_name	= "atmel-sha224",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
 		.cra_blocksize		= SHA224_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
 		.cra_alignmask		= 0,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= atmel_sha_cra_init,
-		.cra_exit		= atmel_sha_cra_exit,
 	}
 }
 };
@@ -1093,14 +1059,12 @@ static struct ahash_alg sha_384_512_algs[] = {
 		.cra_name		= "sha384",
 		.cra_driver_name	= "atmel-sha384",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
 		.cra_blocksize		= SHA384_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
 		.cra_alignmask		= 0x3,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= atmel_sha_cra_init,
-		.cra_exit		= atmel_sha_cra_exit,
 	}
 }
 },
@@ -1116,14 +1080,12 @@ static struct ahash_alg sha_384_512_algs[] = {
 		.cra_name		= "sha512",
 		.cra_driver_name	= "atmel-sha512",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
 		.cra_blocksize		= SHA512_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
 		.cra_alignmask		= 0x3,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= atmel_sha_cra_init,
-		.cra_exit		= atmel_sha_cra_exit,
 	}
 }
 },
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
@@ -376,9 +376,9 @@ err_map_out:
 	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
 			DMA_TO_DEVICE);
 err_map_in:
+err_alloc:
 	free_page((unsigned long)dd->buf_out);
 	free_page((unsigned long)dd->buf_in);
-err_alloc:
 	if (err)
 		pr_err("error: %d\n", err);
 	return err;
@ -110,7 +110,7 @@ static int sg_count(struct scatterlist *sg_list)
|
||||||
|
|
||||||
while (!sg_is_last(sg)) {
|
while (!sg_is_last(sg)) {
|
||||||
sg_nents++;
|
sg_nents++;
|
||||||
sg = scatterwalk_sg_next(sg);
|
sg = sg_next(sg);
|
||||||
}
|
}
|
||||||
|
|
||||||
return sg_nents;
|
return sg_nents;
|
||||||
|
@@ -744,7 +744,7 @@ static int __init bfin_crypto_crc_mod_init(void)
 	ret = platform_driver_register(&bfin_crypto_crc_driver);
 	if (ret) {
-		pr_info(KERN_ERR "unable to register driver\n");
+		pr_err("unable to register driver\n");
 		return ret;
 	}
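
The bug fixed above is easy to reproduce: pr_info() already supplies the KERN_INFO level prefix, so passing KERN_ERR inside the format string embeds a second, stray level marker and still logs at INFO level. A minimal userspace analogue (the level-marker bytes match the kernel's; everything else is simplified):

	#include <stdio.h>

	#define KERN_INFO "\001" "6"	/* simplified kernel log-level markers */
	#define KERN_ERR  "\001" "3"
	#define pr_info(fmt, ...) printf(KERN_INFO fmt, ##__VA_ARGS__)
	#define pr_err(fmt, ...)  printf(KERN_ERR fmt, ##__VA_ARGS__)

	int main(void)
	{
		pr_info(KERN_ERR "unable to register driver\n"); /* wrong: two prefixes */
		pr_err("unable to register driver\n");           /* right: one ERR prefix */
		return 0;
	}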
@@ -2532,7 +2532,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 		in_options = 0;
 	} else {
 		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+		sec4_sg_index += edesc->src_nents + 1;
 		in_options = LDST_SGF;
 	}
 	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
@@ -2714,10 +2714,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (!all_contig) {
 		if (!is_gcm) {
 			sg_to_sec4_sg(req->assoc,
-				      (assoc_nents ? : 1),
+				      assoc_nents,
 				      edesc->sec4_sg +
 				      sec4_sg_index, 0);
-			sec4_sg_index += assoc_nents ? : 1;
+			sec4_sg_index += assoc_nents;
 		}
 
 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
@@ -2726,17 +2726,17 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 		if (is_gcm) {
 			sg_to_sec4_sg(req->assoc,
-				      (assoc_nents ? : 1),
+				      assoc_nents,
 				      edesc->sec4_sg +
 				      sec4_sg_index, 0);
-			sec4_sg_index += assoc_nents ? : 1;
+			sec4_sg_index += assoc_nents;
 		}
 
 		sg_to_sec4_sg_last(req->src,
-				   (src_nents ? : 1),
+				   src_nents,
 				   edesc->sec4_sg +
 				   sec4_sg_index, 0);
-		sec4_sg_index += src_nents ? : 1;
+		sec4_sg_index += src_nents;
 	}
 	if (dst_nents) {
 		sg_to_sec4_sg_last(req->dst, dst_nents,
@@ -175,13 +175,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 {
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
 	struct caam_ctrl __iomem *ctrl;
-	struct rng4tst __iomem *r4tst;
 	u32 *desc, status, rdsta_val;
 	int ret = 0, sh_idx;
 
 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
-	r4tst = &ctrl->r4tst[0];
 
 	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
@@ -209,8 +206,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
 			 * without any error (HW optimizations for later
 			 * CAAM eras), then try again.
 			 */
-			rdsta_val =
-				rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+			rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
 			if (status || !(rdsta_val & (1 << sh_idx)))
 				ret = -EAGAIN;
 			if (ret)
@@ -151,10 +151,15 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
 	else
 		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
 
-	dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
-		status, error, idx_str, idx,
-		cha_str, cha_err_code,
-		err_str, err_err_code);
+	/*
+	 * CCB ICV check failures are part of normal operation life;
+	 * we leave the upper layers to do what they want with them.
+	 */
+	if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+		dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+			status, error, idx_str, idx,
+			cha_str, cha_err_code,
+			err_str, err_err_code);
 }
 
 static void report_jump_status(struct device *jrdev, const u32 status,
@@ -384,30 +384,28 @@ static int caam_jr_init(struct device *dev)
 	if (error) {
 		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
 			jrp->ridx, jrp->irq);
-		irq_dispose_mapping(jrp->irq);
-		jrp->irq = 0;
-		return -EINVAL;
+		goto out_kill_deq;
 	}
 
 	error = caam_reset_hw_jr(dev);
 	if (error)
-		return error;
+		goto out_free_irq;
 
+	error = -ENOMEM;
 	jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
 					  &inpbusaddr, GFP_KERNEL);
+	if (!jrp->inpring)
+		goto out_free_irq;
 
 	jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
 					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+	if (!jrp->outring)
+		goto out_free_inpring;
 
 	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
 			       GFP_KERNEL);
-	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
-	    (jrp->entinfo == NULL)) {
-		dev_err(dev, "can't allocate job rings for %d\n",
-			jrp->ridx);
-		return -ENOMEM;
-	}
+	if (!jrp->entinfo)
+		goto out_free_outring;
 
 	for (i = 0; i < JOBR_DEPTH; i++)
 		jrp->entinfo[i].desc_addr_dma = !0;
@@ -434,6 +432,19 @@ static int caam_jr_init(struct device *dev)
 		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
 
 	return 0;
+
+out_free_outring:
+	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+			  jrp->outring, outbusaddr);
+out_free_inpring:
+	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+			  jrp->inpring, inpbusaddr);
+	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
+out_free_irq:
+	free_irq(jrp->irq, dev);
+out_kill_deq:
+	tasklet_kill(&jrp->irqtask);
+	return error;
 }
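
These two hunks convert caam_jr_init() to the standard kernel unwind idiom: each successful step gets a matching cleanup label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal, self-contained sketch of the pattern (names are illustrative, not the driver's real API):

	#include <stdlib.h>

	static int init_rings(void)
	{
		int err = -1;	/* stands in for -ENOMEM */
		void *inpring, *outring, *entinfo;

		inpring = malloc(64);
		if (!inpring)
			goto out;
		outring = malloc(64);
		if (!outring)
			goto out_free_inpring;
		entinfo = malloc(64);
		if (!entinfo)
			goto out_free_outring;
		return 0;	/* success: allocations stay live for later use */

	out_free_outring:
		free(outring);
	out_free_inpring:
		free(inpring);
	out:
		return err;
	}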
@@ -484,8 +495,10 @@ static int caam_jr_probe(struct platform_device *pdev)
 
 	/* Now do the platform independent part */
 	error = caam_jr_init(jrdev); /* now turn on hardware */
-	if (error)
+	if (error) {
+		irq_dispose_mapping(jrpriv->irq);
 		return error;
+	}
 
 	jrpriv->dev = jrdev;
 	spin_lock(&driver_data.jr_alloc_lock);
@@ -37,7 +37,7 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
 		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
 				   sg_dma_len(sg), offset);
 		sec4_sg_ptr++;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 		sg_count--;
 	}
 	return sec4_sg_ptr - 1;
@@ -67,7 +67,7 @@ static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
 		nbytes -= sg->length;
 		if (!sg_is_last(sg) && (sg + 1)->length == 0)
 			*chained = true;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	return sg_nents;
@@ -93,7 +93,7 @@ static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
 		int i;
 		for (i = 0; i < nents; i++) {
 			dma_map_sg(dev, sg, 1, dir);
-			sg = scatterwalk_sg_next(sg);
+			sg = sg_next(sg);
 		}
 	} else {
 		dma_map_sg(dev, sg, nents, dir);
@@ -109,7 +109,7 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
 		int i;
 		for (i = 0; i < nents; i++) {
 			dma_unmap_sg(dev, sg, 1, dir);
-			sg = scatterwalk_sg_next(sg);
+			sg = sg_next(sg);
 		}
 	} else {
 		dma_unmap_sg(dev, sg, nents, dir);
@@ -583,6 +583,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 #ifdef CONFIG_X86
 static const struct x86_cpu_id ccp_support[] = {
 	{ X86_VENDOR_AMD, 22, },
+	{ },
 };
 #endif
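
The added empty element matters because table walkers of this kind stop at an all-zero sentinel entry; without it, the match loop reads past the end of the array. A standalone sketch of the idea with simplified types (not the kernel's struct x86_cpu_id):

	#include <stdio.h>

	struct cpu_id { int vendor; int family; };

	static const struct cpu_id support[] = {
		{ 1, 22 },
		{ },	/* sentinel: terminates the walk */
	};

	static const struct cpu_id *match(const struct cpu_id *tbl, int vendor, int family)
	{
		for (; tbl->vendor || tbl->family; tbl++)
			if (tbl->vendor == vendor && tbl->family == family)
				return tbl;
		return NULL;	/* hit the sentinel: no match */
	}

	int main(void)
	{
		printf("%s\n", match(support, 1, 22) ? "supported" : "unsupported");
		return 0;
	}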
@@ -784,7 +784,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
 		struct buffer_desc *buf, gfp_t flags,
 		enum dma_data_direction dir)
 {
-	for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+	for (; nbytes > 0; sg = sg_next(sg)) {
 		unsigned len = min(nbytes, sg->length);
 		struct buffer_desc *next_buf;
 		u32 next_buf_phys;
@@ -982,7 +982,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
 			break;
 
 		offset += sg->length;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 	return (start + nbytes > offset + sg->length);
 }
@@ -177,7 +177,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
 			break;
 
 		offset += sg_src->length;
-		sg_src = scatterwalk_sg_next(sg_src);
+		sg_src = sg_next(sg_src);
 	}
 
 	/* start - offset is the number of bytes to advance in the scatterlist
@@ -187,9 +187,9 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
 	while (len && (nx_sg - nx_dst) < sglen) {
 		n = scatterwalk_clamp(&walk, len);
 		if (!n) {
-			/* In cases where we have scatterlist chain scatterwalk_sg_next
+			/* In cases where we have scatterlist chain sg_next
 			 * handles with it properly */
-			scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
+			scatterwalk_start(&walk, sg_next(walk.sg));
 			n = scatterwalk_clamp(&walk, len);
 		}
 		dst = scatterwalk_map(&walk);
@@ -994,7 +994,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
 
 			scatterwalk_advance(&dd->in_walk, 4);
 			if (dd->in_sg->length == _calc_walked(in)) {
-				dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+				dd->in_sg = sg_next(dd->in_sg);
 				if (dd->in_sg) {
 					scatterwalk_start(&dd->in_walk,
 							  dd->in_sg);
@@ -1026,7 +1026,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
 			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
 			scatterwalk_advance(&dd->out_walk, 4);
 			if (dd->out_sg->length == _calc_walked(out)) {
-				dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+				dd->out_sg = sg_next(dd->out_sg);
 				if (dd->out_sg) {
 					scatterwalk_start(&dd->out_walk,
 							  dd->out_sg);
@@ -921,7 +921,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
 
 			scatterwalk_advance(&dd->in_walk, 4);
 			if (dd->in_sg->length == _calc_walked(in)) {
-				dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+				dd->in_sg = sg_next(dd->in_sg);
 				if (dd->in_sg) {
 					scatterwalk_start(&dd->in_walk,
 							  dd->in_sg);
@@ -953,7 +953,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
 			*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
 			scatterwalk_advance(&dd->out_walk, 4);
 			if (dd->out_sg->length == _calc_walked(out)) {
-				dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+				dd->out_sg = sg_next(dd->out_sg);
 				if (dd->out_sg) {
 					scatterwalk_start(&dd->out_walk,
 							  dd->out_sg);
@@ -965,9 +965,9 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
 			}
 		}
 
-		dd->total -= DES_BLOCK_SIZE;
+		BUG_ON(dd->total < DES_BLOCK_SIZE);
 
-		BUG_ON(dd->total < 0);
+		dd->total -= DES_BLOCK_SIZE;
 
 		/* Clear IRQ status */
 		status &= ~DES_REG_IRQ_DATA_OUT;
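
The reordering above is more than cosmetic: the byte counter is an unsigned size, so the old check "total < 0" could never fire, and an underflowing subtraction wraps around to a huge positive value instead of trapping. Checking before subtracting closes that hole. A small standalone illustration (names are not the driver's):

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	#define DES_BLOCK_SIZE 8

	static size_t consume_block(size_t total)
	{
		assert(total >= DES_BLOCK_SIZE);	/* check first, as the fix does */
		return total - DES_BLOCK_SIZE;
	}

	int main(void)
	{
		size_t total = 16;

		total = consume_block(total);	/* 8 */
		total = consume_block(total);	/* 0; a third call would trap, not wrap */
		printf("%zu\n", total);
		return 0;
	}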
@@ -47,7 +47,6 @@
 #ifndef ADF_ACCEL_DEVICES_H_
 #define ADF_ACCEL_DEVICES_H_
 #include <linux/module.h>
-#include <linux/atomic.h>
 #include <linux/list.h>
 #include <linux/proc_fs.h>
 #include <linux/io.h>
@@ -148,6 +147,11 @@ struct adf_hw_device_data {
 	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
 	void (*free_irq)(struct adf_accel_dev *accel_dev);
 	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+	int (*init_arb)(struct adf_accel_dev *accel_dev);
+	void (*exit_arb)(struct adf_accel_dev *accel_dev);
+	void (*enable_ints)(struct adf_accel_dev *accel_dev);
 	const char *fw_name;
 	uint32_t pci_dev_id;
 	uint32_t fuses;
@@ -82,28 +82,15 @@ struct adf_reset_dev_data {
 	struct work_struct reset_work;
 };
 
-#define PPDSTAT_OFFSET 0x7E
 static void adf_dev_restore(struct adf_accel_dev *accel_dev)
 {
 	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
 	struct pci_dev *parent = pdev->bus->self;
-	uint16_t ppdstat = 0, bridge_ctl = 0;
-	int pending = 0;
+	uint16_t bridge_ctl = 0;
 
 	pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
-	pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
-	pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
-	if (pending) {
-		int ctr = 0;
-		do {
-			msleep(100);
-			pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
-			pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
-		} while (pending && ctr++ < 10);
-	}
-	if (pending)
+	if (!pci_wait_for_pending_transaction(pdev))
 		pr_info("QAT: Transaction still in progress. Proceeding\n");
 
 	pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
@@ -125,8 +112,9 @@ static void adf_device_reset_worker(struct work_struct *work)
 
 	adf_dev_restarting_notify(accel_dev);
 	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
 	adf_dev_restore(accel_dev);
-	if (adf_dev_start(accel_dev)) {
+	if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
 		/* The device hanged and we can't restart it so stop here */
 		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
 		kfree(reset_data);
@@ -148,8 +136,8 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
 {
 	struct adf_reset_dev_data *reset_data;
 
-	if (adf_dev_started(accel_dev) &&
-	    !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+	if (!adf_dev_started(accel_dev) ||
+	    test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
 		return 0;
 
 	set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
@@ -50,6 +50,7 @@
 #include <linux/seq_file.h>
 #include "adf_accel_devices.h"
 #include "adf_cfg.h"
+#include "adf_common_drv.h"
 
 static DEFINE_MUTEX(qat_cfg_read_lock);
 
@@ -159,6 +160,7 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
 	down_write(&dev_cfg_data->lock);
 	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
 	up_write(&dev_cfg_data->lock);
+	clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
 }
 
 /**
@@ -93,7 +93,7 @@ int adf_service_unregister(struct service_hndl *service);
 int adf_dev_init(struct adf_accel_dev *accel_dev);
 int adf_dev_start(struct adf_accel_dev *accel_dev);
 int adf_dev_stop(struct adf_accel_dev *accel_dev);
-int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
 int adf_ctl_dev_register(void);
 void adf_ctl_dev_unregister(void);
|
||||||
if (adf_dev_stop(accel_dev)) {
|
if (adf_dev_stop(accel_dev)) {
|
||||||
pr_err("QAT: Failed to stop qat_dev%d\n", id);
|
pr_err("QAT: Failed to stop qat_dev%d\n", id);
|
||||||
ret = -EFAULT;
|
ret = -EFAULT;
|
||||||
|
} else {
|
||||||
|
adf_dev_shutdown(accel_dev);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -343,7 +345,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
|
||||||
if (!adf_dev_started(accel_dev)) {
|
if (!adf_dev_started(accel_dev)) {
|
||||||
pr_info("QAT: Starting acceleration device qat_dev%d.\n",
|
pr_info("QAT: Starting acceleration device qat_dev%d.\n",
|
||||||
ctl_data->device_id);
|
ctl_data->device_id);
|
||||||
ret = adf_dev_start(accel_dev);
|
ret = adf_dev_init(accel_dev);
|
||||||
|
if (!ret)
|
||||||
|
ret = adf_dev_start(accel_dev);
|
||||||
} else {
|
} else {
|
||||||
pr_info("QAT: Acceleration device qat_dev%d already started.\n",
|
pr_info("QAT: Acceleration device qat_dev%d already started.\n",
|
||||||
ctl_data->device_id);
|
ctl_data->device_id);
|
||||||
|
@ -351,6 +355,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
|
pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
|
||||||
adf_dev_stop(accel_dev);
|
adf_dev_stop(accel_dev);
|
||||||
|
adf_dev_shutdown(accel_dev);
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
kfree(ctl_data);
|
kfree(ctl_data);
|
||||||
|
|
|
@@ -108,26 +108,47 @@ int adf_service_unregister(struct service_hndl *service)
 EXPORT_SYMBOL_GPL(adf_service_unregister);
 
 /**
- * adf_dev_start() - Start acceleration service for the given accel device
+ * adf_dev_init() - Init data structures and services for the given accel device
  * @accel_dev:    Pointer to acceleration device.
  *
- * Function notifies all the registered services that the acceleration device
- * is ready to be used.
- * To be used by QAT device specific drivers.
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
  *
  * Return: 0 on success, error code othewise.
  */
-int adf_dev_start(struct adf_accel_dev *accel_dev)
+int adf_dev_init(struct adf_accel_dev *accel_dev)
 {
 	struct service_hndl *service;
 	struct list_head *list_itr;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 
+	if (!hw_data) {
+		dev_err(&GET_DEV(accel_dev),
+			"QAT: Failed to init device - hw_data not set\n");
+		return -EFAULT;
+	}
+
 	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
 		pr_info("QAT: Device not configured\n");
 		return -EFAULT;
 	}
-	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+	if (adf_init_etr_data(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
+		return -EFAULT;
+	}
+
+	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
+		return -EFAULT;
+	}
+
+	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
+		return -EFAULT;
+	}
+
+	hw_data->enable_ints(accel_dev);
 
 	if (adf_ae_init(accel_dev)) {
 		pr_err("QAT: Failed to initialise Acceleration Engine\n");
|
||||||
|
|
||||||
hw_data->enable_error_correction(accel_dev);
|
hw_data->enable_error_correction(accel_dev);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(adf_dev_init);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* adf_dev_start() - Start acceleration service for the given accel device
|
||||||
|
* @accel_dev: Pointer to acceleration device.
|
||||||
|
*
|
||||||
|
* Function notifies all the registered services that the acceleration device
|
||||||
|
* is ready to be used.
|
||||||
|
* To be used by QAT device specific drivers.
|
||||||
|
*
|
||||||
|
* Return: 0 on success, error code othewise.
|
||||||
|
*/
|
||||||
|
int adf_dev_start(struct adf_accel_dev *accel_dev)
|
||||||
|
{
|
||||||
|
struct service_hndl *service;
|
||||||
|
struct list_head *list_itr;
|
||||||
|
|
||||||
|
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
|
||||||
|
|
||||||
if (adf_ae_start(accel_dev)) {
|
if (adf_ae_start(accel_dev)) {
|
||||||
pr_err("QAT: AE Start Failed\n");
|
pr_err("QAT: AE Start Failed\n");
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
@@ -232,16 +274,15 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
  */
 int adf_dev_stop(struct adf_accel_dev *accel_dev)
 {
-	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct service_hndl *service;
 	struct list_head *list_itr;
-	int ret, wait = 0;
+	bool wait = false;
+	int ret;
 
 	if (!adf_dev_started(accel_dev) &&
 	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
 		return 0;
 	}
-	clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
 
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
clear_bit(accel_dev->accel_id, &service->start_status);
|
clear_bit(accel_dev->accel_id, &service->start_status);
|
||||||
} else if (ret == -EAGAIN) {
|
} else if (ret == -EAGAIN) {
|
||||||
wait = 1;
|
wait = true;
|
||||||
clear_bit(accel_dev->accel_id, &service->start_status);
|
clear_bit(accel_dev->accel_id, &service->start_status);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -278,13 +319,36 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
 	if (wait)
 		msleep(100);
 
-	if (adf_dev_started(accel_dev)) {
+	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
 		if (adf_ae_stop(accel_dev))
 			pr_err("QAT: failed to stop AE\n");
 		else
 			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
 	}
 
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+/**
+ * adf_dev_shutdown() - shutdown acceleration services and data strucutures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Cleanup the ring data structures and the admin comms and arbitration
+ * services.
+ */
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	if (!hw_data) {
+		dev_err(&GET_DEV(accel_dev),
+			"QAT: Failed to shutdown device - hw_data not set\n");
+		return;
+	}
+
 	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
 		if (adf_ae_fw_release(accel_dev))
 			pr_err("QAT: Failed to release the ucode\n");
@@ -335,9 +399,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
 	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
 		adf_cfg_del_all(accel_dev);
 
-	return 0;
+	if (hw_data->exit_arb)
+		hw_data->exit_arb(accel_dev);
+
+	if (hw_data->exit_admin_comms)
+		hw_data->exit_admin_comms(accel_dev);
+
+	adf_cleanup_etr_data(accel_dev);
 }
-EXPORT_SYMBOL_GPL(adf_dev_stop);
+EXPORT_SYMBOL_GPL(adf_dev_shutdown);
 
 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
 {
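
Taken together, these hunks split the old monolithic start path into a symmetric lifecycle: adf_dev_init() pairs with adf_dev_shutdown(), and adf_dev_start() pairs with adf_dev_stop(). A sketch of how a caller is expected to sequence them, using the functions from this series (hypothetical wrapper, error handling abbreviated):

	int qat_bring_up(struct adf_accel_dev *accel_dev)
	{
		int ret;

		ret = adf_dev_init(accel_dev);	/* rings, admin comms, arbiter */
		if (ret)
			return ret;

		ret = adf_dev_start(accel_dev);	/* notify services, start AEs */
		if (ret) {
			adf_dev_stop(accel_dev);
			adf_dev_shutdown(accel_dev);	/* undo adf_dev_init() */
		}
		return ret;
	}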
@@ -48,7 +48,6 @@
 #define ADF_TRANSPORT_INTRN_H
 
 #include <linux/interrupt.h>
-#include <linux/atomic.h>
 #include <linux/spinlock_types.h>
 #include "adf_transport.h"
 
@@ -301,5 +301,5 @@ struct icp_qat_hw_cipher_aes256_f8 {
 
 struct icp_qat_hw_cipher_algo_blk {
 	struct icp_qat_hw_cipher_aes256_f8 aes;
-};
+} __aligned(64);
 #endif
File diff suppressed because it is too large
@@ -72,12 +72,24 @@ struct qat_crypto_request_buffs {
 	struct qat_alg_buf_list *blout;
 	dma_addr_t bloutp;
 	size_t sz;
+	size_t sz_out;
 };
 
+struct qat_crypto_request;
+
 struct qat_crypto_request {
 	struct icp_qat_fw_la_bulk_req req;
-	struct qat_alg_session_ctx *ctx;
-	struct aead_request *areq;
+	union {
+		struct qat_alg_aead_ctx *aead_ctx;
+		struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+	};
+	union {
+		struct aead_request *aead_req;
+		struct ablkcipher_request *ablkcipher_req;
+	};
 	struct qat_crypto_request_buffs buf;
+	void (*cb)(struct icp_qat_fw_la_resp *resp,
+		   struct qat_crypto_request *req);
 };
 
 #endif
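
The new cb member turns completion into a per-request dispatch, so the common response handler no longer has to assume every request is an AEAD one. A hedged sketch of what the dispatch site looks like (treat the cast and field names as illustrative of the QAT response path, not a verbatim excerpt):

	static void qat_alg_callback_sketch(void *resp_raw)
	{
		struct icp_qat_fw_la_resp *resp = resp_raw;
		struct qat_crypto_request *qat_req =
			(void *)(__force long)resp->opaque_data;

		/* aead or ablkcipher completion, chosen when the request was built */
		qat_req->cb(resp, qat_req);
	}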
@@ -46,6 +46,7 @@
  */
 #include <adf_accel_devices.h>
 #include "adf_dh895xcc_hw_data.h"
+#include "adf_common_drv.h"
 #include "adf_drv.h"
 
 /* Worker thread to service arbiter mappings based on dev SKUs */
@@ -182,6 +183,19 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 	}
 }
 
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+		   ADF_DH895XCC_SMIA0_MASK);
+	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+		   ADF_DH895XCC_SMIA1_MASK);
+}
+
 void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class = &dh895xcc_class;
@@ -206,6 +220,11 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
 	hw_data->get_misc_bar_id = get_misc_bar_id;
 	hw_data->get_sku = get_sku;
 	hw_data->fw_name = ADF_DH895XCC_FW;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->enable_ints = adf_enable_ints;
 }
 
 void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
@@ -90,9 +90,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
 	int i;
 
-	adf_exit_admin_comms(accel_dev);
-	adf_exit_arb(accel_dev);
-	adf_cleanup_etr_data(accel_dev);
+	adf_dev_shutdown(accel_dev);
 
 	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
@@ -119,7 +117,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 	kfree(accel_dev);
 }
 
-static int qat_dev_start(struct adf_accel_dev *accel_dev)
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
 {
 	int cpus = num_online_cpus();
 	int banks = GET_MAX_BANKS(accel_dev);
@@ -206,7 +204,7 @@ static int qat_dev_start(struct adf_accel_dev *accel_dev)
 		goto err;
 
 	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-	return adf_dev_start(accel_dev);
+	return 0;
 err:
 	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
 	return -EINVAL;
@@ -217,7 +215,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_accel_dev *accel_dev;
 	struct adf_accel_pci *accel_pci_dev;
 	struct adf_hw_device_data *hw_data;
-	void __iomem *pmisc_bar_addr = NULL;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
 	int ret;
@@ -347,8 +344,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			ret = -EFAULT;
 			goto out_err;
 		}
-		if (i == ADF_DH895XCC_PMISC_BAR)
-			pmisc_bar_addr = bar->virt_addr;
 	}
 	pci_set_master(pdev);
 
@@ -358,36 +353,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_err;
 	}
 
-	if (adf_init_etr_data(accel_dev)) {
-		dev_err(&pdev->dev, "Failed initialize etr\n");
-		ret = -EFAULT;
-		goto out_err;
-	}
-
-	if (adf_init_admin_comms(accel_dev)) {
-		dev_err(&pdev->dev, "Failed initialize admin comms\n");
-		ret = -EFAULT;
-		goto out_err;
-	}
-
-	if (adf_init_arb(accel_dev)) {
-		dev_err(&pdev->dev, "Failed initialize hw arbiter\n");
-		ret = -EFAULT;
-		goto out_err;
-	}
 	if (pci_save_state(pdev)) {
 		dev_err(&pdev->dev, "Failed to save pci state\n");
 		ret = -ENOMEM;
 		goto out_err;
 	}
 
-	/* Enable bundle and misc interrupts */
-	ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
-		   ADF_DH895XCC_SMIA0_MASK);
-	ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
-		   ADF_DH895XCC_SMIA1_MASK);
+	ret = adf_dev_configure(accel_dev);
+	if (ret)
+		goto out_err;
 
-	ret = qat_dev_start(accel_dev);
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err;
+
+	ret = adf_dev_start(accel_dev);
 	if (ret) {
 		adf_dev_stop(accel_dev);
 		goto out_err;
@@ -64,7 +64,7 @@ int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
 			err = dma_map_sg(dev, sg, 1, dir);
 			if (!err)
 				return -EFAULT;
-			sg = scatterwalk_sg_next(sg);
+			sg = sg_next(sg);
 		}
 	} else {
 		err = dma_map_sg(dev, sg, nents, dir);
@@ -81,7 +81,7 @@ void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
 	if (chained)
 		while (sg) {
 			dma_unmap_sg(dev, sg, 1, dir);
-			sg = scatterwalk_sg_next(sg);
+			sg = sg_next(sg);
 		}
 	else
 		dma_unmap_sg(dev, sg, nents, dir);
@@ -100,7 +100,7 @@ int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
 		nbytes -= sg->length;
 		if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
 			*chained = true;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	return nents;
@@ -285,7 +285,7 @@ static int qce_ahash_update(struct ahash_request *req)
 			break;
 		len += sg_dma_len(sg);
 		sg_last = sg;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	if (!sg_last)
@@ -940,7 +940,7 @@ static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
 			break;
 		}
 		nbytes -= sg->length;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	return nbytes;
@@ -743,7 +743,7 @@ static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
 	if (unlikely(chained))
 		while (sg) {
 			dma_map_sg(dev, sg, 1, dir);
-			sg = scatterwalk_sg_next(sg);
+			sg = sg_next(sg);
 		}
 	else
 		dma_map_sg(dev, sg, nents, dir);
@@ -755,7 +755,7 @@ static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
 {
 	while (sg) {
 		dma_unmap_sg(dev, sg, 1, dir);
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 }
 
@@ -915,7 +915,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 		link_tbl_ptr->j_extent = 0;
 		link_tbl_ptr++;
 		cryptlen -= sg_dma_len(sg);
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	/* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -1102,7 +1102,7 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
 		nbytes -= sg->length;
 		if (!sg_is_last(sg) && (sg + 1)->length == 0)
 			*chained = true;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	return sg_nents;
@@ -479,13 +479,13 @@ static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
 		.dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 		.dst_maxburst = 4,
 	};
 	struct dma_slave_config cryp2mem = {
 		.direction = DMA_DEV_TO_MEM,
 		.src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
 		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 		.src_maxburst = 4,
 	};
 
 	dma_cap_zero(device_data->dma.mask);
 	dma_cap_set(DMA_SLAVE, device_data->dma.mask);
@@ -814,7 +814,7 @@ static int get_nents(struct scatterlist *sg, int nbytes)
 
 	while (nbytes > 0) {
 		nbytes -= sg->length;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 		nents++;
 	}
 
@@ -1774,8 +1774,8 @@ static int ux500_cryp_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
 
 static const struct of_device_id ux500_cryp_match[] = {
 	{ .compatible = "stericsson,ux500-cryp" },
 	{ },
 };
 
 static struct platform_driver cryp_driver = {
@@ -50,6 +50,7 @@ struct af_alg_type {
 	void (*release)(void *private);
 	int (*setkey)(void *private, const u8 *key, unsigned int keylen);
 	int (*accept)(void *private, struct sock *sk);
+	int (*setauthsize)(void *private, unsigned int authsize);
 
 	struct proto_ops *ops;
 	struct module *owner;
@@ -33,21 +33,13 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
 	sg1[num - 1].page_link |= 0x01;
 }
 
-static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
-{
-	if (sg_is_last(sg))
-		return NULL;
-
-	return (++sg)->length ? sg : sg_chain_ptr(sg);
-}
-
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
 					    struct scatterlist *sg,
 					    int chain, int num)
 {
 	if (chain) {
 		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
+		sg = sg_next(sg);
 	}
 
 	if (sg)
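
The helper deleted above is the source of the tree-wide scatterwalk_sg_next -> sg_next conversion in this pull: it duplicated logic that lib/scatterlist already provides, namely returning NULL at the final entry and transparently following a chain pointer to the next list segment. A simplified standalone sketch of that shared walk (the types are illustrative, not the kernel's struct scatterlist, where these flags live in page_link bits):

	struct sg_sketch {
		unsigned int length;
		int is_last;			/* kernel: SG_END bit */
		int is_chain;			/* kernel: SG_CHAIN bit */
		struct sg_sketch *chain;	/* kernel: sg_chain_ptr() */
	};

	static struct sg_sketch *sg_next_sketch(struct sg_sketch *sg)
	{
		if (sg->is_last)
			return (void *)0;
		sg++;
		return sg->is_chain ? sg->chain : sg;
	}

	int main(void)
	{
		struct sg_sketch sg[2] = {
			{ .length = 16 },
			{ .length = 16, .is_last = 1 },
		};
		struct sg_sketch *p = sg_next_sketch(&sg[0]);	/* -> &sg[1] */
		return sg_next_sketch(p) == (void *)0 ? 0 : 1;	/* end of list */
	}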
@@ -1147,7 +1147,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
  * cipher operation completes.
  *
  * The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template:
+ * must comply with the following template
  *
  * void callback_function(struct crypto_async_request *req, int error)
  */
@@ -1174,7 +1174,7 @@ static inline void ablkcipher_request_set_callback(
  *
  * For encryption, the source is treated as the plaintext and the
  * destination is the ciphertext. For a decryption operation, the use is
- * reversed: the source is the ciphertext and the destination is the plaintext.
+ * reversed - the source is the ciphertext and the destination is the plaintext.
  */
 static inline void ablkcipher_request_set_crypt(
 	struct ablkcipher_request *req,
@@ -1412,6 +1412,9 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
  */
 static inline int crypto_aead_decrypt(struct aead_request *req)
 {
+	if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+		return -EINVAL;
+
 	return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
 }
 
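
The added guard leans on the AEAD convention that, for decryption, req->cryptlen covers the ciphertext plus the appended authentication tag. A request shorter than the tag itself can never verify, so it is rejected up front rather than handed to the algorithm. As a worked example with a 16-byte tag: cryptlen = 32 leaves 32 - 16 = 16 bytes of actual ciphertext, while cryptlen = 12 would imply a negative plaintext length, hence the -EINVAL.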
@@ -1506,7 +1509,7 @@ static inline void aead_request_free(struct aead_request *req)
  * completes
  *
  * The callback function is registered with the aead_request handle and
- * must comply with the following template:
+ * must comply with the following template
  *
  * void callback_function(struct crypto_async_request *req, int error)
  */
@@ -1533,7 +1536,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
  *
  * For encryption, the source is treated as the plaintext and the
  * destination is the ciphertext. For a decryption operation, the use is
- * reversed: the source is the ciphertext and the destination is the plaintext.
+ * reversed - the source is the ciphertext and the destination is the plaintext.
  *
  * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
  * the caller must concatenate the ciphertext followed by the
@@ -12,8 +12,10 @@
 #ifndef LINUX_HWRANDOM_H_
 #define LINUX_HWRANDOM_H_
 
+#include <linux/completion.h>
 #include <linux/types.h>
 #include <linux/list.h>
+#include <linux/kref.h>
 
 /**
  * struct hwrng - Hardware Random Number Generator driver
@@ -44,6 +46,8 @@ struct hwrng {
 
 	/* internal. */
 	struct list_head list;
+	struct kref ref;
+	struct completion cleanup_done;
 };
 
 /** Register a new Hardware Random Number Generator driver. */
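
These two fields carry the hwrng race fix mentioned in the merge summary: readers pin the current generator with a reference, and unregistration blocks until the last reference signals cleanup_done. A sketch of the pattern under those assumptions (helper names here are illustrative; the core's real functions differ in detail):

	static void cleanup_done_cb(struct kref *ref)
	{
		struct hwrng *rng = container_of(ref, struct hwrng, ref);

		complete(&rng->cleanup_done);
	}

	static void put_rng(struct hwrng *rng)
	{
		kref_put(&rng->ref, cleanup_done_cb);
	}

	/* unregister side: drop our reference, then block until readers finish */
	static void drain_rng(struct hwrng *rng)
	{
		put_rng(rng);
		wait_for_completion(&rng->cleanup_done);
	}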
@@ -596,6 +596,11 @@ EXPORT_SYMBOL(memset);
  * @s: Pointer to the start of the area.
  * @count: The size of the area.
  *
+ * Note: usually using memset() is just fine (!), but in cases
+ * where clearing out _local_ data at the end of a scope is
+ * necessary, memzero_explicit() should be used instead in
+ * order to prevent the compiler from optimising away zeroing.
+ *
  * memzero_explicit() doesn't need an arch-specific version as
  * it just invokes the one of memset() implicitly.
  */
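
A userspace sketch of what the new note warns about: a memset() of a local buffer that is never read again is a dead store the optimiser may drop, while following the clear with a compiler barrier (essentially what memzero_explicit() does) forces it to happen. Assumptions: GCC/Clang inline-asm barrier syntax; not kernel code.

	#include <stdio.h>
	#include <string.h>

	static void memzero_explicit_demo(void *s, size_t count)
	{
		memset(s, 0, count);
		/* barrier so the compiler cannot treat the clear as a dead store */
		__asm__ __volatile__("" : : "r" (s) : "memory");
	}

	int main(void)
	{
		char key[32] = "not really a secret";

		printf("%s\n", key);
		memset(key, 0, sizeof(key));		/* may be elided: key is dead here */
		memzero_explicit_demo(key, sizeof(key));	/* survives optimisation */
		return 0;
	}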