Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes three issues:

   - if ccp is loaded on a machine without ccp, it will incorrectly
     activate causing all requests to fail.  Fixed by preventing ccp
     from loading if hardware isn't available.

   - not all IRQs were enabled for the qat driver, leading to potential
     stalls when it is used

   - disabled buggy AVX CTR implementation in aesni"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: aesni - disable "by8" AVX CTR optimization
  crypto: ccp - Check for CCP before registering crypto algs
  crypto: qat - Enable all 32 IRQs
commit 2368a9426f
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -481,7 +481,7 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 		crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
-#ifdef CONFIG_AS_AVX
+#if 0 /* temporary disabled due to failing crypto tests */
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv)
 {
@@ -1522,7 +1522,7 @@ static int __init aesni_init(void)
 		aesni_gcm_dec_tfm = aesni_gcm_dec;
 	}
 	aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
+#if 0 /* temporary disabled due to failing crypto tests */
 	if (cpu_has_avx) {
 		/* optimize performance of ctr mode encryption transform */
 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
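With the "by8" path compiled out, ctr(aes) requests fall back to the plain AES-NI CTR transform selected above. As a side note (not part of this patch), a small userspace smoke test over AF_ALG can confirm that whichever ctr(aes) implementation the kernel now picks still encrypts; this is a hypothetical sketch that assumes CONFIG_CRYPTO_USER_API_SKCIPHER is enabled and omits error handling for brevity:

/* Hypothetical AF_ALG smoke test (not part of this commit): encrypt one
 * all-zero block with ctr(aes) using an all-zero key and counter and
 * print the ciphertext.  Whatever ctr(aes) driver the kernel selects
 * (now the non-"by8" aesni path on AVX machines) services the request.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "ctr(aes)",
	};
	unsigned char key[16] = { 0 }, iv[16] = { 0 };
	unsigned char in[16] = { 0 }, out[16];
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = in, .iov_len = sizeof(in) };
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);		/* operation: encrypt */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(unsigned int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);		/* initial counter block */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*alg_iv) + sizeof(iv));
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	sendmsg(opfd, &msg, 0);
	read(opfd, out, sizeof(out));

	for (i = 0; i < 16; i++)
		printf("%02x", out[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}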
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -367,6 +367,10 @@ static int ccp_crypto_init(void)
 {
 	int ret;
 
+	ret = ccp_present();
+	if (ret)
+		return ret;
+
 	spin_lock_init(&req_queue_lock);
 	INIT_LIST_HEAD(&req_queue.cmds);
 	req_queue.backlog = &req_queue.cmds;
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -54,6 +54,20 @@ static inline void ccp_del_device(struct ccp_device *ccp)
 	ccp_dev = NULL;
 }
 
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void)
+{
+	if (ccp_get_device())
+		return 0;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccp_present);
+
 /**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -66,7 +66,7 @@
 #define ADF_DH895XCC_ETR_MAX_BANKS 32
 #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
 #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
-#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
 #define ADF_DH895XCC_SMIA1_MASK 0x1
 /* Error detection and correction */
 #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
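For context (not part of this hunk): these masks are what the driver writes into the SMIAPF0/SMIAPF1 interrupt-mask CSRs when enabling interrupts, so widening SMIA0 from 0xFFFF to 0xFFFFFFFF unmasks bundle IRQs for all 32 ETR banks (ADF_DH895XCC_ETR_MAX_BANKS) rather than only the first 16. A rough sketch of that step, with GET_BARS, ADF_CSR_WR and ADF_DH895XCC_PMISC_BAR assumed from the surrounding dh895xcc driver code:

/* Sketch reconstructed from the dh895xcc driver (not part of this patch):
 * write the SMIA masks to the SMIAPF offsets in the PMISC BAR.  With
 * SMIA0 now 0xFFFFFFFF, all 32 bundle interrupt sources are unmasked
 * instead of just bits 0-15.
 */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_dh895xcc_hw_data.h"

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;

	/* Enable bundle (ring bank) and misc interrupts */
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
		   ADF_DH895XCC_SMIA0_MASK);
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
		   ADF_DH895XCC_SMIA1_MASK);
}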
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -26,6 +26,13 @@ struct ccp_cmd;
 #if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
 	defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
 
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void);
+
 /**
  * ccp_enqueue_cmd - queue an operation for processing by the CCP
  *
@@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd);
 
 #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
 
+static inline int ccp_present(void)
+{
+	return -ENODEV;
+}
+
 static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
 	return -ENODEV;
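Taken together, the declaration above and the stub below let code outside the CCP driver call ccp_present() whether or not CONFIG_CRYPTO_DEV_CCP_DD is set. A hypothetical consumer module (illustration only, not from this patch) showing the intended pattern, the same one ccp_crypto_init() now follows:

/* Hypothetical example module: refuse to load when no CCP is available.
 * With the driver built this resolves to the exported ccp_present();
 * without it, the static inline stub returns -ENODEV, so module init
 * fails cleanly either way.
 */
#include <linux/module.h>
#include <linux/ccp.h>

static int __init ccp_consumer_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret)
		return ret;	/* no CCP device: don't register anything */

	/* ... register CCP-backed services here ... */
	return 0;
}

static void __exit ccp_consumer_exit(void)
{
	/* ... tear down anything registered in init ... */
}

module_init(ccp_consumer_init);
module_exit(ccp_consumer_exit);
MODULE_LICENSE("GPL");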