staging: ccree: remove ccree staging driver copy
The ccree driver is now in the cryptodev tree, so remove it from
drivers/staging as it's no longer needed here.

Based on a patch from Gilad, but the mailing list didn't like it :(

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 45b7c73226
commit 8b02179654
drivers/staging/Kconfig
@@ -114,8 +114,6 @@ source "drivers/staging/greybus/Kconfig"
 
 source "drivers/staging/vc04_services/Kconfig"
 
-source "drivers/staging/ccree/Kconfig"
-
 source "drivers/staging/typec/Kconfig"
 
 source "drivers/staging/vboxvideo/Kconfig"
drivers/staging/Makefile
@@ -49,6 +49,5 @@ obj-$(CONFIG_MOST) += most/
 obj-$(CONFIG_KS7010) += ks7010/
 obj-$(CONFIG_GREYBUS) += greybus/
 obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
-obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
 obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
 obj-$(CONFIG_PI433) += pi433/
drivers/staging/ccree/Kconfig
@@ -1,27 +0,0 @@
# SPDX-License-Identifier: GPL-2.0

config CRYPTO_DEV_CCREE
	tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
	depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
	default n
	select CRYPTO_HASH
	select CRYPTO_BLKCIPHER
	select CRYPTO_DES
	select CRYPTO_AEAD
	select CRYPTO_AUTHENC
	select CRYPTO_SHA1
	select CRYPTO_MD5
	select CRYPTO_SHA256
	select CRYPTO_SHA512
	select CRYPTO_HMAC
	select CRYPTO_AES
	select CRYPTO_CBC
	select CRYPTO_ECB
	select CRYPTO_CTR
	select CRYPTO_XTS
	help
	  Say 'Y' to enable a driver for the Arm TrustZone CryptoCell
	  C7xx. Currently only the CryptoCell 712 REE is supported.
	  Choose this if you wish to use hardware acceleration of
	  cryptographic operations on the system REE.
	  If unsure say Y.
drivers/staging/ccree/Makefile
@@ -1,7 +0,0 @@
# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
ccree-$(CONFIG_PM) += cc_pm.o
drivers/staging/ccree/TODO
@@ -1,10 +0,0 @@

*************************************************************************
*                                                                       *
*   Arm Trust Zone CryptoCell REE Linux driver upstreaming TODO items   *
*                                                                       *
*************************************************************************

1. ???
(File diff suppressed because it is too large.)
drivers/staging/ccree/cc_aead.h
@@ -1,109 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_aead.h
 * ARM CryptoCell AEAD Crypto API
 */

#ifndef __CC_AEAD_H__
#define __CC_AEAD_H__

#include <linux/kernel.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>

/* mac_cmp - HW writes 8 B but all bytes hold the same value */
#define ICV_CMP_SIZE 8
#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
#define MAX_MAC_SIZE SHA256_DIGEST_SIZE

/* defines for AES GCM configuration buffer */
#define GCM_BLOCK_LEN_SIZE 8

#define GCM_BLOCK_RFC4_IV_OFFSET 4
#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
#define GCM_BLOCK_RFC4_NONCE_SIZE 4

/* Offsets into AES CCM configuration buffer */
#define CCM_B0_OFFSET 0
#define CCM_A0_OFFSET 16
#define CCM_CTR_COUNT_0_OFFSET 32
/* CCM B0 and CTR_COUNT constants. */
#define CCM_BLOCK_NONCE_OFFSET 1 /* Nonce offset inside B0 and CTR_COUNT */
#define CCM_BLOCK_NONCE_SIZE 3 /* Nonce size inside B0 and CTR_COUNT */
#define CCM_BLOCK_IV_OFFSET 4 /* IV offset inside B0 and CTR_COUNT */
#define CCM_BLOCK_IV_SIZE 8 /* IV size inside B0 and CTR_COUNT */

enum aead_ccm_header_size {
	ccm_header_size_null = -1,
	ccm_header_size_zero = 0,
	ccm_header_size_2 = 2,
	ccm_header_size_6 = 6,
	ccm_header_size_max = S32_MAX
};

struct aead_req_ctx {
	/* Allocate cache line although only 4 bytes are needed to
	 * assure next field falls @ cache line
	 * Used for both: digest HW compare and CCM/GCM MAC value
	 */
	u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
	u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;

	//used in gcm
	u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
	u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
	u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
	struct {
		u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
		u8 len_c[GCM_BLOCK_LEN_SIZE];
	} gcm_len_block;

	u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
	/* HW actual size input */
	unsigned int hw_iv_size ____cacheline_aligned;
	/* used to prevent cache coherence problem */
	u8 backup_mac[MAX_MAC_SIZE];
	u8 *backup_iv; /*store iv for generated IV flow*/
	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
	/* buffer for internal ccm configurations */
	dma_addr_t ccm_iv0_dma_addr;
	dma_addr_t icv_dma_addr; /* Phys. address of ICV */

	//used in gcm
	/* buffer for internal gcm configurations */
	dma_addr_t gcm_iv_inc1_dma_addr;
	/* buffer for internal gcm configurations */
	dma_addr_t gcm_iv_inc2_dma_addr;
	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
	bool is_gcm4543;

	u8 *icv_virt_addr; /* Virt. address of ICV */
	struct async_gen_req_ctx gen_ctx;
	struct cc_mlli assoc;
	struct cc_mlli src;
	struct cc_mlli dst;
	struct scatterlist *src_sgl;
	struct scatterlist *dst_sgl;
	unsigned int src_offset;
	unsigned int dst_offset;
	enum cc_req_dma_buf_type assoc_buff_type;
	enum cc_req_dma_buf_type data_buff_type;
	struct mlli_params mlli_params;
	unsigned int cryptlen;
	struct scatterlist ccm_adata_sg;
	enum aead_ccm_header_size ccm_hdr_size;
	unsigned int req_authsize;
	enum drv_cipher_mode cipher_mode;
	bool is_icv_fragmented;
	bool is_single_pass;
	bool plaintext_authenticate_only; //for gcm_rfc4543
};

int cc_aead_alloc(struct cc_drvdata *drvdata);
int cc_aead_free(struct cc_drvdata *drvdata);

#endif /*__CC_AEAD_H__*/
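A quick sanity check of the CCM configuration-buffer layout defined above: B0, A0 and CTR_COUNT_0 are three consecutive AES blocks, so the offsets are forced by AES_BLOCK_SIZE. This is an illustrative stand-alone sketch with the relevant constants copied locally, not part of the removed driver:

#include <assert.h>

/* Offsets from cc_aead.h above: the CCM configuration buffer holds three
 * AES blocks back to back -- B0, A0, CTR_COUNT_0.
 */
#define AES_BLOCK_SIZE 16
#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
#define CCM_B0_OFFSET 0
#define CCM_A0_OFFSET 16
#define CCM_CTR_COUNT_0_OFFSET 32

int main(void)
{
	assert(CCM_A0_OFFSET == CCM_B0_OFFSET + AES_BLOCK_SIZE);
	assert(CCM_CTR_COUNT_0_OFFSET == CCM_A0_OFFSET + AES_BLOCK_SIZE);
	assert(CCM_CONFIG_BUF_SIZE == CCM_CTR_COUNT_0_OFFSET + AES_BLOCK_SIZE);
	return 0;
}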
(File diff suppressed because it is too large.)
drivers/staging/ccree/cc_buffer_mgr.h
@@ -1,74 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_buffer_mgr.h
 * Buffer Manager
 */

#ifndef __CC_BUFFER_MGR_H__
#define __CC_BUFFER_MGR_H__

#include <crypto/algapi.h>

#include "cc_driver.h"

enum cc_req_dma_buf_type {
	CC_DMA_BUF_NULL = 0,
	CC_DMA_BUF_DLLI,
	CC_DMA_BUF_MLLI
};

enum cc_sg_cpy_direct {
	CC_SG_TO_BUF = 0,
	CC_SG_FROM_BUF = 1
};

struct cc_mlli {
	cc_sram_addr_t sram_addr;
	unsigned int nents; //sg nents
	unsigned int mlli_nents; //mlli nents might be different than the above
};

struct mlli_params {
	struct dma_pool *curr_pool;
	u8 *mlli_virt_addr;
	dma_addr_t mlli_dma_addr;
	u32 mlli_len;
};

int cc_buffer_mgr_init(struct cc_drvdata *drvdata);

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);

int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
			     unsigned int ivsize, unsigned int nbytes,
			     void *info, struct scatterlist *src,
			     struct scatterlist *dst, gfp_t flags);

void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
				unsigned int ivsize,
				struct scatterlist *src,
				struct scatterlist *dst);

int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);

void cc_unmap_aead_request(struct device *dev, struct aead_request *req);

int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags);

int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags);

void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert);

void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);

void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);

#endif /*__BUFFER_MGR_H__*/
(File diff suppressed because it is too large.)
drivers/staging/ccree/cc_cipher.h
@@ -1,74 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_cipher.h
 * ARM CryptoCell Cipher Crypto API
 */

#ifndef __CC_CIPHER_H__
#define __CC_CIPHER_H__

#include <linux/kernel.h>
#include <crypto/algapi.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"

/* Crypto cipher flags */
#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)

#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
					CC_CRYPTO_CIPHER_KEY_KFDE1 | \
					CC_CRYPTO_CIPHER_KEY_KFDE2 | \
					CC_CRYPTO_CIPHER_KEY_KFDE3)

struct blkcipher_req_ctx {
	struct async_gen_req_ctx gen_ctx;
	enum cc_req_dma_buf_type dma_buf_type;
	u32 in_nents;
	u32 in_mlli_nents;
	u32 out_nents;
	u32 out_mlli_nents;
	u8 *backup_info; /*store iv for generated IV flow*/
	u8 *iv;
	bool is_giv;
	struct mlli_params mlli_params;
};

int cc_cipher_alloc(struct cc_drvdata *drvdata);

int cc_cipher_free(struct cc_drvdata *drvdata);

#ifndef CRYPTO_ALG_BULK_MASK

#define CRYPTO_ALG_BULK_DU_512 0x00002000
#define CRYPTO_ALG_BULK_DU_4096 0x00004000
#define CRYPTO_ALG_BULK_MASK (CRYPTO_ALG_BULK_DU_512 |\
			      CRYPTO_ALG_BULK_DU_4096)
#endif /* CRYPTO_ALG_BULK_MASK */

#ifdef CRYPTO_TFM_REQ_HW_KEY

static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
{
	return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
}

#else

struct arm_hw_key_info {
	int hw_key1;
	int hw_key2;
};

static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
{
	return false;
}

#endif /* CRYPTO_TFM_REQ_HW_KEY */

#endif /*__CC_CIPHER_H__*/
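cc_is_hw_key() above uses a compile-time feature probe: when the kernel headers define CRYPTO_TFM_REQ_HW_KEY, the real flag test compiles in; otherwise a stub that always answers "no" is used, so callers never need an #ifdef. A minimal stand-alone model of that pattern (the flag macro is deliberately left undefined here, so the stub branch is taken; the function name is ours, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Compile-time probe as in cc_cipher.h: pick the real check if the flag
 * exists, a stub otherwise.
 */
#ifdef CRYPTO_TFM_REQ_HW_KEY
static bool is_hw_key_flags(unsigned long flags)
{
	return flags & CRYPTO_TFM_REQ_HW_KEY;
}
#else
static bool is_hw_key_flags(unsigned long flags)
{
	(void)flags; /* no HW-key support compiled in */
	return false;
}
#endif

int main(void)
{
	printf("hw key? %d\n", is_hw_key_flags(0xff)); /* prints 0 here */
	return 0;
}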
drivers/staging/ccree/cc_crypto_ctx.h
@@ -1,170 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_

#include <linux/types.h>

/* context size */
#ifndef CC_CTX_SIZE_LOG2
#if (CC_DEV_SHA_MAX > 256)
#define CC_CTX_SIZE_LOG2 8
#else
#define CC_CTX_SIZE_LOG2 7
#endif
#endif
#define CC_CTX_SIZE BIT(CC_CTX_SIZE_LOG2)
#define CC_DRV_CTX_SIZE_WORDS (CC_CTX_SIZE >> 2)

#define CC_DRV_DES_IV_SIZE 8
#define CC_DRV_DES_BLOCK_SIZE 8

#define CC_DRV_DES_ONE_KEY_SIZE 8
#define CC_DRV_DES_DOUBLE_KEY_SIZE 16
#define CC_DRV_DES_TRIPLE_KEY_SIZE 24
#define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE

#define CC_AES_IV_SIZE 16
#define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2)

#define CC_AES_BLOCK_SIZE 16
#define CC_AES_BLOCK_SIZE_WORDS 4

#define CC_AES_128_BIT_KEY_SIZE 16
#define CC_AES_128_BIT_KEY_SIZE_WORDS (CC_AES_128_BIT_KEY_SIZE >> 2)
#define CC_AES_192_BIT_KEY_SIZE 24
#define CC_AES_192_BIT_KEY_SIZE_WORDS (CC_AES_192_BIT_KEY_SIZE >> 2)
#define CC_AES_256_BIT_KEY_SIZE 32
#define CC_AES_256_BIT_KEY_SIZE_WORDS (CC_AES_256_BIT_KEY_SIZE >> 2)
#define CC_AES_KEY_SIZE_MAX CC_AES_256_BIT_KEY_SIZE
#define CC_AES_KEY_SIZE_WORDS_MAX (CC_AES_KEY_SIZE_MAX >> 2)

#define CC_MD5_DIGEST_SIZE 16
#define CC_SHA1_DIGEST_SIZE 20
#define CC_SHA224_DIGEST_SIZE 28
#define CC_SHA256_DIGEST_SIZE 32
#define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
#define CC_SHA384_DIGEST_SIZE 48
#define CC_SHA512_DIGEST_SIZE 64

#define CC_SHA1_BLOCK_SIZE 64
#define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
#define CC_MD5_BLOCK_SIZE 64
#define CC_MD5_BLOCK_SIZE_IN_WORDS 16
#define CC_SHA224_BLOCK_SIZE 64
#define CC_SHA256_BLOCK_SIZE 64
#define CC_SHA256_BLOCK_SIZE_IN_WORDS 16
#define CC_SHA1_224_256_BLOCK_SIZE 64
#define CC_SHA384_BLOCK_SIZE 128
#define CC_SHA512_BLOCK_SIZE 128

#if (CC_DEV_SHA_MAX > 256)
#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
#else /* Only up to SHA256 */
#define CC_DIGEST_SIZE_MAX CC_SHA256_DIGEST_SIZE
#define CC_HASH_BLOCK_SIZE_MAX CC_SHA256_BLOCK_SIZE /*512b*/
#endif

#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX

#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX

enum drv_engine_type {
	DRV_ENGINE_NULL = 0,
	DRV_ENGINE_AES = 1,
	DRV_ENGINE_DES = 2,
	DRV_ENGINE_HASH = 3,
	DRV_ENGINE_RC4 = 4,
	DRV_ENGINE_DOUT = 5,
	DRV_ENGINE_RESERVE32B = S32_MAX,
};

enum drv_crypto_alg {
	DRV_CRYPTO_ALG_NULL = -1,
	DRV_CRYPTO_ALG_AES = 0,
	DRV_CRYPTO_ALG_DES = 1,
	DRV_CRYPTO_ALG_HASH = 2,
	DRV_CRYPTO_ALG_C2 = 3,
	DRV_CRYPTO_ALG_HMAC = 4,
	DRV_CRYPTO_ALG_AEAD = 5,
	DRV_CRYPTO_ALG_BYPASS = 6,
	DRV_CRYPTO_ALG_NUM = 7,
	DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
};

enum drv_crypto_direction {
	DRV_CRYPTO_DIRECTION_NULL = -1,
	DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
	DRV_CRYPTO_DIRECTION_DECRYPT = 1,
	DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
	DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
};

enum drv_cipher_mode {
	DRV_CIPHER_NULL_MODE = -1,
	DRV_CIPHER_ECB = 0,
	DRV_CIPHER_CBC = 1,
	DRV_CIPHER_CTR = 2,
	DRV_CIPHER_CBC_MAC = 3,
	DRV_CIPHER_XTS = 4,
	DRV_CIPHER_XCBC_MAC = 5,
	DRV_CIPHER_OFB = 6,
	DRV_CIPHER_CMAC = 7,
	DRV_CIPHER_CCM = 8,
	DRV_CIPHER_CBC_CTS = 11,
	DRV_CIPHER_GCTR = 12,
	DRV_CIPHER_ESSIV = 13,
	DRV_CIPHER_BITLOCKER = 14,
	DRV_CIPHER_RESERVE32B = S32_MAX
};

enum drv_hash_mode {
	DRV_HASH_NULL = -1,
	DRV_HASH_SHA1 = 0,
	DRV_HASH_SHA256 = 1,
	DRV_HASH_SHA224 = 2,
	DRV_HASH_SHA512 = 3,
	DRV_HASH_SHA384 = 4,
	DRV_HASH_MD5 = 5,
	DRV_HASH_CBC_MAC = 6,
	DRV_HASH_XCBC_MAC = 7,
	DRV_HASH_CMAC = 8,
	DRV_HASH_MODE_NUM = 9,
	DRV_HASH_RESERVE32B = S32_MAX
};

enum drv_hash_hw_mode {
	DRV_HASH_HW_MD5 = 0,
	DRV_HASH_HW_SHA1 = 1,
	DRV_HASH_HW_SHA256 = 2,
	DRV_HASH_HW_SHA224 = 10,
	DRV_HASH_HW_SHA512 = 4,
	DRV_HASH_HW_SHA384 = 12,
	DRV_HASH_HW_GHASH = 6,
	DRV_HASH_HW_RESERVE32B = S32_MAX
};

/* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */
/* drv_crypto_key_type[2] is mapped to cipher_config2 */
enum drv_crypto_key_type {
	DRV_NULL_KEY = -1,
	DRV_USER_KEY = 0, /* 0x000 */
	DRV_ROOT_KEY = 1, /* 0x001 */
	DRV_PROVISIONING_KEY = 2, /* 0x010 */
	DRV_SESSION_KEY = 3, /* 0x011 */
	DRV_APPLET_KEY = 4, /* NA */
	DRV_PLATFORM_KEY = 5, /* 0x101 */
	DRV_CUSTOMER_KEY = 6, /* 0x110 */
	DRV_END_OF_KEYS = S32_MAX,
};

enum drv_crypto_padding_type {
	DRV_PADDING_NONE = 0,
	DRV_PADDING_PKCS7 = 1,
	DRV_PADDING_RESERVE32B = S32_MAX
};

#endif /* _CC_CRYPTO_CTX_H_ */
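Since cc_driver.h (further down this page) defines CC_DEV_SHA_MAX as 512 before including this header, the context-size arithmetic above resolves to CC_CTX_SIZE_LOG2 = 8, i.e. a 256-byte context of 64 32-bit words. A small stand-alone check of that derivation, with the kernel macros replaced by plain C:

#include <assert.h>

int main(void)
{
	/* CC_DEV_SHA_MAX is 512 (> 256), so CC_CTX_SIZE_LOG2 is 8. */
	unsigned int ctx_size_log2 = 8;
	unsigned int ctx_size = 1U << ctx_size_log2;  /* BIT(8) */
	unsigned int ctx_size_words = ctx_size >> 2;  /* bytes -> 32-bit words */

	assert(ctx_size == 256);
	assert(ctx_size_words == 64);
	return 0;
}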
drivers/staging/ccree/cc_debugfs.c
@@ -1,101 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/stringify.h>
#include "cc_driver.h"
#include "cc_crypto_ctx.h"
#include "cc_debugfs.h"

struct cc_debugfs_ctx {
	struct dentry *dir;
};

#define CC_DEBUG_REG(_X) {	\
	.name = __stringify(_X),\
	.offset = CC_REG(_X)	\
}

/*
 * This is a global var for the dentry of the
 * debugfs ccree/ dir. It is not tied down to
 * a specific instance of ccree, hence it is
 * global.
 */
static struct dentry *cc_debugfs_dir;

static struct debugfs_reg32 debug_regs[] = {
	CC_DEBUG_REG(HOST_SIGNATURE),
	CC_DEBUG_REG(HOST_IRR),
	CC_DEBUG_REG(HOST_POWER_DOWN_EN),
	CC_DEBUG_REG(AXIM_MON_ERR),
	CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
	CC_DEBUG_REG(HOST_IMR),
	CC_DEBUG_REG(AXIM_CFG),
	CC_DEBUG_REG(AXIM_CACHE_PARAMS),
	CC_DEBUG_REG(HOST_VERSION),
	CC_DEBUG_REG(GPR_HOST),
	CC_DEBUG_REG(AXIM_MON_COMP),
};

int __init cc_debugfs_global_init(void)
{
	cc_debugfs_dir = debugfs_create_dir("ccree", NULL);

	return !cc_debugfs_dir;
}

void __exit cc_debugfs_global_fini(void)
{
	debugfs_remove(cc_debugfs_dir);
}

int cc_debugfs_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_debugfs_ctx *ctx;
	struct debugfs_regset32 *regset;
	struct dentry *file;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = debug_regs;
	regset->nregs = ARRAY_SIZE(debug_regs);
	regset->base = drvdata->cc_base;

	ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
	if (!ctx->dir)
		return -ENFILE;

	file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
	if (!file) {
		debugfs_remove(ctx->dir);
		return -ENFILE;
	}

	file = debugfs_create_bool("coherent", 0400, ctx->dir,
				   &drvdata->coherent);

	if (!file) {
		debugfs_remove_recursive(ctx->dir);
		return -ENFILE;
	}

	drvdata->debugfs = ctx;

	return 0;
}

void cc_debugfs_fini(struct cc_drvdata *drvdata)
{
	struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;

	debugfs_remove_recursive(ctx->dir);
}
drivers/staging/ccree/cc_debugfs.h
@@ -1,32 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef __CC_DEBUGFS_H__
#define __CC_DEBUGFS_H__

#ifdef CONFIG_DEBUG_FS
int cc_debugfs_global_init(void);
void cc_debugfs_global_fini(void);

int cc_debugfs_init(struct cc_drvdata *drvdata);
void cc_debugfs_fini(struct cc_drvdata *drvdata);

#else

static inline int cc_debugfs_global_init(void)
{
	return 0;
}

static inline void cc_debugfs_global_fini(void) {}

static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
{
	return 0;
}

static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}

#endif

#endif /*__CC_SYSFS_H__*/
drivers/staging/ccree/cc_driver.c
@@ -1,474 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/crypto.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_debugfs.h"
#include "cc_cipher.h"
#include "cc_aead.h"
#include "cc_hash.h"
#include "cc_ivgen.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
#include "cc_fips.h"

bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");

bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");

void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
	char prefix[64];

	if (!buf)
		return;

	snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);

	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
		       len, false);
}

static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irr;
	u32 imr;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);
	if (irr == 0) { /* Probably shared interrupt line */
		dev_err(dev, "Got interrupt with empty IRR\n");
		return IRQ_NONE;
	}
	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);

	drvdata->irq = irr;
	/* Completion interrupt - most probable */
	if (irr & CC_COMP_IRQ_MASK) {
		/* Mask AXI completion interrupt - will be unmasked in
		 * Deferred service handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
		irr &= ~CC_COMP_IRQ_MASK;
		complete_request(drvdata);
	}
#ifdef CONFIG_CRYPTO_FIPS
	/* TEE FIPS interrupt */
	if (irr & CC_GPR0_IRQ_MASK) {
		/* Mask interrupt - will be unmasked in Deferred service
		 * handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
		irr &= ~CC_GPR0_IRQ_MASK;
		fips_handler(drvdata);
	}
#endif
	/* AXI error interrupt */
	if (irr & CC_AXI_ERR_IRQ_MASK) {
		u32 axi_err;

		/* Read the AXI error ID */
		axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
			axi_err);

		irr &= ~CC_AXI_ERR_IRQ_MASK;
	}

	if (irr) {
		dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
			irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}

int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
	unsigned int val, cache_params;
	struct device *dev = drvdata_to_dev(drvdata);

	/* Unmask all AXI interrupt sources AXI_CFG1 register */
	val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
	cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
	dev_dbg(dev, "AXIM_CFG=0x%08X\n",
		cc_ioread(drvdata, CC_REG(AXIM_CFG)));

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);

	/* Unmask relevant interrupt cause */
	val = (unsigned int)(~(CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK |
			       CC_GPR0_IRQ_MASK));
	cc_iowrite(drvdata, CC_REG(HOST_IMR), val);

	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);

	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_info(dev, "Cache params previous: 0x%08X\n", val);

	cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
			 val, cache_params);

	return 0;
}

static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 signature_val;
	u64 dma_mask;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	new_drvdata->clk = of_clk_get(np, 0);
	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	/* Map registers space */
	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base))
		return PTR_ERR(new_drvdata->cc_base);

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	new_drvdata->irq = platform_get_irq(plat_dev, 0);
	if (new_drvdata->irq < 0) {
		dev_err(dev, "Failed getting IRQ resource\n");
		return new_drvdata->irq;
	}

	rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
			      IRQF_SHARED, "arm_cc7x", new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n",
			new_drvdata->irq);
		return rc;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);

	init_completion(&new_drvdata->hw_queue_avail);

	if (!plat_dev->dev.dma_mask)
		plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;

	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&plat_dev->dev, dma_mask)) {
			rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
			if (!rc)
				break;
		}
		dma_mask >>= 1;
	}

	if (rc) {
		dev_err(dev, "Failed in dma_set_mask, mask=%par\n", &dma_mask);
		return rc;
	}

	rc = cc_clk_on(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed to enable clock");
		return rc;
	}

	/* Verify correct mapping */
	signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
	if (signature_val != CC_DEV_SIGNATURE) {
		dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
			signature_val, (u32)CC_DEV_SIGNATURE);
		rc = -EINVAL;
		goto post_clk_err;
	}
	dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
		 CC_DEV_NAME_STR,
		 cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
		 DRV_MODULE_VERSION);

	rc = init_cc_regs(new_drvdata, true);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_clk_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
		goto post_debugfs_err;
	}
	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
		rc = -ENOMEM;
		goto post_sram_mgr_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_sram_mgr_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	rc = cc_pm_init(new_drvdata);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_init failed\n");
		goto post_buf_mgr_err;
	}

	rc = cc_ivgen_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_ivgen_init failed\n");
		goto post_power_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_ivgen_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_cipher_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_hash_err;
	}

	/* If we got here and FIPS mode is enabled
	 * it means all FIPS test passed, so let TEE
	 * know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	return 0;

post_hash_err:
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_ivgen_err:
	cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
	cc_pm_fini(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_sram_mgr_err:
	cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_clk_err:
	cc_clk_off(new_drvdata);
	return rc;
}

void fini_cc_regs(struct cc_drvdata *drvdata)
{
	/* Mask all interrupts */
	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}

static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct cc_drvdata *drvdata =
		(struct cc_drvdata *)platform_get_drvdata(plat_dev);

	cc_aead_free(drvdata);
	cc_hash_free(drvdata);
	cc_cipher_free(drvdata);
	cc_ivgen_fini(drvdata);
	cc_pm_fini(drvdata);
	cc_buffer_mgr_fini(drvdata);
	cc_req_mgr_fini(drvdata);
	cc_sram_mgr_fini(drvdata);
	cc_fips_fini(drvdata);
	cc_debugfs_fini(drvdata);
	fini_cc_regs(drvdata);
	cc_clk_off(drvdata);
}

int cc_clk_on(struct cc_drvdata *drvdata)
{
	struct clk *clk = drvdata->clk;
	int rc;

	if (IS_ERR(clk))
		/* Not all devices have a clock associated with CCREE */
		return 0;

	rc = clk_prepare_enable(clk);
	if (rc)
		return rc;

	return 0;
}

void cc_clk_off(struct cc_drvdata *drvdata)
{
	struct clk *clk = drvdata->clk;

	if (IS_ERR(clk))
		/* Not all devices have a clock associated with CCREE */
		return;

	clk_disable_unprepare(clk);
}

static int cc7x_probe(struct platform_device *plat_dev)
{
	int rc;
	struct device *dev = &plat_dev->dev;

	/* Map registers space */
	rc = init_cc_resources(plat_dev);
	if (rc)
		return rc;

	dev_info(dev, "ARM ccree device initialized\n");

	return 0;
}

static int cc7x_remove(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;

	dev_dbg(dev, "Releasing cc7x resources...\n");

	cleanup_cc_resources(plat_dev);

	dev_info(dev, "ARM ccree device terminated\n");

	return 0;
}

static const struct of_device_id arm_cc7x_dev_of_match[] = {
	{.compatible = "arm,cryptocell-712-ree"},
	{}
};
MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);

static struct platform_driver cc7x_driver = {
	.driver = {
		   .name = "cc7xree",
		   .of_match_table = arm_cc7x_dev_of_match,
#ifdef CONFIG_PM
		   .pm = &ccree_pm,
#endif
	},
	.probe = cc7x_probe,
	.remove = cc7x_remove,
};

static int __init ccree_init(void)
{
	int ret;

	cc_hash_global_init();

	ret = cc_debugfs_global_init();
	if (ret)
		return ret;

	return platform_driver_register(&cc7x_driver);
}
module_init(ccree_init);

static void __exit ccree_exit(void)
{
	platform_driver_unregister(&cc7x_driver);
	cc_debugfs_global_fini();
}
module_exit(ccree_exit);

/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");
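The probe path above negotiates the widest usable DMA mask by walking down from the IP's 48-bit maximum one bit at a time until the platform accepts it. A minimal user-space model of that loop, with a stand-in predicate replacing dma_supported() (the 40-bit platform cap below is a made-up value for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define DMA_BIT_MASK_LEN 48 /* "Maximum DMA mask supported by IP", cc_driver.h */

/* Stand-in for dma_supported(): pretend the platform caps DMA at 40 bits. */
static bool fake_dma_supported(uint64_t mask)
{
	return mask <= DMA_BIT_MASK(40);
}

int main(void)
{
	uint64_t dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);

	/* Walk down one bit at a time until the platform accepts the mask,
	 * as init_cc_resources() does before dma_set_coherent_mask().
	 */
	while (dma_mask > 0x7fffffffULL) {
		if (fake_dma_supported(dma_mask))
			break;
		dma_mask >>= 1;
	}
	printf("negotiated mask: %#llx\n", (unsigned long long)dma_mask);
	return 0;
}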
drivers/staging/ccree/cc_driver.h
@@ -1,194 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_driver.h
 * ARM CryptoCell Linux Crypto Driver
 */

#ifndef __CC_DRIVER_H__
#define __CC_DRIVER_H__

#ifdef COMP_IN_WQ
#include <linux/workqueue.h>
#else
#include <linux/interrupt.h>
#endif
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <linux/version.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"

extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "3.0"

#define CC_DEV_NAME_STR "cc715ree"
#define CC_COHERENT_CACHE_PARAMS 0xEEE

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48

#define CC_DEV_SIGNATURE 0xDCC71200UL

#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET

/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 3000

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
#define MAX_ICV_NENTS_SUPPORTED 2

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID
 * field in the HW descriptor. The DMA engine +8 that value.
 */

#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);
	void *user_arg;
	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
	/* For the first 'ivgen_dma_addr_len' addresses of this array,
	 * generated IV would be placed in it by send_request().
	 * Same generated IV for all addresses!
	 */
	/* Amount of 'ivgen_dma_addr' elements to be filled. */
	unsigned int ivgen_dma_addr_len;
	/* The generated IV size required, 8/16 B allowed. */
	unsigned int ivgen_size;
	struct completion seq_compl; /* request completion */
};

/**
 * struct cc_drvdata - driver private data context
 * @cc_base:	virt address of the CC registers
 * @irq:	device IRQ number
 * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
 * @fw_ver:	SeP loaded firmware version
 */
struct cc_drvdata {
	void __iomem *cc_base;
	int irq;
	u32 irq_mask;
	u32 fw_ver;
	struct completion hw_queue_avail; /* wait for HW queue availability */
	struct platform_device *plat_dev;
	cc_sram_addr_t mlli_sram_addr;
	void *buff_mgr_handle;
	void *hash_handle;
	void *aead_handle;
	void *blkcipher_handle;
	void *request_mgr_handle;
	void *fips_handle;
	void *ivgen_handle;
	void *sram_mgr_handle;
	void *debugfs;
	struct clk *clk;
	bool coherent;
};

struct cc_crypto_alg {
	struct list_head entry;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	struct cc_drvdata *drvdata;
	struct crypto_alg crypto_alg;
	struct aead_alg aead_alg;
};

struct cc_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} template_u;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	struct cc_drvdata *drvdata;
};

struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;
	enum drv_crypto_direction op_type;
};

static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
	return &drvdata->plat_dev->dev;
}

void __dump_byte_array(const char *name, const u8 *buf, size_t len);
static inline void dump_byte_array(const char *name, const u8 *the_array,
				   size_t size)
{
	if (cc_dump_bytes)
		__dump_byte_array(name, the_array, size);
}

int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);

static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, (drvdata->cc_base + reg));
}

static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}

static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
{
	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
}

#endif /*__CC_DRIVER_H__*/
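The CC_REG() name-mangling macro above resolves a register name to its offset symbol by token pasting: CC_REG(HOST_IRR) becomes CC_HOST_IRR_REG_OFFSET. A stand-alone illustration (not from the driver), with the offset value copied from cc_host_regs.h shown further down this page:

#include <stdio.h>

#define CC_HOST_IRR_REG_OFFSET 0xA00UL /* value from cc_host_regs.h below */

/* Register name mangling macro, as in cc_driver.h. */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET

int main(void)
{
	/* CC_REG(HOST_IRR) pastes tokens into CC_HOST_IRR_REG_OFFSET. */
	printf("HOST_IRR offset: %#lx\n", CC_REG(HOST_IRR));
	return 0;
}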
drivers/staging/ccree/cc_fips.c
@@ -1,111 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/fips.h>

#include "cc_driver.h"
#include "cc_fips.h"

static void fips_dsr(unsigned long devarg);

struct cc_fips_handle {
	struct tasklet_struct tasklet;
};

/* The function called once at driver entry point to check
 * whether TEE FIPS error occurred.
 */
static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
{
	u32 reg;

	reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
	return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
}

/*
 * This function should push the FIPS REE library status towards the TEE library
 * by writing the error state to HOST_GPR0 register.
 */
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
{
	int val = CC_FIPS_SYNC_REE_STATUS;

	val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);

	cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
}

void cc_fips_fini(struct cc_drvdata *drvdata)
{
	struct cc_fips_handle *fips_h = drvdata->fips_handle;

	if (!fips_h)
		return; /* Not allocated */

	/* Kill tasklet */
	tasklet_kill(&fips_h->tasklet);

	kfree(fips_h);
	drvdata->fips_handle = NULL;
}

void fips_handler(struct cc_drvdata *drvdata)
{
	struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;

	tasklet_schedule(&fips_handle_ptr->tasklet);
}

static inline void tee_fips_error(struct device *dev)
{
	if (fips_enabled)
		panic("ccree: TEE reported cryptographic error in fips mode!\n");
	else
		dev_err(dev, "TEE reported error!\n");
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irq, state, val;

	irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));

	if (irq) {
		state = cc_ioread(drvdata, CC_REG(GPR_HOST));

		if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
			tee_fips_error(dev);
	}

	/* after verifing that there is nothing to do,
	 * unmask AXI completion interrupt.
	 */
	val = (CC_REG(HOST_IMR) & ~irq);
	cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
}

/* The function called once at driver entry point .*/
int cc_fips_init(struct cc_drvdata *p_drvdata)
{
	struct cc_fips_handle *fips_h;
	struct device *dev = drvdata_to_dev(p_drvdata);

	fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
	if (!fips_h)
		return -ENOMEM;

	p_drvdata->fips_handle = fips_h;

	dev_dbg(dev, "Initializing fips tasklet\n");
	tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);

	if (!cc_get_tee_fips_status(p_drvdata))
		tee_fips_error(dev);

	return 0;
}
drivers/staging/ccree/cc_fips.h
@@ -1,37 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef __CC_FIPS_H__
#define __CC_FIPS_H__

#ifdef CONFIG_CRYPTO_FIPS

enum cc_fips_status {
	CC_FIPS_SYNC_MODULE_OK = 0x0,
	CC_FIPS_SYNC_MODULE_ERROR = 0x1,
	CC_FIPS_SYNC_REE_STATUS = 0x4,
	CC_FIPS_SYNC_TEE_STATUS = 0x8,
	CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
};

int cc_fips_init(struct cc_drvdata *p_drvdata);
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);

#else /* CONFIG_CRYPTO_FIPS */

static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
{
	return 0;
}

static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
					  bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}

#endif /* CONFIG_CRYPTO_FIPS */

#endif /*__CC_FIPS_H__*/
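The REE/TEE FIPS handshake in cc_fips.c reduces to two GPR words built from the enum above: the REE writes CC_FIPS_SYNC_REE_STATUS plus an OK/error bit to HOST_GPR0, and expects to read CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK back from GPR_HOST. A stand-alone sketch of the value construction (the helper name is ours):

#include <stdbool.h>
#include <stdio.h>

/* Values from the cc_fips_status enum above. */
enum cc_fips_status {
	CC_FIPS_SYNC_MODULE_OK = 0x0,
	CC_FIPS_SYNC_MODULE_ERROR = 0x1,
	CC_FIPS_SYNC_REE_STATUS = 0x4,
	CC_FIPS_SYNC_TEE_STATUS = 0x8,
};

/* What cc_set_ree_fips_status() writes to HOST_GPR0. */
static int ree_gpr_word(bool ok)
{
	return CC_FIPS_SYNC_REE_STATUS |
	       (ok ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
}

int main(void)
{
	printf("REE ok: %#x, REE error: %#x\n",
	       ree_gpr_word(true), ree_gpr_word(false)); /* 0x4, 0x5 */
	printf("expected TEE status: %#x\n",
	       CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK); /* 0x8 */
	return 0;
}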
(File diff suppressed because it is too large.)
drivers/staging/ccree/cc_hash.h
@@ -1,114 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_hash.h
 * ARM CryptoCell Hash Crypto API
 */

#ifndef __CC_HASH_H__
#define __CC_HASH_H__

#include "cc_buffer_mgr.h"

#define HMAC_IPAD_CONST 0x36363636
#define HMAC_OPAD_CONST 0x5C5C5C5C
#if (CC_DEV_SHA_MAX > 256)
#define HASH_LEN_SIZE 16
#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
#else
#define HASH_LEN_SIZE 8
#define CC_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
#define CC_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
#endif

#define XCBC_MAC_K1_OFFSET 0
#define XCBC_MAC_K2_OFFSET 16
#define XCBC_MAC_K3_OFFSET 32

#define CC_EXPORT_MAGIC 0xC2EE1070U

/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
 * for xcbc/cmac statesize
 */
struct aeshash_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

/* ahash state */
struct ahash_req_ctx {
	u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
	u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 digest_bytes_len[HASH_LEN_SIZE] ____cacheline_aligned;
	struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
	enum cc_req_dma_buf_type data_dma_buf_type;
	dma_addr_t opad_digest_dma_addr;
	dma_addr_t digest_buff_dma_addr;
	dma_addr_t digest_bytes_len_dma_addr;
	dma_addr_t digest_result_dma_addr;
	u32 buf_cnt[2];
	u32 buff_index;
	u32 xcbc_count; /* count xcbc update operatations */
	struct scatterlist buff_sg[2];
	struct scatterlist *curr_sg;
	u32 in_nents;
	u32 mlli_nents;
	struct mlli_params mlli_params;
};

static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
{
	return &state->buf_cnt[state->buff_index];
}

static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
{
	return state->buffers[state->buff_index];
}

static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
{
	return &state->buf_cnt[state->buff_index ^ 1];
}

static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
{
	return state->buffers[state->buff_index ^ 1];
}

int cc_hash_alloc(struct cc_drvdata *drvdata);
int cc_init_hash_sram(struct cc_drvdata *drvdata);
int cc_hash_free(struct cc_drvdata *drvdata);

/*!
 * Gets the initial digest length
 *
 * \param drvdata
 * \param mode The Hash mode. Supported modes:
 *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
 *
 * \return u32 returns the address of the initial digest length in SRAM
 */
cc_sram_addr_t
cc_digest_len_addr(void *drvdata, u32 mode);

/*!
 * Gets the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * \param drvdata
 * \param mode The Hash mode. Supported modes:
 *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
 *
 * \return u32 The address of the initial digest in SRAM
 */
cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);

void cc_hash_global_init(void);

#endif /*__CC_HASH_H__*/
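HMAC_IPAD_CONST and HMAC_OPAD_CONST above are the classic HMAC 0x36/0x5c pad bytes replicated across a 32-bit word, which lets a key block be XORed a word at a time rather than byte by byte. A brief stand-alone illustration:

#include <assert.h>
#include <stdint.h>

#define HMAC_IPAD_CONST 0x36363636
#define HMAC_OPAD_CONST 0x5C5C5C5C

int main(void)
{
	/* XORing a zero-padded key word with the constants yields the usual
	 * per-byte HMAC ipad/opad values in every byte lane at once.
	 */
	uint32_t key_word = 0;

	assert((key_word ^ HMAC_IPAD_CONST) == 0x36363636U);
	assert(((key_word ^ HMAC_OPAD_CONST) & 0xff) == 0x5c);
	return 0;
}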
@ -1,142 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
|
||||
|
||||
#ifndef __CC_HOST_H__
|
||||
#define __CC_HOST_H__
|
||||
|
||||
// --------------------------------------
|
||||
// BLOCK: HOST_P
|
||||
// --------------------------------------
|
||||
#define CC_HOST_IRR_REG_OFFSET 0xA00UL
|
||||
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
|
||||
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
|
||||
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
|
||||
#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
|
||||
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
|
||||
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IMR_REG_OFFSET 0xA04UL
|
||||
#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
|
||||
#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
|
||||
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
|
||||
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
|
||||
#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
|
||||
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
|
||||
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_ICR_REG_OFFSET 0xA08UL
|
||||
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
|
||||
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
|
||||
#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
|
||||
#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
|
||||
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
|
||||
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
|
||||
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
|
||||
#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
|
||||
#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
|
||||
#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
|
||||
#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
|
||||
#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
|
||||
#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
|
||||
#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
|
||||
#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
|
||||
#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
|
||||
#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_GPR0_REG_OFFSET 0xA70UL
#define CC_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
#define CC_GPR_HOST_REG_OFFSET 0xA74UL
#define CC_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
#define CC_GPR_HOST_VALUE_BIT_SIZE 0x20UL
#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
#define CC_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
#define CC_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
#define CC_SRAM_ADDR_REG_OFFSET 0xF04UL
#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
#define CC_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
#define CC_SRAM_DATA_READY_REG_OFFSET 0xF08UL
#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL

#endif //__CC_HOST_H__
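For reference, each register above is described by a _REG_OFFSET plus per-field _BIT_SHIFT/_BIT_SIZE pairs rather than ready-made masks. A minimal, self-contained sketch of deriving a mask and extracting a field from a raw register value (the sample readout is made up; in the driver this is done with the GENMASK()/FIELD_GET() style helpers shown in the next file):

#include <stdint.h>
#include <stdio.h>

/* Field descriptors copied from the header above */
#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
#define CC_SRAM_ADDR_VALUE_BIT_SIZE  0xFUL

/* Build a mask of `size` bits starting at `shift` */
#define CC_FIELD_MASK(shift, size) (((1UL << (size)) - 1) << (shift))

int main(void)
{
	uint32_t sram_addr_reg = 0x1234; /* made-up sample readout */

	uint32_t addr = (sram_addr_reg &
			 CC_FIELD_MASK(CC_SRAM_ADDR_VALUE_BIT_SHIFT,
				       CC_SRAM_ADDR_VALUE_BIT_SIZE))
			>> CC_SRAM_ADDR_VALUE_BIT_SHIFT;

	printf("SRAM address field: 0x%x\n", addr);
	return 0;
}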
@@ -1,590 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__

#include <linux/types.h>

#include "cc_kernel_regs.h"
#include <linux/bitfield.h>

/******************************************************************************
 * DEFINITIONS
 ******************************************************************************/

#define HW_DESC_SIZE_WORDS 6
/* Define max. available slots in HW queue */
#define HW_QUEUE_SLOTS_MAX 15

#define CC_REG_LOW(word, name) \
	(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)

#define CC_REG_HIGH(word, name) \
	(CC_REG_LOW(word, name) + \
	 CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)

#define CC_GENMASK(word, name) \
	GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))

#define WORD0_VALUE CC_GENMASK(0, VALUE)
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
#define WORD2_VALUE CC_GENMASK(2, VALUE)
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE)
#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT)
#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT)
#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2)
#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO)
#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE)
#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0)
#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)

/******************************************************************************
 * TYPE DEFINITIONS
 ******************************************************************************/

struct cc_hw_desc {
	union {
		u32 word[HW_DESC_SIZE_WORDS];
		u16 hword[HW_DESC_SIZE_WORDS * 2];
	};
};

enum cc_axi_sec {
	AXI_SECURE = 0,
	AXI_NOT_SECURE = 1
};

enum cc_desc_direction {
	DESC_DIRECTION_ILLEGAL = -1,
	DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
	DESC_DIRECTION_DECRYPT_DECRYPT = 1,
	DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
	DESC_DIRECTION_END = S32_MAX,
};

enum cc_dma_mode {
	DMA_MODE_NULL = -1,
	NO_DMA = 0,
	DMA_SRAM = 1,
	DMA_DLLI = 2,
	DMA_MLLI = 3,
	DMA_MODE_END = S32_MAX,
};

enum cc_flow_mode {
	FLOW_MODE_NULL = -1,
	/* data flows */
	BYPASS = 0,
	DIN_AES_DOUT = 1,
	AES_to_HASH = 2,
	AES_and_HASH = 3,
	DIN_DES_DOUT = 4,
	DES_to_HASH = 5,
	DES_and_HASH = 6,
	DIN_HASH = 7,
	DIN_HASH_and_BYPASS = 8,
	AESMAC_and_BYPASS = 9,
	AES_to_HASH_and_DOUT = 10,
	DIN_RC4_DOUT = 11,
	DES_to_HASH_and_DOUT = 12,
	AES_to_AES_to_HASH_and_DOUT = 13,
	AES_to_AES_to_HASH = 14,
	AES_to_HASH_and_AES = 15,
	DIN_AES_AESMAC = 17,
	HASH_to_DOUT = 18,
	/* setup flows */
	S_DIN_to_AES = 32,
	S_DIN_to_AES2 = 33,
	S_DIN_to_DES = 34,
	S_DIN_to_RC4 = 35,
	S_DIN_to_HASH = 37,
	S_AES_to_DOUT = 38,
	S_AES2_to_DOUT = 39,
	S_RC4_to_DOUT = 41,
	S_DES_to_DOUT = 42,
	S_HASH_to_DOUT = 43,
	SET_FLOW_ID = 44,
	FLOW_MODE_END = S32_MAX,
};

enum cc_tunnel_op {
	TUNNEL_OP_INVALID = -1,
	TUNNEL_OFF = 0,
	TUNNEL_ON = 1,
	TUNNEL_OP_END = S32_MAX,
};

enum cc_setup_op {
	SETUP_LOAD_NOP = 0,
	SETUP_LOAD_STATE0 = 1,
	SETUP_LOAD_STATE1 = 2,
	SETUP_LOAD_STATE2 = 3,
	SETUP_LOAD_KEY0 = 4,
	SETUP_LOAD_XEX_KEY = 5,
	SETUP_WRITE_STATE0 = 8,
	SETUP_WRITE_STATE1 = 9,
	SETUP_WRITE_STATE2 = 10,
	SETUP_WRITE_STATE3 = 11,
	SETUP_OP_END = S32_MAX,
};

enum cc_aes_mac_selector {
	AES_SK = 1,
	AES_CMAC_INIT = 2,
	AES_CMAC_SIZE0 = 3,
	AES_MAC_END = S32_MAX,
};

#define HW_KEY_MASK_CIPHER_DO 0x3
#define HW_KEY_SHIFT_CIPHER_CFG2 2

/* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
/* HwCryptoKey[3:2] is mapped to cipher_config2[1:0] */
enum cc_hw_crypto_key {
	USER_KEY = 0,		/* 0x0000 */
	ROOT_KEY = 1,		/* 0x0001 */
	PROVISIONING_KEY = 2,	/* 0x0010 */ /* == KCP */
	SESSION_KEY = 3,	/* 0x0011 */
	RESERVED_KEY = 4,	/* NA */
	PLATFORM_KEY = 5,	/* 0x0101 */
	CUSTOMER_KEY = 6,	/* 0x0110 */
	KFDE0_KEY = 7,		/* 0x0111 */
	KFDE1_KEY = 9,		/* 0x1001 */
	KFDE2_KEY = 10,		/* 0x1010 */
	KFDE3_KEY = 11,		/* 0x1011 */
	END_OF_KEYS = S32_MAX,
};
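As the comment above the enum notes, a HW key selector is split across two descriptor fields: bits [1:0] go into cipher_do and bits [3:2] into cipher_config2, which is exactly what set_hw_crypto_key() below does with these two defines. A tiny self-contained check of the arithmetic (KFDE1_KEY taken from the enum above):

#include <stdio.h>

#define HW_KEY_MASK_CIPHER_DO    0x3
#define HW_KEY_SHIFT_CIPHER_CFG2 2

int main(void)
{
	unsigned int hw_key = 9; /* KFDE1_KEY, binary 1001 */

	unsigned int cipher_do   = hw_key & HW_KEY_MASK_CIPHER_DO;     /* 0b01 */
	unsigned int cipher_cfg2 = hw_key >> HW_KEY_SHIFT_CIPHER_CFG2; /* 0b10 */

	printf("cipher_do=%u cipher_config2=%u\n", cipher_do, cipher_cfg2);
	return 0;
}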

enum cc_hw_aes_key_size {
	AES_128_KEY = 0,
	AES_192_KEY = 1,
	AES_256_KEY = 2,
	END_OF_AES_KEYS = S32_MAX,
};

enum cc_hw_des_key_size {
	DES_ONE_KEY = 0,
	DES_TWO_KEYS = 1,
	DES_THREE_KEYS = 2,
	END_OF_DES_KEYS = S32_MAX,
};

enum cc_hash_conf_pad {
	HASH_PADDING_DISABLED = 0,
	HASH_PADDING_ENABLED = 1,
	HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
	HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
};

enum cc_hash_cipher_pad {
	DO_NOT_PAD = 0,
	DO_PAD = 1,
	HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};

/*****************************/
/* Descriptor packing macros */
/*****************************/

/*
 * Init a HW descriptor struct
 * @pdesc: pointer to HW descriptor struct
 */
static inline void hw_desc_init(struct cc_hw_desc *pdesc)
{
	memset(pdesc, 0, sizeof(struct cc_hw_desc));
}

/*
 * Indicates the end of the current HW descriptor flow and releases the
 * HW engines.
 *
 * @pdesc: pointer to HW descriptor struct
 */
static inline void set_queue_last_ind(struct cc_hw_desc *pdesc)
{
	pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
}

/*
 * Set the DIN field of a HW descriptor
 *
 * @pdesc: pointer to HW descriptor struct
 * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
 * @addr: DIN address
 * @size: Data size in bytes
 * @axi_sec: AXI secure bit
 */
static inline void set_din_type(struct cc_hw_desc *pdesc,
				enum cc_dma_mode dma_mode, dma_addr_t addr,
				u32 size, enum cc_axi_sec axi_sec)
{
	pdesc->word[0] = (u32)addr;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
#endif
	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
			  FIELD_PREP(WORD1_DIN_SIZE, size) |
			  FIELD_PREP(WORD1_NS_BIT, axi_sec);
}

/*
 * Set the DIN field of a HW descriptor to NO DMA mode.
 * Used for NOP descriptor, register patches and other special modes.
 *
 * @pdesc: pointer to HW descriptor struct
 * @addr: DIN address
 * @size: Data size in bytes
 */
static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
{
	pdesc->word[0] = addr;
	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
}

/*
 * Set the DIN field of a HW descriptor to SRAM mode.
 * Note: No need to check SRAM alignment since host requests do not use SRAM
 * and the adaptor will enforce the alignment check.
 *
 * @pdesc: pointer to HW descriptor struct
 * @addr: DIN address
 * @size: Data size in bytes
 */
static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
				u32 size)
{
	pdesc->word[0] = (u32)addr;
	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
			  FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
}

/*
 * Set the DIN field of a HW descriptor to CONST mode
 *
 * @pdesc: pointer to HW descriptor struct
 * @val: DIN const value
 * @size: Data size in bytes
 */
static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
{
	pdesc->word[0] = val;
	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_CONST_VALUE, 1) |
			  FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM) |
			  FIELD_PREP(WORD1_DIN_SIZE, size);
}

/*
 * Set the DIN not last input data indicator
 *
 * @pdesc: pointer to HW descriptor struct
 */
static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
{
	pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
}

/*
 * Set the DOUT field of a HW descriptor
 *
 * @pdesc: pointer to HW descriptor struct
 * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
 * @addr: DOUT address
 * @size: Data size in bytes
 * @axi_sec: AXI secure bit
 */
static inline void set_dout_type(struct cc_hw_desc *pdesc,
				 enum cc_dma_mode dma_mode, dma_addr_t addr,
				 u32 size, enum cc_axi_sec axi_sec)
{
	pdesc->word[2] = (u32)addr;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
#endif
	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
			  FIELD_PREP(WORD3_DOUT_SIZE, size) |
			  FIELD_PREP(WORD3_NS_BIT, axi_sec);
}

/*
 * Set the DOUT field of a HW descriptor to DLLI type
 * The LAST INDICATION is provided by the user
 *
 * @pdesc: pointer to HW descriptor struct
 * @addr: DOUT address
 * @size: Data size in bytes
 * @last_ind: The last indication bit
 * @axi_sec: AXI secure bit
 */
static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
				 u32 size, enum cc_axi_sec axi_sec,
				 u32 last_ind)
{
	set_dout_type(pdesc, DMA_DLLI, addr, size, axi_sec);
	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
}

/*
 * Set the DOUT field of a HW descriptor to MLLI type
 * The LAST INDICATION is provided by the user
 *
 * @pdesc: pointer to HW descriptor struct
 * @addr: DOUT address
 * @size: Data size in bytes
 * @last_ind: The last indication bit
 * @axi_sec: AXI secure bit
 */
static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
				 u32 size, enum cc_axi_sec axi_sec,
				 bool last_ind)
{
	set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
}

/*
 * Set the DOUT field of a HW descriptor to NO DMA mode.
 * Used for NOP descriptor, register patches and other special modes.
 *
 * @pdesc: pointer to HW descriptor struct
 * @addr: DOUT address
 * @size: Data size in bytes
 * @write_enable: Enables a write operation to a register
 */
static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
				   u32 size, bool write_enable)
{
	pdesc->word[2] = addr;
	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_SIZE, size) |
			  FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
}

/*
 * Set the word for the XOR operation.
 *
 * @pdesc: pointer to HW descriptor struct
 * @val: xor data value
 */
static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
{
	pdesc->word[2] = val;
}

/*
 * Sets the XOR indicator bit in the descriptor
 *
 * @pdesc: pointer to HW descriptor struct
 */
static inline void set_xor_active(struct cc_hw_desc *pdesc)
{
	pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
}

/*
 * Select the AES engine instead of the HASH engine when setting up
 * combined mode with AES XCBC MAC
 *
 * @pdesc: pointer to HW descriptor struct
 */
static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
}

/*
 * Set the DOUT field of a HW descriptor to SRAM mode
 * Note: No need to check SRAM alignment since host requests do not use SRAM
 * and the adaptor will enforce the alignment check.
 *
 * @pdesc: pointer to HW descriptor struct
 * @addr: DOUT address
 * @size: Data size in bytes
 */
static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
{
	pdesc->word[2] = addr;
	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, DMA_SRAM) |
			  FIELD_PREP(WORD3_DOUT_SIZE, size);
}

/*
 * Sets the data unit size for XEX mode in data_out_addr[15:0]
 *
 * @pdesc: pointer to HW descriptor struct
 * @size: data unit size for XEX mode
 */
static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
{
	pdesc->word[2] = size;
}

/*
 * Set the number of rounds for Multi2 in data_out_addr[15:0]
 *
 * @pdesc: pointer to HW descriptor struct
 * @num: number of rounds for Multi2
 */
static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
{
	pdesc->word[2] = num;
}

/*
 * Set the flow mode.
 *
 * @pdesc: pointer to HW descriptor struct
 * @mode: Any one of the modes defined in [CC7x-DESC]
 */
static inline void set_flow_mode(struct cc_hw_desc *pdesc,
				 enum cc_flow_mode mode)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
}

/*
 * Set the cipher mode.
 *
 * @pdesc: pointer to HW descriptor struct
 * @mode: Any one of the modes defined in [CC7x-DESC]
 */
static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
				   enum drv_cipher_mode mode)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}

/*
 * Set the cipher configuration fields.
 *
 * @pdesc: pointer to HW descriptor struct
 * @mode: Any one of the modes defined in [CC7x-DESC]
 */
static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
				      enum drv_crypto_direction mode)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}

/*
 * Set the cipher configuration fields.
 *
 * @pdesc: pointer to HW descriptor struct
 * @config: Any one of the modes defined in [CC7x-DESC]
 */
static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
				      enum cc_hash_conf_pad config)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
}

/*
 * Set HW key configuration fields.
 *
 * @pdesc: pointer to HW descriptor struct
 * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
 */
static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
				     enum cc_hw_crypto_key hw_key)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
				     (hw_key & HW_KEY_MASK_CIPHER_DO)) |
			  FIELD_PREP(WORD4_CIPHER_CONF2,
				     (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
}

/*
 * Set byte order of all setup-finalize descriptors.
 *
 * @pdesc: pointer to HW descriptor struct
 * @config: Any one of the modes defined in [CC7x-DESC]
 */
static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
}

/*
 * Set CMAC_SIZE0 mode.
 *
 * @pdesc: pointer to HW descriptor struct
 */
static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
}

/*
 * Set key size descriptor field.
 *
 * @pdesc: pointer to HW descriptor struct
 * @size: key size in bytes (NOT size code)
 */
static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
}

/*
 * Set AES key size.
 *
 * @pdesc: pointer to HW descriptor struct
 * @size: key size in bytes (NOT size code)
 */
static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
{
	set_key_size(pdesc, ((size >> 3) - 2));
}

/*
 * Set DES key size.
 *
 * @pdesc: pointer to HW descriptor struct
 * @size: key size in bytes (NOT size code)
 */
static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
{
	set_key_size(pdesc, ((size >> 3) - 1));
}

/*
 * Set the descriptor setup mode
 *
 * @pdesc: pointer to HW descriptor struct
 * @mode: Any one of the setup modes defined in [CC7x-DESC]
 */
static inline void set_setup_mode(struct cc_hw_desc *pdesc,
				  enum cc_setup_op mode)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
}

/*
 * Set the descriptor cipher DO
 *
 * @pdesc: pointer to HW descriptor struct
 * @config: Any one of the cipher do defined in [CC7x-DESC]
 */
static inline void set_cipher_do(struct cc_hw_desc *pdesc,
				 enum cc_hash_cipher_pad config)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
				     (config & HW_KEY_MASK_CIPHER_DO));
}

#endif /*__CC_HW_QUEUE_DEFS_H__*/
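Taken together, these helpers compose one six-word descriptor a field at a time. A minimal caller-side sketch (kernel context assumed; build_bypass_copy() is illustrative, not a driver function): build a single BYPASS descriptor that copies len bytes from one DMA address to another.

#include "cc_hw_queue_defs.h"

static void build_bypass_copy(struct cc_hw_desc *desc, dma_addr_t src,
			      dma_addr_t dst, u32 len)
{
	hw_desc_init(desc);				/* zero all six words */
	set_din_type(desc, DMA_DLLI, src, len, AXI_NOT_SECURE);
	set_dout_dlli(desc, dst, len, AXI_NOT_SECURE, 1); /* last DOUT */
	set_flow_mode(desc, BYPASS);			/* no cipher/hash engine */
}

The same pattern appears elsewhere in this commit, e.g. the pool-metadata copy in cc_init_iv_sram() and the dummy completion descriptor in cc_req_mgr_init() below.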
@@ -1,280 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <crypto/ctr.h>
#include "cc_driver.h"
#include "cc_ivgen.h"
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
#include "cc_buffer_mgr.h"

/* The max. size of pool *MUST* be <= SRAM total size */
#define CC_IVPOOL_SIZE 1024
/* The first 32B fraction of pool is dedicated to the
 * next encryption "key" & "IV" for pool regeneration
 */
#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
#define CC_IVPOOL_GEN_SEQ_LEN 4

/**
 * struct cc_ivgen_ctx - IV pool generation context
 * @pool: the start address of the iv-pool in internal RAM
 * @ctr_key: address of pool's encryption key material in internal RAM
 * @ctr_iv: address of pool's counter iv in internal RAM
 * @next_iv_ofs: the offset to the next available IV in pool
 * @pool_meta: virt. address of the initial enc. key/IV
 * @pool_meta_dma: phys. address of the initial enc. key/IV
 */
struct cc_ivgen_ctx {
	cc_sram_addr_t pool;
	cc_sram_addr_t ctr_key;
	cc_sram_addr_t ctr_iv;
	u32 next_iv_ofs;
	u8 *pool_meta;
	dma_addr_t pool_meta_dma;
};

/*!
 * Generates CC_IVPOOL_SIZE of random bytes by
 * encrypting 0's using AES128-CTR.
 *
 * \param ivgen iv-pool context
 * \param iv_seq IN/OUT array to the descriptors sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 */
static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
			  struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	unsigned int idx = *iv_seq_len;

	if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}
	/* Setup key */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Setup cipher state */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Perform dummy encrypt to skip first block */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	/* Generate IV pool */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	*iv_seq_len = idx; /* Update sequence length */

	/* queue ordering assures pool readiness */
	ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;

	return 0;
}

/*!
 * Generates the initial pool in SRAM.
 * This function should be invoked when resuming the DX driver.
 *
 * \param drvdata
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	unsigned int iv_seq_len = 0;
	int rc;

	/* Generate initial enc. key/iv */
	get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);

	/* The first 32B reserved for the enc. Key/IV */
	ivgen_ctx->ctr_key = ivgen_ctx->pool;
	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;

	/* Copy initial enc. key and IV to SRAM with a single descriptor */
	hw_desc_init(&iv_seq[iv_seq_len]);
	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
		     CC_IVPOOL_META_SIZE, NS_BIT);
	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
		      CC_IVPOOL_META_SIZE);
	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
	iv_seq_len++;

	/* Generate initial pool */
	rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
	if (rc)
		return rc;

	/* Fire-and-forget */
	return send_request_init(drvdata, iv_seq, iv_seq_len);
}

/*!
 * Free iv-pool and ivgen context.
 *
 * \param drvdata
 */
void cc_ivgen_fini(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	struct device *device = &drvdata->plat_dev->dev;

	if (!ivgen_ctx)
		return;

	if (ivgen_ctx->pool_meta) {
		memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
		dma_free_coherent(device, CC_IVPOOL_META_SIZE,
				  ivgen_ctx->pool_meta,
				  ivgen_ctx->pool_meta_dma);
	}

	ivgen_ctx->pool = NULL_SRAM_ADDR;

	/* release "this" context */
	kfree(ivgen_ctx);
}

/*!
 * Allocates iv-pool and maps resources.
 * This function generates the first IV pool.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_ivgen_init(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx;
	struct device *device = &drvdata->plat_dev->dev;
	int rc;

	/* Allocate "this" context */
	ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
	if (!ivgen_ctx)
		return -ENOMEM;

	drvdata->ivgen_handle = ivgen_ctx;

	/* Allocate pool's header for initial enc. key/IV */
	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
						  &ivgen_ctx->pool_meta_dma,
						  GFP_KERNEL);
	if (!ivgen_ctx->pool_meta) {
		dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
			CC_IVPOOL_META_SIZE);
		rc = -ENOMEM;
		goto out;
	}
	/* Allocate IV pool in SRAM */
	ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
	if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
		dev_err(device, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto out;
	}

	return cc_init_iv_sram(drvdata);

out:
	cc_ivgen_fini(drvdata);
	return rc;
}

/*!
 * Acquires a 16-byte IV from the iv-pool
 *
 * \param drvdata Driver private context
 * \param iv_out_dma Array of physical IV out addresses
 * \param iv_out_dma_len Length of iv_out_dma array (additional elements
 *                       of iv_out_dma array are ignored)
 * \param iv_out_size May be 8 or 16 bytes long
 * \param iv_seq IN/OUT array to the descriptors sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	unsigned int idx = *iv_seq_len;
	struct device *dev = drvdata_to_dev(drvdata);
	unsigned int t;

	if (iv_out_size != CC_AES_IV_SIZE &&
	    iv_out_size != CTR_RFC3686_IV_SIZE) {
		return -EINVAL;
	}
	if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}

	/* check that the number of generated IVs is limited to the max
	 * supported number of IV DMA addresses
	 */
	if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}

	for (t = 0; t < iv_out_dma_len; t++) {
		/* Acquire IV from pool */
		hw_desc_init(&iv_seq[idx]);
		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
					    ivgen_ctx->next_iv_ofs),
			     iv_out_size);
		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
			      NS_BIT, 0);
		set_flow_mode(&iv_seq[idx], BYPASS);
		idx++;
	}

	/* The bypass operation is followed by the crypto sequence, hence we
	 * must assure the bypass write transaction with a memory barrier
	 */
	hw_desc_init(&iv_seq[idx]);
	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
	idx++;

	*iv_seq_len = idx; /* update seq length */

	/* Update iv index */
	ivgen_ctx->next_iv_ofs += iv_out_size;

	if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
		dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
		/* pool is drained - regenerate it! */
		return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
	}

	return 0;
}
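The pool layout implied by the constants above is easy to work out: CC_AES_IV_SIZE and AES_KEYSIZE_128 are both 16 in the kernel headers this file includes, so 32 bytes of metadata leave 992 bytes of usable IVs, i.e. 62 16-byte IVs per regeneration. A self-contained check of that arithmetic:

#include <stdio.h>

/* Constants mirrored from cc_ivgen.c; the two 16s come from the
 * kernel crypto headers (AES block and AES-128 key sizes). */
#define CC_AES_IV_SIZE      16
#define AES_KEYSIZE_128     16
#define CC_IVPOOL_SIZE      1024
#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)

int main(void)
{
	printf("usable bytes: %d\n", CC_IVPOOL_SIZE - CC_IVPOOL_META_SIZE);
	printf("16-byte IVs per fill: %d\n",
	       (CC_IVPOOL_SIZE - CC_IVPOOL_META_SIZE) / CC_AES_IV_SIZE);
	return 0;
}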
@@ -1,55 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef __CC_IVGEN_H__
#define __CC_IVGEN_H__

#include "cc_hw_queue_defs.h"

#define CC_IVPOOL_SEQ_LEN 8

/*!
 * Allocates iv-pool and maps resources.
 * This function generates the first IV pool.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_ivgen_init(struct cc_drvdata *drvdata);

/*!
 * Free iv-pool and ivgen context.
 *
 * \param drvdata
 */
void cc_ivgen_fini(struct cc_drvdata *drvdata);

/*!
 * Generates the initial pool in SRAM.
 * This function should be invoked when resuming the DX driver.
 *
 * \param drvdata
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_init_iv_sram(struct cc_drvdata *drvdata);

/*!
 * Acquires a 16-byte IV from the iv-pool
 *
 * \param drvdata Driver private context
 * \param iv_out_dma Array of physical IV out addresses
 * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
 *                       iv_out_dma array are ignored)
 * \param iv_out_size May be 8 or 16 bytes long
 * \param iv_seq IN/OUT array to the descriptors sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);

#endif /*__CC_IVGEN_H__*/
@@ -1,167 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef __CC_CRYS_KERNEL_H__
#define __CC_CRYS_KERNEL_H__

// --------------------------------------
// BLOCK: DSCRPTR
// --------------------------------------
#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
#define CC_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
// --------------------------------------
// BLOCK: AXI_P
// --------------------------------------
#define CC_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
#define CC_AXIM_MON_COMP_REG_OFFSET 0xB80UL
#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
#define CC_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
#define CC_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
#define CC_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
#define CC_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
#define CC_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
#define CC_AXIM_CFG_REG_OFFSET 0xBE8UL
#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
#define CC_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
#define CC_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
#define CC_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
#endif // __CC_CRYS_KERNEL_H__
@@ -1,59 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_

#include <linux/types.h>
#include <linux/bitfield.h> /* for GENMASK()/FIELD_PREP() used below */

/* Max DLLI size
 * AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
 */
#define DLLI_SIZE_BIT_SIZE 0x18

#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF

#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
	(2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
	 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)

/* Size of entry */
#define LLI_ENTRY_WORD_SIZE 2
#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))

/* Word0[31:0] = ADDR[31:0] */
#define LLI_WORD0_OFFSET 0
#define LLI_LADDR_BIT_OFFSET 0
#define LLI_LADDR_BIT_SIZE 32
/* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */
#define LLI_WORD1_OFFSET 1
#define LLI_SIZE_BIT_OFFSET 0
#define LLI_SIZE_BIT_SIZE 16
#define LLI_HADDR_BIT_OFFSET 16
#define LLI_HADDR_BIT_SIZE 16

#define LLI_SIZE_MASK GENMASK((LLI_SIZE_BIT_SIZE - 1), LLI_SIZE_BIT_OFFSET)
#define LLI_HADDR_MASK GENMASK( \
			       (LLI_HADDR_BIT_OFFSET + LLI_HADDR_BIT_SIZE - 1),\
			       LLI_HADDR_BIT_OFFSET)

static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
{
	lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
}

static inline void cc_lli_set_size(u32 *lli_p, u16 size)
{
	lli_p[LLI_WORD1_OFFSET] &= ~LLI_SIZE_MASK;
	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_SIZE_MASK, size);
}

#endif /*_CC_LLI_DEFS_H_*/
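An MLLI table is just an array of these two-word entries. A minimal sketch of filling the first entry (kernel context assumed; fill_first_entry() and its parameters are illustrative, not part of the driver):

#include "cc_lli_defs.h"

static void fill_first_entry(u32 *mlli_table, dma_addr_t chunk, u16 len)
{
	u32 *entry = &mlli_table[0 * LLI_ENTRY_WORD_SIZE];

	/* Each helper clears its word1 field before OR-ing the new value,
	 * so together they fully overwrite the entry's previous contents.
	 */
	cc_lli_set_addr(entry, chunk);	/* word0 + ADDR[47:32] in word1 */
	cc_lli_set_size(entry, len);	/* SIZE in word1[15:0] */
}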
@@ -1,122 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"

#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00

const struct dev_pm_ops ccree_pm = {
	SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
};

int cc_pm_suspend(struct device *dev)
{
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	int rc;

	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
	rc = cc_suspend_req_queue(drvdata);
	if (rc) {
		dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
		return rc;
	}
	fini_cc_regs(drvdata);
	cc_clk_off(drvdata);
	return 0;
}

int cc_pm_resume(struct device *dev)
{
	int rc;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);

	rc = cc_clk_on(drvdata);
	if (rc) {
		dev_err(dev, "failed getting clock back on. We're toast.\n");
		return rc;
	}

	rc = init_cc_regs(drvdata, false);
	if (rc) {
		dev_err(dev, "init_cc_regs (%x)\n", rc);
		return rc;
	}

	rc = cc_resume_req_queue(drvdata);
	if (rc) {
		dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
		return rc;
	}

	/* must be after the queue resuming as it uses the HW queue */
	cc_init_hash_sram(drvdata);

	cc_init_iv_sram(drvdata);
	return 0;
}

int cc_pm_get(struct device *dev)
{
	int rc = 0;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (cc_req_queue_suspended(drvdata))
		rc = pm_runtime_get_sync(dev);
	else
		pm_runtime_get_noresume(dev);

	return rc;
}

int cc_pm_put_suspend(struct device *dev)
{
	int rc = 0;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (!cc_req_queue_suspended(drvdata)) {
		pm_runtime_mark_last_busy(dev);
		rc = pm_runtime_put_autosuspend(dev);
	} else {
		/* Something went wrong */
		dev_err(dev, "request to suspend already suspended queue");
		rc = -EBUSY;
	}
	return rc;
}

int cc_pm_init(struct cc_drvdata *drvdata)
{
	int rc = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	/* must be before the enabling to avoid redundant suspending */
	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	/* activate the PM module */
	rc = pm_runtime_set_active(dev);
	if (rc)
		return rc;
	/* enable the PM module */
	pm_runtime_enable(dev);

	return rc;
}

void cc_pm_fini(struct cc_drvdata *drvdata)
{
	pm_runtime_disable(drvdata_to_dev(drvdata));
}
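A caller-side sketch of the intended usage (illustrative only; cc_do_work() is a hypothetical placeholder, not a driver function): take a PM reference before touching the HW queue and drop it afterwards so autosuspend can kick in after CC_SUSPEND_TIMEOUT milliseconds of idleness.

static int cc_submit_with_pm(struct device *dev)
{
	int rc;

	rc = cc_pm_get(dev);	/* resume the HW if it was suspended */
	if (rc) {
		dev_err(dev, "cc_pm_get failed (%d)\n", rc);
		return rc;
	}

	rc = cc_do_work(dev);	/* hypothetical request submission */

	cc_pm_put_suspend(dev);	/* re-arm the autosuspend timer */
	return rc;
}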
@@ -1,57 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_pm.h
 */

#ifndef __CC_POWER_MGR_H__
#define __CC_POWER_MGR_H__

#include "cc_driver.h"

#define CC_SUSPEND_TIMEOUT 3000

#if defined(CONFIG_PM)

extern const struct dev_pm_ops ccree_pm;

int cc_pm_init(struct cc_drvdata *drvdata);
void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
int cc_pm_put_suspend(struct device *dev);

#else

static inline int cc_pm_init(struct cc_drvdata *drvdata)
{
	return 0;
}

static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}

static inline int cc_pm_suspend(struct device *dev)
{
	return 0;
}

static inline int cc_pm_resume(struct device *dev)
{
	return 0;
}

static inline int cc_pm_get(struct device *dev)
{
	return 0;
}

static inline int cc_pm_put_suspend(struct device *dev)
{
	return 0;
}

#endif

#endif /*__CC_POWER_MGR_H__*/
@@ -1,713 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_ivgen.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER 10
/* The highest descriptor count in use */
#define CC_MAX_DESC_SEQ_LEN 23

struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock protects access to HW registers that must be
	 * accessed by a single request at a time
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
	bool is_runtime_suspended;
};

struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	memset(req_mgr_h, 0, sizeof(struct cc_req_mgr_handle));
	kfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(&req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}

static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all 6 command words to the same
	 * register. The HW supports this.
	 */

	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		if (cc_dump_desc)
			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
				i, seq[i].word[0], seq[i].word[1],
				seq[i].word[2], seq[i].word[3],
				seq[i].word[4], seq[i].word[5]);
	}
}

/*!
 * Completion will take place if and only if user requested completion
 * by cc_send_sync_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}

static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* If there is enough space, return */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}
||||
|
||||
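/*
 * Illustrative sketch, not part of the driver: the SW queue check above
 * relies on MAX_REQUEST_QUEUE_SIZE being a power of two, so that
 * "(index + 1) & (size - 1)" wraps without a modulo and one slot is
 * sacrificed to tell "full" apart from "empty". A standalone model of
 * the same index arithmetic, with hypothetical names:
 */
#define DEMO_QUEUE_SIZE 8	/* must be a power of two */

static inline bool demo_queue_full(unsigned int head, unsigned int tail)
{
	return ((head + 1) & (DEMO_QUEUE_SIZE - 1)) == tail;
}

static inline bool demo_queue_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}
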
/*!
 * Enqueue caller request to crypto hardware.
 * Needs to be called with HW lock held and PM running
 *
 * \param drvdata
 * \param cc_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param add_comp If "true": add an artificial dout DMA to mark completion
 * \param ivgen If "true": fetch an IV from the pool and prepend its sequence
 *
 * \return int Returns -EINPROGRESS or error code
 */
static int cc_do_send_request(struct cc_drvdata *drvdata,
			      struct cc_crypto_req *cc_req,
			      struct cc_hw_desc *desc, unsigned int len,
			      bool add_comp, bool ivgen)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	if (ivgen) {
		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
			cc_req->ivgen_dma_addr_len,
			&cc_req->ivgen_dma_addr[0],
			&cc_req->ivgen_dma_addr[1],
			&cc_req->ivgen_dma_addr[2],
			cc_req->ivgen_size);

		/* Acquire IV from pool */
		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
			       cc_req->ivgen_dma_addr_len,
			       cc_req->ivgen_size, iv_seq, &iv_seq_len);

		if (rc) {
			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push commands to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes
	 */
	wmb();

	/* STAT_PHASE_4: Push sequence */
	if (ivgen)
		enqueue_seq(drvdata, iv_seq, iv_seq_len);

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. Maybe indicating problem
		 * with resuming power. Set the free slot count to 0 and hope
		 * for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	/* Operation still in process */
	return -EINPROGRESS;
}

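/*
 * Illustrative sketch, not part of the driver: the wmb() in
 * cc_do_send_request() pairs with the relaxed MMIO writes done by
 * enqueue_seq(). The generic "fill coherent memory, barrier, then ring
 * the doorbell" shape of that pattern, with hypothetical names:
 */
static inline void demo_post_work(u32 *payload, u32 val,
				  void __iomem *doorbell)
{
	*payload = val;			/* descriptor data in host memory */
	wmb();				/* order the payload write before... */
	writel_relaxed(1, doorbell);	/* ...telling the device to fetch it */
}
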
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}

static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	struct crypto_async_request *req;
	bool ivgen;
	unsigned int total_len;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = (struct crypto_async_request *)creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			req->complete(req, -EINPROGRESS);
			bli->notif = true;
		}

		ivgen = !!creq->ivgen_dma_addr_len;
		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, total_len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
					bli->len, false, ivgen);

		spin_unlock(&mgr->hw_lock);

		if (rc != -EINPROGRESS) {
			cc_pm_put_suspend(dev);
			creq->user_cb(dev, req, rc);
		}

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
	}

	spin_unlock(&mgr->bl_lock);
}

int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	bool ivgen = !!cc_req->ivgen_dma_addr_len;
	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, total_len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif /* CC_DEBUG_FORCE_BACKLOG */

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc)
		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
					ivgen);

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}

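/*
 * Illustrative sketch, not part of the driver: a caller of
 * cc_send_request() treats -EINPROGRESS as "queued to HW" and -EBUSY as
 * "parked on the SW backlog" (possible only when the request allows
 * backlogging); the user callback later fires with -EINPROGRESS once
 * the request leaves the backlog, and again with the final status. A
 * hypothetical submit wrapper:
 */
static int demo_submit(struct cc_drvdata *drvdata, struct cc_crypto_req *creq,
		       struct cc_hw_desc *seq, unsigned int seq_len,
		       struct crypto_async_request *base)
{
	int rc = cc_send_request(drvdata, creq, seq, seq_len, base);

	if (rc != -EINPROGRESS && rc != -EBUSY)
		pr_err("ccree demo: request not queued (rc=%d)\n", rc);

	return rc;
}
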
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		if (rc != -EAGAIN) {
			cc_pm_put_suspend(dev);
			return rc;
		}
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
	spin_unlock_bh(&mgr->hw_lock);

	if (rc != -EINPROGRESS) {
		cc_pm_put_suspend(dev);
		return rc;
	}

	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

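/*
 * Illustrative sketch, not part of the driver: synchronous submission
 * from a sleepable context. cc_send_sync_request() appends the dummy
 * completion descriptor itself (add_comp=true) and blocks on
 * cc_req->seq_compl until the HW signals completion. The single BYPASS
 * descriptor below, modeled on the driver's compl_desc, is a
 * hypothetical stand-in for a real sequence.
 */
static int demo_run_seq_sync(struct cc_drvdata *drvdata, dma_addr_t out_dma)
{
	struct cc_crypto_req creq = {};
	struct cc_hw_desc seq[1];

	hw_desc_init(&seq[0]);
	set_din_const(&seq[0], 0, sizeof(u32));	/* constant input word */
	set_dout_dlli(&seq[0], out_dma, sizeof(u32), NS_BIT, 0);
	set_flow_mode(&seq[0], BYPASS);

	return cc_send_sync_request(drvdata, &creq, seq, 1);
}
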
/*!
 * Enqueue caller request to crypto hardware during init process.
 * Assumes this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
	 */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(&desc[(len - 1)]);

	/*
	 * We are about to push commands to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}

void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, 0);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}

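/*
 * Illustrative sketch, not part of the driver: proc_completions() above
 * calls cc_req->user_cb(dev, user_arg, status) for each retired
 * request. An async caller wires the callback up before submitting,
 * e.g. (hypothetical names):
 */
static void demo_req_done(struct device *dev, void *arg, int err)
{
	struct crypto_async_request *areq = arg;

	areq->complete(areq, err);	/* hand the result to the crypto API */
}

static void demo_wire_callback(struct cc_crypto_req *creq,
			       struct crypto_async_request *areq)
{
	creq->user_cb = demo_req_done;
	creq->user_arg = areq;
}
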
static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	u32 irq;

	irq = (drvdata->irq & CC_COMP_IRQ_MASK);

	if (irq & CC_COMP_IRQ_MASK) {
		/* To avoid the interrupt from firing as we unmask it,
		 * we clear it now
		 */
		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);

		/* Avoid race with above clear: Test completion counter
		 * once more
		 */
		request_mgr_handle->axi_completed +=
				cc_axi_comp_count(drvdata);

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* At this point (after proc_completions()),
				 * request_mgr_handle->axi_completed is 0.
				 */
				request_mgr_handle->axi_completed =
						cc_axi_comp_count(drvdata);
			} while (request_mgr_handle->axi_completed > 0);

			cc_iowrite(drvdata, CC_REG(HOST_ICR),
				   CC_COMP_IRQ_MASK);

			request_mgr_handle->axi_completed +=
					cc_axi_comp_count(drvdata);
		}
	}
	/* after verifying that there is nothing to do,
	 * unmask AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);

	cc_proc_backlog(drvdata);
}

/*
 * resume the queue configuration - no need to take the lock as this happens
 * inside the spin lock protection
 */
#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

/*
 * suspend the queue configuration. Since it is used for the runtime suspend
 * only, verify that the queue can be suspended.
 */
int cc_suspend_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	/* lock the send_request */
	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}

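/*
 * Illustrative sketch, not part of the driver: a runtime-suspend
 * callback would typically try cc_suspend_req_queue() first and abort
 * the suspend while requests are still in flight (hypothetical helper
 * name; the real glue lives in cc_pm.c):
 */
static int demo_runtime_suspend(struct device *dev)
{
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	int rc = cc_suspend_req_queue(drvdata);

	if (rc)
		return rc;	/* head != tail: queue busy, stay powered */

	/* ... gate clocks / quiesce the engine here ... */
	return 0;
}
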

#endif

@ -1,51 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_request_mgr.h
 * Request Manager
 */

#ifndef __REQUEST_MGR_H__
#define __REQUEST_MGR_H__

#include "cc_hw_queue_defs.h"

int cc_req_mgr_init(struct cc_drvdata *drvdata);

/*!
 * Enqueue caller request to crypto hardware.
 *
 * \param drvdata
 * \param cc_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param req The async request handle; if it is flagged with
 *            CRYPTO_TFM_REQ_MAY_BACKLOG, the request may be parked on a
 *            SW backlog when the HW queue is full.
 *
 * \return int Returns -EINPROGRESS, -EBUSY (backlogged) or error
 */
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req);

int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len);

int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len);

void complete_request(struct cc_drvdata *drvdata);

void cc_req_mgr_fini(struct cc_drvdata *drvdata);

#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata);

int cc_suspend_req_queue(struct cc_drvdata *drvdata);

bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
#endif

#endif /*__REQUEST_MGR_H__*/

@ -1,107 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include "cc_driver.h"
#include "cc_sram_mgr.h"

/**
 * struct cc_sram_ctx - Internal RAM context manager
 * @sram_free_offset: the offset to the non-allocated area
 */
struct cc_sram_ctx {
	cc_sram_addr_t sram_free_offset;
};

/**
 * cc_sram_mgr_fini() - Cleanup SRAM pool.
 *
 * @drvdata: Associated device driver context
 */
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
	/* Free "this" context */
	kfree(drvdata->sram_mgr_handle);
}

/**
 * cc_sram_mgr_init() - Initializes SRAM pool.
 * The pool starts right at the beginning of SRAM.
 * Returns zero for success, negative value otherwise.
 *
 * @drvdata: Associated device driver context
 */
int cc_sram_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_sram_ctx *ctx;

	/* Allocate "this" context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	drvdata->sram_mgr_handle = ctx;

	return 0;
}

/*!
 * Allocate a buffer from the SRAM pool.
 * Note: the caller is responsible for freeing the LAST allocated buffer.
 * This function does not take care of any fragmentation that may occur
 * due to the order of calls to alloc/free.
 *
 * \param drvdata
 * \param size The requested bytes to allocate
 */
cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
{
	struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	cc_sram_addr_t p;

	if ((size & 0x3)) {
		dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
			size);
		return NULL_SRAM_ADDR;
	}
	if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
		dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
			size, smgr_ctx->sram_free_offset);
		return NULL_SRAM_ADDR;
	}

	p = smgr_ctx->sram_free_offset;
	smgr_ctx->sram_free_offset += size;
	dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
	return p;
}

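/*
 * Illustrative sketch, not part of the driver: cc_sram_alloc() is a
 * simple bump allocator with no free operation, so offsets must be
 * reserved up front, in order, and sizes must be 4-byte multiples.
 * Reserving room for a SHA-1 sized digest (hypothetical helper name):
 */
static cc_sram_addr_t demo_reserve_digest(struct cc_drvdata *drvdata)
{
	cc_sram_addr_t addr;

	addr = cc_sram_alloc(drvdata, ALIGN(SHA1_DIGEST_SIZE, sizeof(u32)));
	if (addr == NULL_SRAM_ADDR)
		dev_err(drvdata_to_dev(drvdata), "SRAM pool exhausted\n");

	return addr;
}
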
/**
 * cc_set_sram_desc() - Create const descriptors sequence to
 *	set values in given array into SRAM.
 * Note: each const value can't exceed word size.
 *
 * @src: A pointer to array of words to set as consts.
 * @dst: The target SRAM buffer to set into
 * @nelement: The number of words in "src" array
 * @seq: A pointer to the given IN/OUT descriptor sequence
 * @seq_len: A pointer to the given IN/OUT sequence length
 */
void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
		      unsigned int nelement, struct cc_hw_desc *seq,
		      unsigned int *seq_len)
{
	u32 i;
	unsigned int idx = *seq_len;

	for (i = 0; i < nelement; i++, idx++) {
		hw_desc_init(&seq[idx]);
		set_din_const(&seq[idx], src[i], sizeof(u32));
		set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
		set_flow_mode(&seq[idx], BYPASS);
	}

	*seq_len = idx;
}

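/*
 * Illustrative sketch, not part of the driver: during init the driver
 * composes cc_sram_alloc(), cc_set_sram_desc() and send_request_init()
 * to load constant tables into SRAM. With a hypothetical table name:
 */
static int demo_load_consts(struct cc_drvdata *drvdata)
{
	static const u32 demo_table[4] = { 0x01234567, 0x89abcdef, 0, 0 };
	struct cc_hw_desc seq[ARRAY_SIZE(demo_table)];
	unsigned int seq_len = 0;
	cc_sram_addr_t dst;

	dst = cc_sram_alloc(drvdata, sizeof(demo_table));
	if (dst == NULL_SRAM_ADDR)
		return -ENOMEM;

	cc_set_sram_desc(demo_table, dst, ARRAY_SIZE(demo_table), seq,
			 &seq_len);
	return send_request_init(drvdata, seq, seq_len);
}
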
@ -1,65 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#ifndef __CC_SRAM_MGR_H__
#define __CC_SRAM_MGR_H__

#ifndef CC_CC_SRAM_SIZE
#define CC_CC_SRAM_SIZE 4096
#endif

struct cc_drvdata;

/**
 * Address (offset) within CC internal SRAM
 */
typedef u64 cc_sram_addr_t;

#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)

/*!
 * Initializes SRAM pool.
 * The first X bytes of SRAM are reserved for ROM usage; hence, the pool
 * starts right after those X bytes.
 *
 * \param drvdata
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_sram_mgr_init(struct cc_drvdata *drvdata);

/*!
 * Uninitializes SRAM pool.
 *
 * \param drvdata
 */
void cc_sram_mgr_fini(struct cc_drvdata *drvdata);

/*!
 * Allocate a buffer from the SRAM pool.
 * Note: the caller is responsible for freeing the LAST allocated buffer.
 * This function does not take care of any fragmentation that may occur
 * due to the order of calls to alloc/free.
 *
 * \param drvdata
 * \param size The requested bytes to allocate
 */
cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);

/**
 * cc_set_sram_desc() - Create const descriptors sequence to
 *	set values in given array into SRAM.
 * Note: each const value can't exceed word size.
 *
 * @src: A pointer to array of words to set as consts.
 * @dst: The target SRAM buffer to set into
 * @nelement: The number of words in "src" array
 * @seq: A pointer to the given IN/OUT descriptor sequence
 * @seq_len: A pointer to the given IN/OUT sequence length
 */
void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
		      unsigned int nelement, struct cc_hw_desc *seq,
		      unsigned int *seq_len);

#endif /*__CC_SRAM_MGR_H__*/