newfeature: crypto: ccp: Support SM4 algorithm for hygon ccp.

Add an SM4 driver for the hygon ccp, supporting the SM4 modes
ecb/ecb_hs, cbc/cbc_hs, cfb, ofb and ctr.

Signed-off-by: Yabin Li <liyabin@hygon.cn>
Signed-off-by: yangdepei <yangdepei@hygon.cn>
This commit is contained in:
Yabin Li 2022-05-07 18:25:26 +08:00 committed by Jianping Liu
parent 46f6d0dbdb
commit f1897b7bd8
8 changed files with 818 additions and 2 deletions

View File

@ -25,4 +25,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-sha.o
ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \
ccp-crypto-sm3-hygon.o
ccp-crypto-sm3-hygon.o \
ccp-crypto-sm4-hygon.o

View File

@ -336,6 +336,10 @@ static int ccp_register_algs(void)
if (ret)
return ret;
ret = ccp_register_sm4_hygon_algs(&skcipher_algs);
if (ret)
return ret;
/* Return on hygon platform */
return 0;
}

View File

@ -0,0 +1,307 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hygon Cryptographic Coprocessor (CCP) SM4 crypto API support
*
* Copyright (C) 2022 Hygon Info Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include "ccp-crypto.h"
/* Algorithm-level SM4 modes.  The low bits carry the hardware mode value
 * (enum ccp_sm4_mode, extracted with CCP_SM4_MODE_MASK); ORing in
 * CCP_SM4_MODE_HS_SEL selects the high-secure engine, which is only
 * provided for ECB and CBC.
 */
enum ccp_sm4_alg_mode {
	CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB,
	CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC,
	CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB,
	CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB,
	CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR,
	CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB,
	CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC,
	CCP_SM4_ALG_MODE__LAST,		/* sentinel, not a real mode */
};
/* Completion callback run when the queued ccp_cmd finishes.
 *
 * For every chaining mode (anything but ECB/ECB-HS) the engine has left
 * the updated IV in rctx->iv (written back by ccp_run_sm4_cmd /
 * ccp_run_sm4_ctr_cmd); propagate it to req->iv so chained requests see
 * the new IV, then scrub the local copy.
 */
static int ccp_sm4_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req);

	if (ret)
		return ret;

	/* Masking drops CCP_SM4_MODE_HS_SEL, so ECB-HS is treated as
	 * ECB here and gets no IV write-back.
	 */
	if ((ctx->u.sm4.mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) {
		memcpy(req->iv, rctx->iv, SM4_BLOCK_SIZE);
		memset(rctx->iv, 0, SM4_BLOCK_SIZE);
	}

	return 0;
}
/* skcipher .setkey handler: stash the 16-byte SM4 key in the tfm context
 * and wrap it in a single-entry scatterlist for later DMA to the engine.
 *
 * Returns 0 on success, -EINVAL on a NULL key or wrong key length.
 */
static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* The skcipher core enforces min_keysize/max_keysize, but the key
	 * pointer is not checked there; also validate key_len ourselves
	 * instead of ignoring it, so a bad caller cannot make us read
	 * SM4_KEY_SIZE bytes from a shorter buffer.
	 */
	if (!key || key_len != SM4_KEY_SIZE)
		return -EINVAL;

	memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE);
	sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE);
	ctx->u.sm4.key_len = SM4_KEY_SIZE;

	return 0;
}
/* Common encrypt/decrypt path: validate the request, snapshot the IV into
 * the request context, build a ccp_cmd for either the SM4 or the SM4_CTR
 * engine, and enqueue it.  Completion is reported asynchronously through
 * ccp_sm4_complete().
 */
static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req);
	struct scatterlist *iv_sg = NULL;
	struct ccp_cmd *cmd = NULL;
	enum ccp_sm4_alg_mode mode;
	enum ccp_sm4_action action;
	int ret;

	if (!ctx->u.sm4.key_len)
		return -ENOKEY;

	/* Stream-like modes (CTR/OFB/CFB) accept any length; the block
	 * modes (ECB/CBC and their HS variants) require a multiple of
	 * the SM4 block size.
	 */
	mode = ctx->u.sm4.mode;
	if ((mode != CCP_SM4_ALG_MODE_CTR) &&
	    (mode != CCP_SM4_ALG_MODE_OFB) &&
	    (mode != CCP_SM4_ALG_MODE_CFB) &&
	    (req->cryptlen & (SM4_BLOCK_SIZE - 1)))
		return -EINVAL;

	/* Every mode except ECB/ECB-HS needs an IV; copy it into the
	 * request context so the engine can update it in place.
	 */
	if ((mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) {
		if (!req->iv)
			return -EINVAL;

		memcpy(rctx->iv, req->iv, SM4_BLOCK_SIZE);
		iv_sg = &rctx->iv_sg;
		sg_init_one(iv_sg, rctx->iv, SM4_BLOCK_SIZE);
	}

	cmd = &rctx->cmd;
	memset(cmd, 0, sizeof(*cmd));
	INIT_LIST_HEAD(&cmd->entry);
	action = encrypt ? CCP_SM4_ACTION_ENCRYPT : CCP_SM4_ACTION_DECRYPT;
	if (mode == CCP_SM4_ALG_MODE_CTR) {
		cmd->engine = CCP_ENGINE_SM4_CTR;
		cmd->u.sm4_ctr.action = action;
		/* size = 63, step = 1: counter-field configuration.
		 * NOTE(review): presumably a 64-bit counter (bits-1)
		 * incremented by 1 per block — confirm against the CCP
		 * hardware spec.
		 */
		cmd->u.sm4_ctr.size = 63;
		cmd->u.sm4_ctr.step = 1;
		cmd->u.sm4_ctr.key = &ctx->u.sm4.key_sg;
		cmd->u.sm4_ctr.key_len = SM4_KEY_SIZE;
		cmd->u.sm4_ctr.iv = iv_sg;
		cmd->u.sm4_ctr.iv_len = SM4_BLOCK_SIZE;
		cmd->u.sm4_ctr.src = req->src;
		cmd->u.sm4_ctr.dst = req->dst;
		cmd->u.sm4_ctr.src_len = req->cryptlen;
	} else {
		cmd->engine = CCP_ENGINE_SM4;
		cmd->u.sm4.mode = mode & CCP_SM4_MODE_MASK;
		cmd->u.sm4.action = action;
		/* HS_SEL bit routes ECB/CBC to the high-secure engine. */
		if (mode & CCP_SM4_MODE_HS_SEL)
			cmd->u.sm4.select = 1;
		cmd->u.sm4.key = &ctx->u.sm4.key_sg;
		cmd->u.sm4.key_len = SM4_KEY_SIZE;
		cmd->u.sm4.iv = iv_sg;
		cmd->u.sm4.iv_len = iv_sg ? SM4_BLOCK_SIZE : 0;
		cmd->u.sm4.src = req->src;
		cmd->u.sm4.dst = req->dst;
		cmd->u.sm4.src_len = req->cryptlen;
	}

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}
static int ccp_sm4_encrypt(struct skcipher_request *req)
{
return ccp_sm4_crypt(req, true);
}
static int ccp_sm4_decrypt(struct skcipher_request *req)
{
return ccp_sm4_crypt(req, false);
}
/* skcipher .init hook: capture the per-mode settings in the tfm context
 * and size the per-request context.
 */
static int ccp_sm4_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);

	/* Room for the working IV and the queued ccp_cmd. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx));

	ctx->u.sm4.mode = alg->mode;
	ctx->complete = ccp_sm4_complete;

	return 0;
}
/* Template skcipher_alg shared by every SM4 mode.  Per-mode fields
 * (cra_name, cra_driver_name, cra_blocksize, ivsize) are filled in at
 * registration time by ccp_register_sm4_hygon_alg().
 */
static const struct skcipher_alg ccp_sm4_defaults = {
	.setkey = ccp_sm4_setkey,
	.encrypt = ccp_sm4_encrypt,
	.decrypt = ccp_sm4_decrypt,
	.min_keysize = SM4_KEY_SIZE,
	.max_keysize = SM4_KEY_SIZE,
	.init = ccp_sm4_init_tfm,
	.base.cra_flags = CRYPTO_ALG_ASYNC |
		CRYPTO_ALG_KERN_DRIVER_ONLY |
		CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = SM4_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct ccp_ctx),
	.base.cra_priority = CCP_CRA_PRIORITY,
	.base.cra_module = THIS_MODULE,
};
/* Per-mode registration template for one SM4 skcipher algorithm. */
struct ccp_sm4_def {
	enum ccp_sm4_alg_mode mode;	/* alg mode, may include HS_SEL */
	unsigned int version;		/* minimum CCP version required */
	const char *name;		/* crypto API cra_name */
	const char *driver_name;	/* crypto API cra_driver_name */
	unsigned int blocksize;		/* cra_blocksize override */
	unsigned int ivsize;		/* skcipher ivsize override */
	const struct skcipher_alg *alg_defaults;	/* template to copy */
};
/* Registration table: one entry per supported SM4 mode.  The two HS
 * entries share cra_name with their normal counterparts and differ only
 * in cra_driver_name, so they coexist in the crypto API.  CTR uses a
 * blocksize of 1 (stream cipher semantics); ECB variants take no IV.
 */
static struct ccp_sm4_def sm4_algs[] = {
	{
		.mode = CCP_SM4_ALG_MODE_ECB,
		.version = CCP_VERSION(5, 0),
		.name = "ecb(sm4)",
		.driver_name = "ecb-sm4-ccp",
		.blocksize = SM4_BLOCK_SIZE,
		.ivsize = 0,
		.alg_defaults = &ccp_sm4_defaults,
	},
	{
		.mode = CCP_SM4_ALG_MODE_ECB_HS,
		.version = CCP_VERSION(5, 0),
		.name = "ecb(sm4)",
		.driver_name = "ecb-sm4-hs-ccp",
		.blocksize = SM4_BLOCK_SIZE,
		.ivsize = 0,
		.alg_defaults = &ccp_sm4_defaults,
	},
	{
		.mode = CCP_SM4_ALG_MODE_CBC,
		.version = CCP_VERSION(5, 0),
		.name = "cbc(sm4)",
		.driver_name = "cbc-sm4-ccp",
		.blocksize = SM4_BLOCK_SIZE,
		.ivsize = SM4_BLOCK_SIZE,
		.alg_defaults = &ccp_sm4_defaults,
	},
	{
		.mode = CCP_SM4_ALG_MODE_CBC_HS,
		.version = CCP_VERSION(5, 0),
		.name = "cbc(sm4)",
		.driver_name = "cbc-sm4-hs-ccp",
		.blocksize = SM4_BLOCK_SIZE,
		.ivsize = SM4_BLOCK_SIZE,
		.alg_defaults = &ccp_sm4_defaults,
	},
	{
		.mode = CCP_SM4_ALG_MODE_OFB,
		.version = CCP_VERSION(5, 0),
		.name = "ofb(sm4)",
		.driver_name = "ofb-sm4-ccp",
		.blocksize = SM4_BLOCK_SIZE,
		.ivsize = SM4_BLOCK_SIZE,
		.alg_defaults = &ccp_sm4_defaults,
	},
	{
		.mode = CCP_SM4_ALG_MODE_CFB,
		.version = CCP_VERSION(5, 0),
		.name = "cfb(sm4)",
		.driver_name = "cfb-sm4-ccp",
		.blocksize = SM4_BLOCK_SIZE,
		.ivsize = SM4_BLOCK_SIZE,
		.alg_defaults = &ccp_sm4_defaults,
	},
	{
		.mode = CCP_SM4_ALG_MODE_CTR,
		.version = CCP_VERSION(5, 0),
		.name = "ctr(sm4)",
		.driver_name = "ctr-sm4-ccp",
		.blocksize = 1,
		.ivsize = SM4_BLOCK_SIZE,
		.alg_defaults = &ccp_sm4_defaults,
	},
};
static int ccp_register_sm4_hygon_alg(struct list_head *head,
const struct ccp_sm4_def *def)
{
struct ccp_crypto_skcipher_alg *ccp_alg;
struct skcipher_alg *alg;
int ret;
ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
if (!ccp_alg)
return -ENOMEM;
INIT_LIST_HEAD(&ccp_alg->entry);
ccp_alg->mode = def->mode;
/* copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->ivsize = def->ivsize;
ret = crypto_register_skcipher(alg);
if (ret) {
pr_err("%s skcipher algorithm registration error (%d)\n",
alg->base.cra_name, ret);
kfree(ccp_alg);
return ret;
}
list_add(&ccp_alg->entry, head);
return 0;
}
/* Register every entry of sm4_algs[] whose minimum CCP version is met by
 * the present hardware.  Stops and returns the first failure; already
 * registered entries remain on @head for the caller to clean up.
 */
int ccp_register_sm4_hygon_algs(struct list_head *head)
{
	unsigned int hw_version = ccp_version();
	int rc;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(sm4_algs); idx++) {
		if (hw_version >= sm4_algs[idx].version) {
			rc = ccp_register_sm4_hygon_alg(head, &sm4_algs[idx]);
			if (rc)
				return rc;
		}
	}

	return 0;
}

View File

@ -342,6 +342,21 @@ struct ccp_sm3_exp_ctx {
u8 buf[SM3_BLOCK_SIZE];
};
/***** SM4 related defines *****/
/* Per-tfm SM4 state, embedded in struct ccp_ctx. */
struct ccp_sm4_ctx {
	struct scatterlist key_sg;	/* single-entry sg wrapping @key */
	u8 key[SM4_KEY_SIZE];		/* raw 16-byte SM4 key */
	u32 key_len;			/* 0 until a key has been set */
	u32 mode;			/* enum ccp_sm4_alg_mode value */
};
/* Per-request SM4 state (skcipher reqsize). */
struct ccp_sm4_req_ctx {
	struct scatterlist iv_sg;	/* single-entry sg wrapping @iv */
	u8 iv[SM4_BLOCK_SIZE];		/* working IV, updated on completion */
	struct ccp_cmd cmd;		/* command queued to the CCP */
};
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
@ -353,6 +368,7 @@ struct ccp_ctx {
struct ccp_des3_ctx des3;
struct ccp_sm2_ctx sm2;
struct ccp_sm3_ctx sm3;
struct ccp_sm4_ctx sm4;
} u;
};
@ -370,5 +386,6 @@ int ccp_register_des3_algs(struct list_head *head);
int ccp_register_rsa_algs(struct list_head *head);
int ccp_register_sm2_hygon_algs(struct list_head *head);
int ccp_register_sm3_hygon_algs(struct list_head *head);
int ccp_register_sm4_hygon_algs(struct list_head *head);
#endif

View File

@ -141,6 +141,18 @@ union ccp_function {
u16 type:4;
u16 rsvd2:1;
} sm3;
struct {
u16 rsvd:7;
u16 encrypt:1;
u16 mode:4;
u16 select:1;
u16 rsvd2:2;
} sm4;
struct {
u16 size:7;
u16 encrypt:1;
u16 step:7;
} sm4_ctr;
u16 raw;
};
@ -164,6 +176,12 @@ union ccp_function {
#define CCP_SM2_RAND(p) ((p)->sm2.rand)
#define CCP_SM2_MODE(p) ((p)->sm2.mode)
#define CCP_SM3_TYPE(p) ((p)->sm3.type)
#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt)
#define CCP_SM4_MODE(p) ((p)->sm4.mode)
#define CCP_SM4_SELECT(p) ((p)->sm4.select)
#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt)
#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step)
#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size)
/* Word 0 */
#define CCP5_CMD_DW0(p) ((p)->dw0)
@ -672,6 +690,90 @@ static int ccp5_perform_sm3(struct ccp_op *op)
return ccp5_do_cmd(&desc, op->cmd_q);
}
/* Build and submit one CCP5 descriptor for the SM4 engine.
 *
 * The IV occupies the first SM4_BLOCK_SIZE bytes of the LSB context
 * slot and the key the following SM4_KEY_SIZE bytes (see
 * ccp_run_sm4_cmd()), hence the SM4_BLOCK_SIZE offset in @key_addr.
 */
static int ccp5_perform_sm4(struct ccp_op *op)
{
	struct ccp5_desc desc;
	union ccp_function function;
	u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE;

	op->cmd_q->total_sm4_ops++;

	memset(&desc, 0, Q_DESC_SIZE);
	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4;
	CCP5_CMD_SOC(&desc) = op->soc;
	CCP5_CMD_IOC(&desc) = op->ioc;
	CCP5_CMD_INIT(&desc) = op->init;
	CCP5_CMD_EOM(&desc) = op->eom;
	CCP5_CMD_PROT(&desc) = 0;

	/* Pack action/mode/select into the descriptor function word. */
	function.raw = 0;
	CCP_SM4_ENCRYPT(&function) = op->u.sm4.action;
	CCP_SM4_MODE(&function) = op->u.sm4.mode;
	CCP_SM4_SELECT(&function) = op->u.sm4.select;
	CCP5_CMD_FUNCTION(&desc) = function.raw;

	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

	/* Key comes from the local storage block, not system memory. */
	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
	CCP5_CMD_KEY_HI(&desc) = 0;
	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;

	return ccp5_do_cmd(&desc, op->cmd_q);
}
/* Build and submit one CCP5 descriptor for the SM4_CTR engine.
 *
 * Same LSB slot layout as ccp5_perform_sm4(): IV at offset 0, key at
 * offset SM4_BLOCK_SIZE, hence the offset in @key_addr.
 */
static int ccp5_perform_sm4_ctr(struct ccp_op *op)
{
	struct ccp5_desc desc;
	union ccp_function function;
	u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE;

	op->cmd_q->total_sm4_ctr_ops++;

	memset(&desc, 0, Q_DESC_SIZE);
	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR;
	CCP5_CMD_SOC(&desc) = op->soc;
	CCP5_CMD_IOC(&desc) = op->ioc;
	CCP5_CMD_INIT(&desc) = op->init;
	CCP5_CMD_EOM(&desc) = op->eom;
	CCP5_CMD_PROT(&desc) = 0;

	/* Pack counter size/step and direction into the function word. */
	function.raw = 0;
	CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size;
	CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action;
	CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step;
	CCP5_CMD_FUNCTION(&desc) = function.raw;

	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

	/* Key comes from the local storage block, not system memory. */
	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
	CCP5_CMD_KEY_HI(&desc) = 0;
	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;

	return ccp5_do_cmd(&desc, op->cmd_q);
}
static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
int q_mask = 1 << cmd_q->id;
@ -1150,6 +1252,26 @@ static void ccp5_destroy(struct ccp_device *ccp)
}
}
/* Number of 32-bit TRNG mask register writes needed at init time.
 *
 * According to the spec, the SM4 high-secure module needs 64 bytes of
 * mask data, so the mask register must be written 16 times (or a
 * multiple of 16).  The AES algorithm needs 48 bytes, i.e. 12 writes
 * or a multiple of 12.
 */
static int ccp5_get_trng_mask_param(void)
{
#ifdef CONFIG_HYGON_GM
	/* for sm4 HS */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		return 16;
#endif
	/* for AES HS */
	return 12;
}
static void ccp5_config(struct ccp_device *ccp)
{
/* Public side */
@ -1160,12 +1282,13 @@ static void ccp5other_config(struct ccp_device *ccp)
{
int i;
u32 rnd;
int len = ccp5_get_trng_mask_param();
/* We own all of the queues on the NTB CCP */
iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
for (i = 0; i < 12; i++) {
for (i = 0; i < len; i++) {
rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
}
@ -1193,6 +1316,8 @@ static const struct ccp_actions ccp5_actions = {
.ecc = ccp5_perform_ecc,
.sm2 = ccp5_perform_sm2,
.sm3 = ccp5_perform_sm3,
.sm4 = ccp5_perform_sm4,
.sm4_ctr = ccp5_perform_sm4_ctr,
.sballoc = ccp_lsb_alloc,
.sbfree = ccp_lsb_free,
.init = ccp5_init,

View File

@ -336,6 +336,8 @@ struct ccp_cmd_queue {
unsigned long total_ecc_ops;
unsigned long total_sm2_ops;
unsigned long total_sm3_ops;
unsigned long total_sm4_ops;
unsigned long total_sm4_ctr_ops;
} ____cacheline_aligned;
struct ccp_device {
@ -540,6 +542,18 @@ struct ccp_sm3_op {
u64 msg_bits;
};
/* Per-descriptor parameters for the SM4 engine (see ccp5_perform_sm4). */
struct ccp_sm4_op {
	enum ccp_sm4_action action;	/* encrypt or decrypt */
	enum ccp_sm4_mode mode;		/* ECB/CBC/OFB/CFB */
	u32 select;			/* nonzero: use high-secure engine */
};
/* Per-descriptor parameters for the SM4_CTR engine
 * (see ccp5_perform_sm4_ctr).
 */
struct ccp_sm4_ctr_op {
	u32 size;			/* counter size field */
	enum ccp_sm4_action action;	/* encrypt or decrypt */
	u32 step;			/* counter increment step */
};
struct ccp_op {
struct ccp_cmd_queue *cmd_q;
@ -565,6 +579,8 @@ struct ccp_op {
struct ccp_ecc_op ecc;
struct ccp_sm2_op sm2;
struct ccp_sm3_op sm3;
struct ccp_sm4_op sm4;
struct ccp_sm4_ctr_op sm4_ctr;
} u;
};
@ -675,6 +691,8 @@ struct ccp_actions {
int (*ecc)(struct ccp_op *);
int (*sm2)(struct ccp_op *op);
int (*sm3)(struct ccp_op *op);
int (*sm4)(struct ccp_op *op);
int (*sm4_ctr)(struct ccp_op *op);
u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
unsigned int (*get_free_slots)(struct ccp_cmd_queue *);

View File

@ -2708,6 +2708,230 @@ e_ctx:
return ret;
}
/* Execute a CCP_ENGINE_SM4 command on @cmd_q.
 *
 * Validates the scatterlists, stages the IV (non-ECB) and key into the
 * queue's LSB context slot (IV at offset 0, key at SM4_BLOCK_SIZE —
 * layout matched by ccp5_perform_sm4()), then walks src/dst through the
 * engine in SM4_BLOCK_SIZE units.  For chaining modes the updated IV is
 * read back from the LSB on success.  The IV/key staging buffer is
 * cleared before being freed so key material does not linger.
 */
static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sm4_engine *sm4 = &cmd->u.sm4;
	struct ccp_dm_workarea iv_key;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	int ret;

	if (sm4->src == NULL || sm4->dst == NULL)
		return -EINVAL;
	if (sm4->key == NULL || sm4->key_len != SM4_KEY_SIZE)
		return -EINVAL;
	if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0)
		return -EINVAL;
	if (sm4->mode != CCP_SM4_MODE_ECB) {
		if (sm4->iv == NULL || sm4->iv_len != SM4_BLOCK_SIZE)
			return -EINVAL;
		if (sg_nents_for_len(sm4->iv, SM4_BLOCK_SIZE) < 0)
			return -EINVAL;
	}

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.ioc = 1;
	op.sb_ctx = cmd_q->sb_ctx;
	op.u.sm4.action = sm4->action;
	op.u.sm4.mode = sm4->mode;
	op.u.sm4.select = sm4->select;

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(sm4->src) == sg_virt(sm4->dst))
		in_place = true;
	ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len,
		SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		return ret;
	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, sm4->dst, sm4->src_len,
			SM4_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* load iv and key */
	ret = ccp_init_dm_workarea(&iv_key, cmd_q,
		SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	if (sm4->mode != CCP_SM4_MODE_ECB)
		ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE);
	ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE);
	ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx,
		CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_iv_key;
	}

	/* send data to the CCP SM4 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true);
		/* Flag the last chunk so the engine finalizes the op. */
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->sm4(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_iv_key;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (sm4->mode != CCP_SM4_MODE_ECB) {
		/* retrieve the SM4 iv */
		ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx,
			CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_iv_key;
		}
		ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE);
	}

e_iv_key:
	/* Scrub IV/key staging memory before releasing it. */
	memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE);
	ccp_dm_free(&iv_key);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

	return ret;
}
/* Execute a CCP_ENGINE_SM4_CTR command on @cmd_q.
 *
 * Mirrors ccp_run_sm4_cmd(): stages the counter IV and key into the
 * queue's LSB context slot (IV at offset 0, key at SM4_BLOCK_SIZE),
 * streams src/dst through the SM4_CTR engine, then reads the advanced
 * counter back into @sm4_ctr->iv.  The IV/key staging buffer is cleared
 * before being freed.
 */
static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sm4_ctr_engine *sm4_ctr = &cmd->u.sm4_ctr;
	struct ccp_dm_workarea iv_key;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	int ret;

	if (sm4_ctr->src == NULL || sm4_ctr->dst == NULL)
		return -EINVAL;
	if (sm4_ctr->key == NULL || sm4_ctr->key_len != SM4_KEY_SIZE)
		return -EINVAL;
	if (sg_nents_for_len(sm4_ctr->key, SM4_KEY_SIZE) < 0)
		return -EINVAL;
	if (sm4_ctr->iv == NULL || sm4_ctr->iv_len != SM4_BLOCK_SIZE)
		return -EINVAL;
	if (sg_nents_for_len(sm4_ctr->iv, SM4_BLOCK_SIZE) < 0)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.ioc = 1;
	op.sb_ctx = cmd_q->sb_ctx;
	op.u.sm4_ctr.size = sm4_ctr->size;
	op.u.sm4_ctr.action = sm4_ctr->action;
	op.u.sm4_ctr.step = sm4_ctr->step;

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(sm4_ctr->src) == sg_virt(sm4_ctr->dst))
		in_place = true;
	ret = ccp_init_data(&src, cmd_q, sm4_ctr->src, sm4_ctr->src_len,
		SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		return ret;
	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, sm4_ctr->dst,
			sm4_ctr->src_len, SM4_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* load iv and key */
	ret = ccp_init_dm_workarea(&iv_key, cmd_q,
		SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	ccp_set_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE);
	ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4_ctr->key, 0, SM4_KEY_SIZE);
	ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx,
		CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_iv_key;
	}

	/* send data to the CCP SM4_CTR engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, false);
		/* Flag the last chunk so the engine finalizes the op. */
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_iv_key;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* retrieve the SM4_CTR iv */
	ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx,
		CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_iv_key;
	}
	ccp_get_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE);

e_iv_key:
	/* Scrub IV/key staging memory before releasing it. */
	memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE);
	ccp_dm_free(&iv_key);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

	return ret;
}
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
int ret;
@ -2758,6 +2982,12 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
case CCP_ENGINE_SM3:
ret = ccp_run_sm3_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_SM4:
ret = ccp_run_sm4_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_SM4_CTR:
ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd);
break;
default:
ret = -EINVAL;
}

View File

@ -675,6 +675,116 @@ struct ccp_sm3_engine {
u64 msg_bits;
};
/***** SM4 engine *****/
#define SM4_BLOCK_SIZE 16
#define SM4_KEY_SIZE 16
#define CCP_SM4_MODE_MASK 0x0F
#define CCP_SM4_MODE_HS_SEL 0x10
/**
* ccp_sm4_mode - SM4 operation mode
*
* @CCP_SM4_MODE_ECB: ECB mode
* @CCP_SM4_MODE_CBC: CBC mode
* @CCP_SM4_MODE_OFB: OFB mode
* @CCP_SM4_MODE_CFB: CFB mode
* @CCP_SM4_MODE_CTR: CTR mode
*/
enum ccp_sm4_mode {
	CCP_SM4_MODE_ECB = 0,
	CCP_SM4_MODE_CBC,
	CCP_SM4_MODE_OFB,
	CCP_SM4_MODE_CFB,
	CCP_SM4_MODE_CTR,
	CCP_SM4_MODE__LAST,	/* sentinel, not a real mode */
};
/**
* ccp_sm4_action - SM4 operation
*
* @CCP_SM4_ACTION_DECRYPT: SM4 decrypt operation
* @CCP_SM4_ACTION_ENCRYPT: SM4 encrypt operation
*/
enum ccp_sm4_action {
	CCP_SM4_ACTION_DECRYPT = 0,	/* written directly into the hw function word */
	CCP_SM4_ACTION_ENCRYPT,
	CCP_SM4_ACTION__LAST,		/* sentinel, not a real action */
};
/**
 * struct ccp_sm4_engine - CCP SM4 operation
 * @mode: SM4 operation mode
 * @action: SM4 operation (decrypt/encrypt)
 * @select: Indicating that high-secure engine is selected
 * @key: key to be used for this SM4 operation
 * @key_len: length in bytes of key
 * @iv: IV to be used for this SM4 operation
 * @iv_len: length in bytes of iv
 * @src: data to be used for this operation
 * @dst: data produced by this operation
 * @src_len: length in bytes of data used for this operation
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 * - mode, action, select, key, key_len, src, dst, src_len
 * - iv, iv_len for any mode other than ECB
 * - key_len and iv_len must be 16B
 * - src_len must be multiple of 16B
 * - high-secure engine only for ECB and CBC mode
 *
 * NOTE(review): the skcipher layer (ccp_sm4_crypt) only enforces the
 * 16B-multiple src_len for ECB/CBC and submits arbitrary lengths for
 * OFB/CFB through this engine — confirm whether the restriction above
 * really applies to the stream-like modes.
 *
 * The iv variable is used as both input and output. On completion of the
 * SM4 operation the new IV overwrites the old IV.
 */
struct ccp_sm4_engine {
	enum ccp_sm4_mode mode;
	enum ccp_sm4_action action;
	u32 select; /* Indicating that high-secure engine is selected */
	struct scatterlist *key;
	u32 key_len; /* In bytes */
	struct scatterlist *iv;
	u32 iv_len; /* In bytes */
	struct scatterlist *src, *dst;
	u64 src_len; /* In bytes */
};
/***** SM4_CTR engine *****/
/**
 * struct ccp_sm4_ctr_engine - CCP SM4_CTR operation
 * @action: SM4_CTR operation (decrypt/encrypt)
 * @size: counter bit size
 * @step: counter increase step
 * @key: key to be used for this SM4 operation
 * @key_len: length in bytes of key
 * @iv: IV to be used for this SM4 operation
 * @iv_len: length in bytes of iv
 * @src: data to be used for this operation
 * @dst: data produced by this operation
 * @src_len: length in bytes of data used for this operation
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 * - action, size, step, key, key_len, iv, iv_len, src, dst, src_len
 * - key_len and iv_len must be 16B
 * - src_len has no 16B-multiple restriction (counter mode)
 *
 * The iv variable is used as both input and output. On completion of the
 * SM4_CTR operation the new IV overwrites the old IV.
 */
struct ccp_sm4_ctr_engine {
	enum ccp_sm4_action action;
	u32 size;
	u32 step;
	struct scatterlist *key;
	u32 key_len; /* In bytes */
	struct scatterlist *iv;
	u32 iv_len; /* In bytes */
	struct scatterlist *src, *dst;
	u64 src_len; /* In bytes */
};
/**
* ccp_engine - CCP operation identifiers
*
@ -700,6 +810,8 @@ enum ccp_engine {
CCP_ENGINE_ECC,
CCP_ENGINE_SM2 = 8, /* fixed value */
CCP_ENGINE_SM3,
CCP_ENGINE_SM4,
CCP_ENGINE_SM4_CTR,
CCP_ENGINE__LAST,
};
@ -750,6 +862,8 @@ struct ccp_cmd {
struct ccp_ecc_engine ecc;
struct ccp_sm2_engine sm2;
struct ccp_sm3_engine sm3;
struct ccp_sm4_engine sm4;
struct ccp_sm4_ctr_engine sm4_ctr;
} u;
/* Completion callback support */