crypto: qat - honor CRYPTO_TFM_REQ_MAY_SLEEP flag
If a request has the flag CRYPTO_TFM_REQ_MAY_SLEEP set, allocate memory
using the flag GFP_KERNEL, otherwise use GFP_ATOMIC.

Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 8fb203c65a
parent 2acbb8771f
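In short, requests whose submitters allow sleeping get GFP_KERNEL allocations, while everything else stays on GFP_ATOMIC; the qat_algs_alloc_flags() helper added at the end of this patch encodes that mapping, and each request path threads the resulting mask through to its kzalloc()/kzalloc_node() calls. Below is a minimal sketch of the pattern, assuming kernel context; the example_* names and the caller are illustrative only and are not part of this patch.

#include <linux/crypto.h>	/* CRYPTO_TFM_REQ_MAY_SLEEP, struct crypto_async_request */
#include <linux/gfp.h>		/* gfp_t, GFP_KERNEL, GFP_ATOMIC */
#include <linux/slab.h>		/* kzalloc_node() */

/* Same mapping as qat_algs_alloc_flags(): a request that is allowed to
 * sleep may allocate with GFP_KERNEL, anything else must stay atomic.
 */
static inline gfp_t example_alloc_flags(struct crypto_async_request *req)
{
	return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Hypothetical caller: the chosen mask replaces a hard-coded
 * GFP_ATOMIC/GFP_KERNEL at the allocation site.
 */
static void *example_alloc_buf(struct crypto_async_request *base,
			       size_t sz, int node)
{
	return kzalloc_node(sz, example_alloc_flags(base), node);
}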
@@ -703,7 +703,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 			       struct scatterlist *sgl,
 			       struct scatterlist *sglout,
-			       struct qat_crypto_request *qat_req)
+			       struct qat_crypto_request *qat_req,
+			       gfp_t flags)
 {
 	struct device *dev = &GET_DEV(inst->accel_dev);
 	int i, sg_nctr = 0;
@@ -723,7 +724,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	qat_req->buf.sgl_dst_valid = false;

 	if (n > QAT_MAX_BUFF_DESC) {
-		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+		bufl = kzalloc_node(sz, flags, node);
 		if (unlikely(!bufl))
 			return -ENOMEM;
 	} else {
@@ -765,7 +766,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	sg_nctr = 0;

 	if (n > QAT_MAX_BUFF_DESC) {
-		buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+		buflout = kzalloc_node(sz_out, flags, node);
 		if (unlikely(!buflout))
 			goto err_in;
 	} else {
@@ -966,6 +967,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
 	int digst_size = crypto_aead_authsize(aead_tfm);
+	gfp_t f = qat_algs_alloc_flags(&areq->base);
 	int ret;
 	u32 cipher_len;

@@ -973,7 +975,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	if (cipher_len % AES_BLOCK_SIZE != 0)
 		return -EINVAL;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;

@@ -1008,6 +1010,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
+	gfp_t f = qat_algs_alloc_flags(&areq->base);
 	struct icp_qat_fw_la_bulk_req *msg;
 	u8 *iv = areq->iv;
 	int ret;
@@ -1015,7 +1018,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
 		return -EINVAL;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;

@@ -1193,13 +1196,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	gfp_t f = qat_algs_alloc_flags(&req->base);
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret;

 	if (req->cryptlen == 0)
 		return 0;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;

@@ -1258,13 +1262,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	gfp_t f = qat_algs_alloc_flags(&req->base);
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret;

 	if (req->cryptlen == 0)
 		return 0;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;

@@ -224,9 +224,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(kpp_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
-	int ret;
+	gfp_t flags = qat_algs_alloc_flags(&req->base);
 	int n_input_params = 0;
 	u8 *vaddr;
+	int ret;

 	if (unlikely(!ctx->xa))
 		return -EINVAL;
@@ -291,7 +292,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	} else {
 		int shift = ctx->p_size - req->src_len;

-		qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->p_size, flags);
 		if (unlikely(!qat_req->src_align))
 			return ret;

@@ -317,7 +318,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
 		qat_req->dst_align = NULL;
 		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->p_size, flags);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;

@@ -650,6 +651,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	gfp_t flags = qat_algs_alloc_flags(&req->base);
 	u8 *vaddr;
 	int ret;

@@ -696,7 +698,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;

-		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->src_align))
 			return ret;

@@ -714,7 +716,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 		qat_req->dst_align = NULL;
 		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 		vaddr = qat_req->dst_align;
@@ -783,6 +785,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	gfp_t flags = qat_algs_alloc_flags(&req->base);
 	u8 *vaddr;
 	int ret;

@@ -839,7 +842,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;

-		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->src_align))
 			return ret;

@@ -857,7 +860,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 		qat_req->dst_align = NULL;
 		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 		vaddr = qat_req->dst_align;
@@ -109,4 +109,9 @@ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
 	return true;
 }

+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
+{
+	return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
 #endif