crypto: qat - use pre-allocated buffers in datapath
In order to do DMAs, the QAT device requires that the scatterlist
structures are mapped and translated into a format that the firmware can
understand. This format is defined as the composition of a scatter-gather
list (SGL) descriptor header, struct qat_alg_buf_list, followed by a
variable number of flat buffer descriptors, struct qat_alg_buf.
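For reference, the firmware-visible layout consists of the following two
structures (taken from the driver's own definitions, which this patch moves
into qat_crypto.h; the field comments are interpretive annotations, not part
of the original definitions):

struct qat_alg_buf {
	u32 len;	/* length of the flat buffer */
	u32 resrvd;
	u64 addr;	/* DMA address of the flat buffer */
} __packed;

struct qat_alg_buf_list {
	u64 resrvd;
	u32 num_bufs;		/* number of entries in bufers[] */
	u32 num_mapped_bufs;	/* entries that are DMA mapped */
	struct qat_alg_buf bufers[];	/* flat buffer descriptors */
} __packed;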
The allocation and mapping of these data structures are done each time a
request is received from the skcipher and aead APIs.
In an OOM situation, this behaviour might lead to a deadlock if an
allocation fails.
Based on the conversation in [1], increase the size of the aead and
skcipher request contexts to include an SGL descriptor that can handle
a maximum of 4 flat buffers.
If a request exceeds 4 entries, buffers are allocated dynamically.
[1] https://lore.kernel.org/linux-crypto/20200722072932.GA27544@gondor.apana.org.au/
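In simplified form, the resulting source-buffer logic in
qat_alg_sgl_to_bufl() is the following (a condensed sketch of the change in
the diff below, with the mapping loop and error paths omitted):

	qat_req->buf.sgl_src_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		/* More than 4 entries: fall back to a dynamic allocation */
		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		/* Use the SGL descriptor pre-allocated in the request context */
		bufl = &qat_req->buf.sgl_src.sgl_hdr;
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		qat_req->buf.sgl_src_valid = true;
	}

The destination buffer list is handled the same way via sgl_dst_valid, and
qat_alg_free_bufl() calls kfree() only when the corresponding flag is false.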
Cc: stable@vger.kernel.org
Fixes: d370cec321 ("crypto: qat - Intel(R) QAT crypto interface")
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 1731160ff7
commit e0831e7af4
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -46,19 +46,6 @@
 static DEFINE_MUTEX(algs_lock);
 static unsigned int active_devs;
 
-struct qat_alg_buf {
-	u32 len;
-	u32 resrvd;
-	u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
-	u64 resrvd;
-	u32 num_bufs;
-	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
 /* Common content descriptor */
 struct qat_alg_cd {
 	union {
@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-	kfree(bl);
+
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bl);
+
 	if (blp != blpout) {
 		/* If out of place operation dma unmap only data */
 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -704,7 +694,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 					 DMA_BIDIRECTIONAL);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-		kfree(blout);
+
+		if (!qat_req->buf.sgl_dst_valid)
+			kfree(blout);
 	}
 }
 
@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	int node = dev_to_node(&GET_DEV(inst->accel_dev));
 
 	if (unlikely(!n))
 		return -EINVAL;
 
-	bufl = kzalloc_node(sz, GFP_ATOMIC,
-			    dev_to_node(&GET_DEV(inst->accel_dev)));
-	if (unlikely(!bufl))
-		return -ENOMEM;
+	qat_req->buf.sgl_src_valid = false;
+	qat_req->buf.sgl_dst_valid = false;
+
+	if (n > QAT_MAX_BUFF_DESC) {
+		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+		if (unlikely(!bufl))
+			return -ENOMEM;
+	} else {
+		bufl = &qat_req->buf.sgl_src.sgl_hdr;
+		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+		qat_req->buf.sgl_src_valid = true;
+	}
 
 	for_each_sg(sgl, sg, n, i)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		struct qat_alg_buf *bufers;
 
 		n = sg_nents(sglout);
-		sz_out = struct_size(buflout, bufers, n + 1);
+		sz_out = struct_size(buflout, bufers, n);
 		sg_nctr = 0;
-		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
-				       dev_to_node(&GET_DEV(inst->accel_dev)));
-		if (unlikely(!buflout))
-			goto err_in;
+
+		if (n > QAT_MAX_BUFF_DESC) {
+			buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+			if (unlikely(!buflout))
+				goto err_in;
+		} else {
+			buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+			qat_req->buf.sgl_dst_valid = true;
+		}
 
 		bufers = buflout->bufers;
 		for_each_sg(sglout, sg, n, i)
@@ -810,7 +817,9 @@ err_out:
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-	kfree(buflout);
+
+	if (!qat_req->buf.sgl_dst_valid)
+		kfree(buflout);
 
 err_in:
 	if (!dma_mapping_error(dev, blp))
@@ -823,7 +832,8 @@ err_in:
 					 bufl->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
 
-	kfree(bufl);
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bufl);
 
 	dev_err(dev, "Failed to map buf for dma\n");
 	return -ENOMEM;
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -21,6 +21,26 @@ struct qat_crypto_instance {
 	atomic_t refctr;
 };
 
+#define QAT_MAX_BUFF_DESC	4
+
+struct qat_alg_buf {
+	u32 len;
+	u32 resrvd;
+	u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+	u64 resrvd;
+	u32 num_bufs;
+	u32 num_mapped_bufs;
+	struct qat_alg_buf bufers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+	struct qat_alg_buf_list sgl_hdr;
+	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
 struct qat_crypto_request_buffs {
 	struct qat_alg_buf_list *bl;
 	dma_addr_t blp;
@@ -28,6 +48,10 @@ struct qat_crypto_request_buffs {
 	dma_addr_t bloutp;
 	size_t sz;
 	size_t sz_out;
+	bool sgl_src_valid;
+	bool sgl_dst_valid;
+	struct qat_alg_fixed_buf_list sgl_src;
+	struct qat_alg_fixed_buf_list sgl_dst;
 };
 
 struct qat_crypto_request;