dmaengine: xgene-dma: Remove memcpy offload support due to performance drop
The DMA engine supports memory copy, RAID5 XOR, RAID6 PQ, and other computations, but the bandwidth of the whole engine is shared among all of its channels. This patch reconfigures which operations the driver advertises so that XOR and PQ computation get maximum performance, by dropping the memcpy offload operation.

Signed-off-by: Rameshwar Prasad Sahu <rsahu@apm.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
commit e6d5bf6a8f
parent 5ec9555ed0
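For context (not part of the patch): dmaengine clients discover offload support through a channel's capability mask, so once DMA_MEMCPY is no longer set here, a capability-based request for a memcpy channel simply will not match the X-Gene channels, while XOR/PQ requests still do. The sketch below is illustrative only; the helper name example_copy is hypothetical, and dma_cap_zero(), dma_cap_set(), dma_request_channel() and dma_release_channel() are the standard dmaengine client calls.

#include <linux/dmaengine.h>
#include <linux/string.h>

/*
 * Illustrative sketch (not part of this patch): a generic client that wants
 * a copy offload asks for a DMA_MEMCPY-capable channel; if none is found it
 * falls back to a plain CPU copy. After this patch the X-Gene channels no
 * longer satisfy such a request.
 */
static void example_copy(void *dst, const void *src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* NULL filter: any channel advertising DMA_MEMCPY will do */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan) {
		memcpy(dst, src, len);	/* no memcpy offload available */
		return;
	}

	/* ... prepare and submit via chan->device->device_prep_dma_memcpy() ... */
	dma_release_channel(chan);
}

The design choice is that a CPU copy is cheap enough that giving memcpy traffic a share of the engine's bandwidth is not worth slowing down the XOR/PQ offloads that RAID really benefits from.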
@@ -907,60 +907,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
 	chan->desc_pool = NULL;
 }
 
-static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
-	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
-	size_t len, unsigned long flags)
-{
-	struct xgene_dma_desc_sw *first = NULL, *new;
-	struct xgene_dma_chan *chan;
-	size_t copy;
-
-	if (unlikely(!dchan || !len))
-		return NULL;
-
-	chan = to_dma_chan(dchan);
-
-	do {
-		/* Allocate the link descriptor from DMA pool */
-		new = xgene_dma_alloc_descriptor(chan);
-		if (!new)
-			goto fail;
-
-		/* Create the largest transaction possible */
-		copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
-
-		/* Prepare DMA descriptor */
-		xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);
-
-		if (!first)
-			first = new;
-
-		new->tx.cookie = 0;
-		async_tx_ack(&new->tx);
-
-		/* Update metadata */
-		len -= copy;
-		dst += copy;
-		src += copy;
-
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
-	} while (len);
-
-	new->tx.flags = flags; /* client is in control of this ack */
-	new->tx.cookie = -EBUSY;
-	list_splice(&first->tx_list, &new->tx_list);
-
-	return &new->tx;
-
-fail:
-	if (!first)
-		return NULL;
-
-	xgene_dma_free_desc_list(chan, &first->tx_list);
-	return NULL;
-}
-
 static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
 	struct dma_chan *dchan, struct scatterlist *dst_sg,
 	u32 dst_nents, struct scatterlist *src_sg,
@@ -1717,7 +1663,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 	dma_cap_zero(dma_dev->cap_mask);
 
 	/* Set DMA device capability */
-	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 	dma_cap_set(DMA_SG, dma_dev->cap_mask);
 
 	/* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
@@ -1744,7 +1689,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 	dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
 	dma_dev->device_issue_pending = xgene_dma_issue_pending;
 	dma_dev->device_tx_status = xgene_dma_tx_status;
-	dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
 	dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
 
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
@@ -1797,8 +1741,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
 
 	/* DMA capability info */
 	dev_info(pdma->dev,
-		 "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
-		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
+		 "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
 		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
 		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");