dmaengine: Use sg_dma_len(sg) instead of sg->length

sg->length may or may not contain the length of the DMA region to transfer,
depending on the architecture - sg_dma_len(sg) always does. On the
architectures used by the drivers modified in this patch it probably is the
case that sg->length contains the DMA transfer length, but to be consistent
and future proof, change them to use sg_dma_len(sg).

To quote Russell King:
	sg->length is meaningless to something performing DMA.

	In cases where sg_dma_len(sg) and sg->length are the same storage, then
	there's no problem. But scatterlists _can_ (and on some architectures do)
	split them - especially when you have an IOMMU which can allow you to
	combine a scatterlist into fewer entries.

	So, anything using sg->length for the size of a scatterlist's DMA transfer
	_after_ a call to dma_map_sg() is almost certainly buggy.
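
To illustrate the correct pattern outside the context of any one driver,
here is a minimal sketch (not part of this patch; the foo_* names are
hypothetical, the mapping calls and sg_dma_*() accessors are the standard
DMA-mapping API):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical hardware helper, for illustration only. */
static void foo_hw_queue_segment(dma_addr_t addr, unsigned int len)
{
	/* program one DMA segment into the (imaginary) controller */
}

static int foo_prep_sg(struct device *dev, struct scatterlist *sgl, int sg_len)
{
	struct scatterlist *sg;
	int i, nents;

	/* An IOMMU may coalesce entries: nents can be smaller than sg_len. */
	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	/* Walk the *mapped* entries; the caller must dma_unmap_sg() later. */
	for_each_sg(sgl, sg, nents, i) {
		/* Correct: address and length as the device sees them. */
		foo_hw_queue_segment(sg_dma_address(sg), sg_dma_len(sg));
		/*
		 * Buggy: sg->length is the CPU-side length and need not
		 * match sg_dma_len(sg) once the list has been mapped.
		 */
	}

	return 0;
}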

The patch has been generated using the following coccinelle patch:
<smpl>
@@
struct scatterlist *sg;
expression X;
@@
-sg[X].length
+sg_dma_len(&sg[X])
@@
struct scatterlist *sg;
@@
-sg->length
+sg_dma_len(sg)
</smpl>
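
For reference: Coccinelle applies semantic patches with its spatch tool.
Assuming the rules above are saved as use-sg-dma-len.cocci (an illustrative
name), a recent spatch should be able to rewrite the drivers in place with
something like:

	spatch --sp-file use-sg-dma-len.cocci --in-place drivers/dma/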

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Lars-Peter Clausen, 2012-04-25 20:50:52 +02:00; committed by Vinod Koul
commit fdaf9c4b22 (parent cbb796ccd8)
7 changed files with 16 additions and 16 deletions

drivers/dma/amba-pl08x.c

@@ -1328,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	int ret, tmp;
 
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-			__func__, sgl->length, plchan->name);
+			__func__, sg_dma_len(sgl), plchan->name);
 
 	txd = pl08x_get_txd(plchan, flags);
 	if (!txd) {

drivers/dma/coh901318.c

@@ -1040,7 +1040,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (!sgl)
 		goto out;
-	if (sgl->length == 0)
+	if (sg_dma_len(sgl) == 0)
 		goto out;
 
 	spin_lock_irqsave(&cohc->lock, flg);

drivers/dma/imx-dma.c

@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
 	struct scatterlist *sg = d->sg;
 	unsigned long now;
 
-	now = min(d->len, sg->length);
+	now = min(d->len, sg_dma_len(sg));
 	if (d->len != IMX_DMA_LENGTH_LOOP)
 		d->len -= now;
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
 	for_each_sg(sgl, sg, sg_len, i) {
-		dma_length += sg->length;
+		dma_length += sg_dma_len(sg);
 	}
 
 	switch (imxdmac->word_size) {
 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		if (sgl->length & 3 || sgl->dma_address & 3)
+		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
 			return NULL;
 		break;
 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
-		if (sgl->length & 1 || sgl->dma_address & 1)
+		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
 			return NULL;
 		break;
 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 		imxdmac->sg_list[i].page_link = 0;
 		imxdmac->sg_list[i].offset = 0;
 		imxdmac->sg_list[i].dma_address = dma_addr;
-		imxdmac->sg_list[i].length = period_len;
+		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
 		dma_addr += period_len;
 	}
 
 	/* close the loop */
 	imxdmac->sg_list[periods].offset = 0;
-	imxdmac->sg_list[periods].length = 0;
+	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
 	imxdmac->sg_list[periods].page_link =
 		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

drivers/dma/imx-sdma.c

@@ -941,7 +941,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		bd->buffer_addr = sg->dma_address;
 
-		count = sg->length;
+		count = sg_dma_len(sg);
 
 		if (count > 0xffff) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",

drivers/dma/intel_mid_dma.c

@@ -394,7 +394,7 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
 		}
 	}
 	/*Populate CTL_HI values*/
-	ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+	ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
 					desc->width,
 					midc->dma->block_size);
 	/*Populate SAR and DAR values*/
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
 		txd = intel_mid_dma_prep_memcpy(chan,
 					mids->dma_slave.dst_addr,
 					mids->dma_slave.src_addr,
-					sgl->length,
+					sg_dma_len(sgl),
 					flags);
 		return txd;
 	} else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
 	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
 		sg_len, direction, flags);
 
-	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
 	if (NULL == txd) {
 		pr_err("MDMA: Prep memcpy failed\n");
 		return NULL;

drivers/dma/mxs-dma.c

@@ -415,9 +415,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
 	} else {
 		for_each_sg(sgl, sg, sg_len, i) {
-			if (sg->length > MAX_XFER_BYTES) {
+			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
 				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
-						sg->length, MAX_XFER_BYTES);
+						sg_dma_len(sg), MAX_XFER_BYTES);
 				goto err_out;
 			}
@@ -425,7 +425,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
 			ccw->bufaddr = sg->dma_address;
-			ccw->xfer_bytes = sg->length;
+			ccw->xfer_bytes = sg_dma_len(sg);
 
 			ccw->bits = 0;
 			ccw->bits |= CCW_CHAIN;

drivers/dma/ste_dma40.c

@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 	}
 
 	sg[periods].offset = 0;
-	sg[periods].length = 0;
+	sg_dma_len(&sg[periods]) = 0;
 	sg[periods].page_link =
 		((unsigned long)sg | 0x01) & ~0x02;