dmaengine-4.20-rc6
dmaengine fixes for v4.20-rc6 - Fixing imx-sdma handling of channel terminations, this involves reverting two commits and implement async termination - Fix cppi dma channel deletion from pending list on stop - Fix FIFO size for dw controller in Intel Merrifield -----BEGIN PGP SIGNATURE----- iQIcBAABAgAGBQJcCfKmAAoJEHwUBw8lI4NHiDUP/2dOZoPBnzbSXRYVr0PQY1Us 6aB3cClYtrLeGSKzSjiMgNqxRQ/VnMX3iZMmEseuLd9JNQXF48qWcH0HiPgWmoxy OgzcJxc/faAfFumpBnjM5Zm3+kzOWf0xPdVBkFKrJoLIMCZ4rU1bstEujzs+3ViE a8U2BoAADW6RWqjBSMjXGhRfmo0QxXz88wzzeV9l2gtFvgN32SlhD3+cHcRdV81K 23XbeVD7ivmUKPTZWA+4T7O7ShGH2wB4it9DpTgCMsPdrQkdncBTjYg5JwSfNJ4j vfQQO02ctqm9ipZacifc0VknISzMIPRY2F/PScCuUfHbDfyw10u4NHGcZDgRrXLz BvE624XXZoISmRzLvRxdayo1pPVyVzvkT5BvTcsMGx44SrO8fmTf/M/B4HMlj6F+ vCe+h6e77eY+FsIvZfrZiOYEp45OEHTT+kuv97kvLdfBl998CpP9K8B6aODIVCt2 YujqHUSE/Udato9H5jrAms0tZDQqSdTgQU95ZHOWrNn9+MPWRLZ9HUBF7Hmp9zA8 SbCDseUR1bc/WeW8ge/WAbS+KCL1wbwCXm5/t6JI7yXPn/eiVQgUMI71wphtiDFK hMSQX6oZy0vmblMiI7WjInZJn01+NigN7xiF2uxai4Xh7MvRbg9L6pFrgxc3wfB2 3JPfWLdIXp/Zcf/qlR2E =qeTx -----END PGP SIGNATURE----- Merge tag 'dmaengine-fix-4.20-rc6' of git://git.infradead.org/users/vkoul/slave-dma Pull dmaengine fixes from Vinod Koul: "Another pull request for dmaengine. We got bunch of fixes early this week and all are tagged to stable. 
Hope this is the last fix for this cycle: - Fix imx-sdma handling of channel terminations, this involves reverting two commits and implementing async termination - Fix cppi dma channel deletion from pending list on stop - Fix FIFO size for dw controller in Intel Merrifield" * tag 'dmaengine-fix-4.20-rc6' of git://git.infradead.org/users/vkoul/slave-dma: dmaengine: dw: Fix FIFO size for Intel Merrifield dmaengine: cppi41: delete channel from pending list when stop channel dmaengine: imx-sdma: use GFP_NOWAIT for dma descriptor allocations dmaengine: imx-sdma: implement channel termination via worker Revert "dmaengine: imx-sdma: alloclate bd memory from dma pool" Revert "dmaengine: imx-sdma: Use GFP_NOWAIT for dma allocations"
This commit is contained in:
commit
c431b42058
|
@ -1059,12 +1059,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
|
|||
/*
|
||||
* Program FIFO size of channels.
|
||||
*
|
||||
* By default full FIFO (1024 bytes) is assigned to channel 0. Here we
|
||||
* By default full FIFO (512 bytes) is assigned to channel 0. Here we
|
||||
* slice FIFO on equal parts between channels.
|
||||
*/
|
||||
static void idma32_fifo_partition(struct dw_dma *dw)
|
||||
{
|
||||
u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
|
||||
u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
|
||||
IDMA32C_FP_UPDATE;
|
||||
u64 fifo_partition = 0;
|
||||
|
||||
|
@ -1077,7 +1077,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
|
|||
/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
|
||||
fifo_partition |= value << 32;
|
||||
|
||||
/* Program FIFO Partition registers - 128 bytes for each channel */
|
||||
/* Program FIFO Partition registers - 64 bytes per channel */
|
||||
idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
|
||||
idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include <linux/spinlock.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
@ -33,6 +32,7 @@
|
|||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <linux/platform_data/dma-imx-sdma.h>
|
||||
|
@ -376,7 +376,7 @@ struct sdma_channel {
|
|||
u32 shp_addr, per_addr;
|
||||
enum dma_status status;
|
||||
struct imx_dma_data data;
|
||||
struct dma_pool *bd_pool;
|
||||
struct work_struct terminate_worker;
|
||||
};
|
||||
|
||||
#define IMX_DMA_SG_LOOP BIT(0)
|
||||
|
@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_disable_channel_with_delay(struct dma_chan *chan)
|
||||
static void sdma_channel_terminate_work(struct work_struct *work)
|
||||
{
|
||||
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
||||
struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
|
||||
terminate_worker);
|
||||
unsigned long flags;
|
||||
LIST_HEAD(head);
|
||||
|
||||
sdma_disable_channel(chan);
|
||||
spin_lock_irqsave(&sdmac->vc.lock, flags);
|
||||
vchan_get_all_descriptors(&sdmac->vc, &head);
|
||||
sdmac->desc = NULL;
|
||||
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
|
||||
vchan_dma_desc_free_list(&sdmac->vc, &head);
|
||||
|
||||
/*
|
||||
* According to NXP R&D team a delay of one BD SDMA cost time
|
||||
* (maximum is 1ms) should be added after disable of the channel
|
||||
* bit, to ensure SDMA core has really been stopped after SDMA
|
||||
* clients call .device_terminate_all.
|
||||
*/
|
||||
mdelay(1);
|
||||
usleep_range(1000, 2000);
|
||||
|
||||
spin_lock_irqsave(&sdmac->vc.lock, flags);
|
||||
vchan_get_all_descriptors(&sdmac->vc, &head);
|
||||
sdmac->desc = NULL;
|
||||
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
|
||||
vchan_dma_desc_free_list(&sdmac->vc, &head);
|
||||
}
|
||||
|
||||
static int sdma_disable_channel_async(struct dma_chan *chan)
|
||||
{
|
||||
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
||||
|
||||
sdma_disable_channel(chan);
|
||||
|
||||
if (sdmac->desc)
|
||||
schedule_work(&sdmac->terminate_worker);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sdma_channel_synchronize(struct dma_chan *chan)
|
||||
{
|
||||
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
||||
|
||||
vchan_synchronize(&sdmac->vc);
|
||||
|
||||
flush_work(&sdmac->terminate_worker);
|
||||
}
|
||||
|
||||
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
|
||||
{
|
||||
struct sdma_engine *sdma = sdmac->sdma;
|
||||
|
@ -1192,10 +1210,11 @@ out:
|
|||
|
||||
static int sdma_alloc_bd(struct sdma_desc *desc)
|
||||
{
|
||||
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
|
||||
int ret = 0;
|
||||
|
||||
desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
|
||||
&desc->bd_phys);
|
||||
desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
|
||||
GFP_NOWAIT);
|
||||
if (!desc->bd) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
|
@ -1206,7 +1225,9 @@ out:
|
|||
|
||||
static void sdma_free_bd(struct sdma_desc *desc)
|
||||
{
|
||||
dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
|
||||
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
|
||||
|
||||
dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
|
||||
}
|
||||
|
||||
static void sdma_desc_free(struct virt_dma_desc *vd)
|
||||
|
@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
|
|||
if (ret)
|
||||
goto disable_clk_ahb;
|
||||
|
||||
sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
|
||||
sizeof(struct sdma_buffer_descriptor),
|
||||
32, 0);
|
||||
|
||||
return 0;
|
||||
|
||||
disable_clk_ahb:
|
||||
|
@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
|
|||
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
||||
struct sdma_engine *sdma = sdmac->sdma;
|
||||
|
||||
sdma_disable_channel_with_delay(chan);
|
||||
sdma_disable_channel_async(chan);
|
||||
|
||||
sdma_channel_synchronize(chan);
|
||||
|
||||
if (sdmac->event_id0)
|
||||
sdma_event_disable(sdmac, sdmac->event_id0);
|
||||
|
@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
|
|||
|
||||
clk_disable(sdma->clk_ipg);
|
||||
clk_disable(sdma->clk_ahb);
|
||||
|
||||
dma_pool_destroy(sdmac->bd_pool);
|
||||
sdmac->bd_pool = NULL;
|
||||
}
|
||||
|
||||
static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
|
||||
|
@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)
|
|||
|
||||
sdmac->channel = i;
|
||||
sdmac->vc.desc_free = sdma_desc_free;
|
||||
INIT_WORK(&sdmac->terminate_worker,
|
||||
sdma_channel_terminate_work);
|
||||
/*
|
||||
* Add the channel to the DMAC list. Do not add channel 0 though
|
||||
* because we need it internally in the SDMA driver. This also means
|
||||
|
@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)
|
|||
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
|
||||
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
|
||||
sdma->dma_device.device_config = sdma_config;
|
||||
sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
|
||||
sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
|
||||
sdma->dma_device.device_synchronize = sdma_channel_synchronize;
|
||||
sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
|
||||
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
|
||||
sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
|
||||
|
|
|
@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
|
|||
|
||||
desc_phys = lower_32_bits(c->desc_phys);
|
||||
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
|
||||
if (!cdd->chan_busy[desc_num])
|
||||
if (!cdd->chan_busy[desc_num]) {
|
||||
struct cppi41_channel *cc, *_ct;
|
||||
|
||||
/*
|
||||
* channels might still be in the pendling list if
|
||||
* cppi41_dma_issue_pending() is called after
|
||||
* cppi41_runtime_suspend() is called
|
||||
*/
|
||||
list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
|
||||
if (cc != c)
|
||||
continue;
|
||||
list_del(&cc->node);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = cppi41_tear_down_chan(c);
|
||||
if (ret)
|
||||
|
|
Loading…
Reference in New Issue