dmaengine fixes for v5.7-rc4

Merge tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:

 "Core:
   - Documentation typo fixes
   - fix the channel indexes
   - dmatest: fixes for process hang and iterations

  Drivers:
   - hisilicon: build error fix without PCI_MSI
   - ti-k3: deadlock fix
   - uniphier-xdmac: fix for reg region
   - pch: fix data race
   - tegra: fix clock state"

* tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dmatest: Fix process hang when reading 'wait' parameter
  dmaengine: dmatest: Fix iteration non-stop logic
  dmaengine: tegra-apb: Ensure that clock is enabled during of DMA synchronization
  dmaengine: fix channel index enumeration
  dmaengine: mmp_tdma: Reset channel error on release
  dmaengine: mmp_tdma: Do not ignore slave config validation errors
  dmaengine: pch_dma.c: Avoid data race between probe and irq handler
  dt-bindings: dma: uniphier-xdmac: switch to single reg region
  include/linux/dmaengine: Typos fixes in API documentation
  dmaengine: xilinx_dma: Add missing check for empty list
  dmaengine: ti: k3-psil: fix deadlock on error path
  dmaengine: hisilicon: Fix build error without PCI_MSI

@@ -22,9 +22,7 @@ properties:
     const: socionext,uniphier-xdmac
 
   reg:
-    items:
-      - description: XDMAC base register region (offset and length)
-      - description: XDMAC extension register region (offset and length)
+    maxItems: 1
 
   interrupts:
     maxItems: 1
@@ -49,12 +47,13 @@ required:
   - reg
   - interrupts
   - "#dma-cells"
+  - dma-channels
 
 examples:
   - |
     xdmac: dma-controller@5fc10000 {
         compatible = "socionext,uniphier-xdmac";
-        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
+        reg = <0x5fc10000 0x5300>;
         interrupts = <0 188 4>;
         #dma-cells = <2>;
         dma-channels = <16>;

@@ -241,7 +241,8 @@ config FSL_RAID
 
 config HISI_DMA
         tristate "HiSilicon DMA Engine support"
-        depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+        depends on ARM64 || COMPILE_TEST
+        depends on PCI_MSI
         select DMA_ENGINE
         select DMA_VIRTUAL_CHANNELS
         help

@@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
         struct dma_chan_dev *chan_dev;
 
         chan_dev = container_of(dev, typeof(*chan_dev), device);
-        if (atomic_dec_and_test(chan_dev->idr_ref)) {
-                ida_free(&dma_ida, chan_dev->dev_id);
-                kfree(chan_dev->idr_ref);
-        }
         kfree(chan_dev);
 }
 
@@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
 }
 
 static int __dma_async_device_channel_register(struct dma_device *device,
-                                               struct dma_chan *chan,
-                                               int chan_id)
+                                               struct dma_chan *chan)
 {
         int rc = 0;
-        int chancnt = device->chancnt;
-        atomic_t *idr_ref;
-        struct dma_chan *tchan;
-
-        tchan = list_first_entry_or_null(&device->channels,
-                                         struct dma_chan, device_node);
-        if (!tchan)
-                return -ENODEV;
-
-        if (tchan->dev) {
-                idr_ref = tchan->dev->idr_ref;
-        } else {
-                idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-                if (!idr_ref)
-                        return -ENOMEM;
-                atomic_set(idr_ref, 0);
-        }
 
         chan->local = alloc_percpu(typeof(*chan->local));
         if (!chan->local)
@@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
-         * When the chan_id is a negative value, we are dynamically adding
-         * the channel. Otherwise we are static enumerating.
-         */
-        chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+        mutex_lock(&device->chan_mutex);
+        chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+        mutex_unlock(&device->chan_mutex);
+        if (chan->chan_id < 0) {
+                pr_err("%s: unable to alloc ida for chan: %d\n",
+                       __func__, chan->chan_id);
+                goto err_out;
+        }
+
         chan->dev->device.class = &dma_devclass;
         chan->dev->device.parent = device->dev;
         chan->dev->chan = chan;
-        chan->dev->idr_ref = idr_ref;
         chan->dev->dev_id = device->dev_id;
-        atomic_inc(idr_ref);
         dev_set_name(&chan->dev->device, "dma%dchan%d",
                      device->dev_id, chan->chan_id);
 
         rc = device_register(&chan->dev->device);
         if (rc)
-                goto err_out;
+                goto err_out_ida;
         chan->client_count = 0;
-        device->chancnt = chan->chan_id + 1;
+        device->chancnt++;
 
         return 0;
 
+ err_out_ida:
+        mutex_lock(&device->chan_mutex);
+        ida_free(&device->chan_ida, chan->chan_id);
+        mutex_unlock(&device->chan_mutex);
 err_out:
         free_percpu(chan->local);
         kfree(chan->dev);
-        if (atomic_dec_return(idr_ref) == 0)
-                kfree(idr_ref);
         return rc;
 }
 
@@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
 {
         int rc;
 
-        rc = __dma_async_device_channel_register(device, chan, -1);
+        rc = __dma_async_device_channel_register(device, chan);
         if (rc < 0)
                 return rc;
 
@@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
         device->chancnt--;
         chan->dev->chan = NULL;
         mutex_unlock(&dma_list_mutex);
+        mutex_lock(&device->chan_mutex);
+        ida_free(&device->chan_ida, chan->chan_id);
+        mutex_unlock(&device->chan_mutex);
         device_unregister(&chan->dev->device);
         free_percpu(chan->local);
 }
@@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
  */
 int dma_async_device_register(struct dma_device *device)
 {
-        int rc, i = 0;
+        int rc;
         struct dma_chan* chan;
 
         if (!device)
@@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
         if (rc != 0)
                 return rc;
 
+        mutex_init(&device->chan_mutex);
+        ida_init(&device->chan_ida);
+
         /* represent channels in sysfs. Probably want devs too */
         list_for_each_entry(chan, &device->channels, device_node) {
-                rc = __dma_async_device_channel_register(device, chan, i++);
+                rc = __dma_async_device_channel_register(device, chan);
                 if (rc < 0)
                         goto err_out;
         }
@@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
          */
         dma_cap_set(DMA_PRIVATE, device->cap_mask);
         dma_channel_rebalance();
+        ida_free(&dma_ida, device->dev_id);
         dma_device_put(device);
         mutex_unlock(&dma_list_mutex);
 }
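
The dmaengine core hunks above replace the old chancnt/idr_ref bookkeeping with a per-device IDA, so channel indexes stay unique and become reusable once a channel is unregistered. Below is a minimal sketch of that allocation idiom with a hypothetical struct my_dev; only ida_init()/ida_alloc()/ida_free() from <linux/idr.h> and the mutex calls are real kernel API, the rest is illustrative.

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

/* Hypothetical container; only the ida/mutex idiom mirrors the patch. */
struct my_dev {
        struct ida chan_ida;            /* small, reusable integer ids */
        struct mutex chan_mutex;        /* to protect chan_ida, as above */
};

static void my_dev_init(struct my_dev *d)
{
        ida_init(&d->chan_ida);
        mutex_init(&d->chan_mutex);
}

static int my_dev_get_chan_id(struct my_dev *d)
{
        int id;

        mutex_lock(&d->chan_mutex);
        id = ida_alloc(&d->chan_ida, GFP_KERNEL);  /* smallest free id, or -errno */
        mutex_unlock(&d->chan_mutex);

        return id;
}

static void my_dev_put_chan_id(struct my_dev *d, int id)
{
        mutex_lock(&d->chan_mutex);
        ida_free(&d->chan_ida, id);     /* id may be handed out again */
        mutex_unlock(&d->chan_mutex);
}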

@@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
                 struct dmatest_thread *thread;
 
                 list_for_each_entry(thread, &dtc->threads, node) {
-                        if (!thread->done)
+                        if (!thread->done && !thread->pending)
                                 return true;
                 }
         }
@@ -662,8 +662,8 @@ static int dmatest_func(void *data)
         flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         ktime = ktime_get();
-        while (!kthread_should_stop()
-               && !(params->iterations && total_tests >= params->iterations)) {
+        while (!(kthread_should_stop() ||
+               (params->iterations && total_tests >= params->iterations))) {
                 struct dma_async_tx_descriptor *tx = NULL;
                 struct dmaengine_unmap_data *um;
                 dma_addr_t *dsts;

@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
         gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                       size);
         tdmac->desc_arr = NULL;
+        if (tdmac->status == DMA_ERROR)
+                tdmac->status = DMA_COMPLETE;
 
         return;
 }
@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
         if (!desc)
                 goto err_out;
 
-        mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+        if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+                goto err_out;
 
         while (buf < buf_len) {
                 desc = &tdmac->desc_arr[i];

@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
         }
 
         pci_set_master(pdev);
+        pd->dma.dev = &pdev->dev;
 
         err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
         if (err) {
@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
                 goto err_free_irq;
         }
 
-        pd->dma.dev = &pdev->dev;
 
         INIT_LIST_HEAD(&pd->dma.channels);
 

@@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
 static void tegra_dma_synchronize(struct dma_chan *dc)
 {
         struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+        int err;
+
+        err = pm_runtime_get_sync(tdc->tdma->dev);
+        if (err < 0) {
+                dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+                return;
+        }
 
         /*
          * CPU, which handles interrupt, could be busy in
@@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
         wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
 
         tasklet_kill(&tdc->tasklet);
+
+        pm_runtime_put(tdc->tdma->dev);
 }
 
 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
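
The tegra-apb hunks above bracket the blocking wait with runtime PM so the controller's clock is up while its interrupt status is read. The same bracket in isolation, as a sketch: pm_runtime_get_sync()/pm_runtime_put() and dev_err() are the real APIs, while my_sync() and its body are hypothetical.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper; only the get_sync/put bracket mirrors the patch. */
static void my_sync(struct device *dev)
{
        int err;

        err = pm_runtime_get_sync(dev); /* resume the device, take a usage ref */
        if (err < 0) {
                dev_err(dev, "runtime resume failed: %d\n", err);
                return;
        }

        /* ... blocking wait / register reads that need the clock on ... */

        pm_runtime_put(dev);            /* drop the usage reference again */
}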

@@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
                 soc_ep_map = &j721e_ep_map;
         } else {
                 pr_err("PSIL: No compatible machine found for map\n");
+                mutex_unlock(&ep_map_mutex);
                 return ERR_PTR(-ENOTSUPP);
         }
         pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);

@@ -1230,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
                 return ret;
 
         spin_lock_irqsave(&chan->lock, flags);
-
-        desc = list_last_entry(&chan->active_list,
-                               struct xilinx_dma_tx_descriptor, node);
-        /*
-         * VDMA and simple mode do not support residue reporting, so the
-         * residue field will always be 0.
-         */
-        if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
-                residue = xilinx_dma_get_residue(chan, desc);
-
+        if (!list_empty(&chan->active_list)) {
+                desc = list_last_entry(&chan->active_list,
+                                       struct xilinx_dma_tx_descriptor, node);
+                /*
+                 * VDMA and simple mode do not support residue reporting, so the
+                 * residue field will always be 0.
+                 */
+                if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+                        residue = xilinx_dma_get_residue(chan, desc);
+        }
         spin_unlock_irqrestore(&chan->lock, flags);
 
         dma_set_residue(txstate, residue);
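
The xilinx_dma hunk above adds the guard because list_last_entry() performs no emptiness check: on an empty list it would hand back a pointer computed from the list head itself. The same guard in a standalone sketch, using a hypothetical struct item; list_empty()/list_last_entry() are the real <linux/list.h> helpers.

#include <linux/list.h>

/* Hypothetical list element; only the emptiness guard mirrors the patch. */
struct item {
        struct list_head node;
        int payload;
};

static int last_payload_or_zero(struct list_head *head)
{
        struct item *it;

        if (list_empty(head))
                return 0;       /* nothing queued, report a default */

        it = list_last_entry(head, struct item, node);
        return it->payload;     /* safe: the list is known to be non-empty */
}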

@@ -83,9 +83,9 @@ enum dma_transfer_direction {
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is collection of contiguous bytes to be transferred.
  * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  * that when repeated an integral number of times, specifies the transfer.
  * A transfer template is specification of a Frame, the number of times
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
         struct dma_chan *chan;
         struct device device;
         int dev_id;
-        atomic_t *idr_ref;
 };
 
 /**
@@ -835,6 +833,8 @@ struct dma_device {
         int dev_id;
         struct device *dev;
         struct module *owner;
+        struct ida chan_ida;
+        struct mutex chan_mutex; /* to protect chan_ida */
 
         u32 src_addr_widths;
         u32 dst_addr_widths;
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
  * dmaengine_synchronize() needs to be called before it is safe to free
  * any memory that is accessed by previously submitted descriptors or before
  * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
  *
  * This function can be called from atomic context as well as from within a
  * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
  *
 * Synchronizes to the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
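
The two kernel-doc blocks above describe how channel termination is meant to be used; a minimal usage sketch of that sequence follows. dmaengine_terminate_async(), dmaengine_synchronize() and dmaengine_terminate_sync() are the real <linux/dmaengine.h> API; the surrounding shutdown helper is hypothetical.

#include <linux/dmaengine.h>

/* Hypothetical teardown helper following the documented sequence. */
static void my_driver_stop(struct dma_chan *chan)
{
        /* May be called from atomic context; only requests termination. */
        dmaengine_terminate_async(chan);

        /*
         * Must be called from sleepable context. Once it returns, all
         * descriptors have stopped and their callbacks have finished,
         * so memory and resources they used may be freed.
         */
        dmaengine_synchronize(chan);

        /* dmaengine_terminate_sync(chan) combines the two steps above. */
}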