dmaengine fixes for v5.13

Bunch of driver fixes, notably:
 - More idxd fixes for driver unregister, error handling and bus
   assignment
 - HAS_IOMEM depends fix for a few drivers
 - lock fix in the pl330 driver
 - xilinx driver fixes for register initialization, missing
   dependencies and limiting descriptor IDs
 - mediatek descriptor management fixes

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmDJ9UwACgkQfBQHDyUj
g0fTYA/9ERo+2cohKCfxDdgjkvsGF1wdqFpUmg/ZGfcDyB3NEu2YKV8NhPR2+QCV
LLDScP790bfOOtM8MKd3PrRD7Qx8Rdx6BH5Fdojwf1hoeLQ1sGUYVQEs2gcXxjM+
XM20mHQvgmbpOBVDdAPlxAlFA/0ERlKSFSVOwJRTtV7haUwZM7WE3krQmjGaJpsB
MTTEVGOjVLUH3q0yxzd9DAOKPK3h/uLqHWQACxyRY4tmjIKE42SeCO63db0yXzbL
FR1dh9/S7BGm2VO0mm+ByFnmygN+w7mBNzgAYBDECsibJAa2AZX2c9xI2h6LiI/W
SKMp+NtW6yBwb4lUDQ56DY47Qzx45ZCTjmdq4+2DaHZV+ijF0rESudPDKYICdNep
dawuJjAUfI7scbpZhuNO+oAmq3ZMbpq0K6yCEPsb18hHq6b7DGlZ+LZS7vv2XElB
0VywgAV1eV4p/GpCyLOtPjS1HCC4Lq2rtW/YaAByr7ZLF0q1RXMhtuthP/sjH8zj
gys18nKSL6iBmc9dm02ITq/r5hXgjfWoqBvTQVfuTVv8vBZREQ/WalZEFSFaYtRJ
0HVn7S4H8CcsUcALrY8PSKfIIenTIsQtjpNdKdbKpuyxcjhjehjbfW13LgirgvBS
F/LNhWIZBAje1prsvbdAJSyPt5lnxD7nVPfbG+LxIloygIBp+y4=
=FL1v
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes, notably:
  - More idxd fixes for driver unregister, error handling and bus
    assignment
  - HAS_IOMEM depends fix for a few drivers
  - lock fix in the pl330 driver
  - xilinx driver fixes for register initialization, missing
    dependencies and limiting descriptor IDs
  - mediatek descriptor management fixes"

* tag 'dmaengine-fix-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: mediatek: use GFP_NOWAIT instead of GFP_ATOMIC in prep_dma
  dmaengine: mediatek: do not issue a new desc if one is still current
  dmaengine: mediatek: free the proper desc in desc_free handler
  dmaengine: ipu: fix doc warning in ipu_irq.c
  dmaengine: rcar-dmac: Fix PM reference leak in rcar_dmac_probe()
  dmaengine: idxd: Fix missing error code in idxd_cdev_open()
  dmaengine: stedma40: add missing iounmap() on error in d40_probe()
  dmaengine: SF_PDMA depends on HAS_IOMEM
  dmaengine: QCOM_HIDMA_MGMT depends on HAS_IOMEM
  dmaengine: ALTERA_MSGDMA depends on HAS_IOMEM
  dmaengine: idxd: Add missing cleanup for early error out in probe call
  dmaengine: xilinx: dpdma: Limit descriptor IDs to 16 bits
  dmaengine: xilinx: dpdma: Add missing dependencies to Kconfig
  dmaengine: stm32-mdma: fix PM reference leak in stm32_mdma_alloc_chan_resourc()
  dmaengine: zynqmp_dma: Fix PM reference leak in zynqmp_dma_alloc_chan_resourc()
  dmaengine: xilinx: dpdma: initialize registers before request_irq
  dmaengine: pl330: fix wrong usage of spinlock flags in dma_cyclc
  dmaengine: fsl-dpaa2-qdma: Fix error return code in two functions
  dmaengine: idxd: add missing dsa driver unregister
  dmaengine: idxd: add engine 'struct device' missing bus type assignment
commit 6b00bc639f
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -59,6 +59,7 @@ config DMA_OF
 #devices
 config ALTERA_MSGDMA
 	tristate "Altera / Intel mSGDMA Engine"
+	depends on HAS_IOMEM
 	select DMA_ENGINE
 	help
 	  Enable support for Altera / Intel mSGDMA controller.
@@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
 
 config XILINX_ZYNQMP_DPDMA
 	tristate "Xilinx DPDMA Engine"
+	depends on HAS_IOMEM && OF
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
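Both Kconfig hunks cure randconfig build failures rather than runtime bugs: on architectures configured without HAS_IOMEM there is no ioremap() family, so a driver that maps MMIO registers fails to build unless its config symbol declares the dependency. A minimal sketch of the probe pattern that creates the requirement; the driver and function names here are hypothetical, not taken from the patched drivers:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/err.h>
	#include <linux/io.h>

	/* Hypothetical driver: any probe that maps registers like this
	 * needs "depends on HAS_IOMEM" in its Kconfig entry, otherwise
	 * randconfig builds on ioremap-less architectures break. */
	static int foo_dma_probe(struct platform_device *pdev)
	{
		void __iomem *regs;

		regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		writel(0x1, regs);	/* e.g. a controller-enable bit */
		return 0;
	}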
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
 	}
 
 	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+		err = -EINVAL;
 		dev_err(dev, "DPDMAI major version mismatch\n"
 			"Found %u.%u, supported version is %u.%u\n",
 			priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
 	}
 
 	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+		err = -EINVAL;
 		dev_err(dev, "DPDMAI minor version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
 			priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
 		ppriv->store =
 			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
 		if (!ppriv->store) {
+			err = -ENOMEM;
 			dev_err(dev, "dpaa2_io_store_create() failed\n");
 			goto err_store;
 		}
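All three hunks fix the same bug class: the code jumps to an error label without assigning err, so if the variable still holds 0 from an earlier successful call, the function reports success on a failed path. A reduced sketch of the pattern; the helpers (alloc_a, create_b, teardown) are hypothetical stand-ins:

	#include <linux/device.h>
	#include <linux/errno.h>

	int alloc_a(struct device *dev);	/* hypothetical, 0 on success */
	void *create_b(struct device *dev);	/* hypothetical, NULL on failure */
	void teardown(struct device *dev);	/* hypothetical */

	static int setup_thing(struct device *dev)
	{
		int err;

		err = alloc_a(dev);
		if (err)
			goto out;

		if (!create_b(dev)) {
			/* Without this assignment err still holds 0 from
			 * alloc_a(), and the caller would see success. */
			err = -ENOMEM;
			goto out;
		}
		return 0;
	out:
		teardown(dev);
		return err;
	}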
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 		pasid = iommu_sva_get_pasid(sva);
 		if (pasid == IOMMU_PASID_INVALID) {
 			iommu_sva_unbind_device(sva);
+			rc = -EINVAL;
 			goto failed;
 		}
 
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -168,6 +168,32 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 	return rc;
 }
 
+static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+{
+	struct pci_dev *pdev = idxd->pdev;
+	struct idxd_irq_entry *irq_entry;
+	int i, msixcnt;
+
+	msixcnt = pci_msix_vec_count(pdev);
+	if (msixcnt <= 0)
+		return;
+
+	irq_entry = &idxd->irq_entries[0];
+	free_irq(irq_entry->vector, irq_entry);
+
+	for (i = 1; i < msixcnt; i++) {
+
+		irq_entry = &idxd->irq_entries[i];
+		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
+			idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+						       IDXD_IRQ_MSIX);
+		free_irq(irq_entry->vector, irq_entry);
+	}
+
+	idxd_mask_error_interrupts(idxd);
+	pci_free_irq_vectors(pdev);
+}
+
 static int idxd_setup_wqs(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -242,6 +268,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
 		engine->idxd = idxd;
 		device_initialize(&engine->conf_dev);
 		engine->conf_dev.parent = &idxd->conf_dev;
+		engine->conf_dev.bus = &dsa_bus_type;
 		engine->conf_dev.type = &idxd_engine_device_type;
 		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
 		if (rc < 0) {
@@ -303,6 +330,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
 	return rc;
 }
 
+static void idxd_cleanup_internals(struct idxd_device *idxd)
+{
+	int i;
+
+	for (i = 0; i < idxd->max_groups; i++)
+		put_device(&idxd->groups[i]->conf_dev);
+	for (i = 0; i < idxd->max_engines; i++)
+		put_device(&idxd->engines[i]->conf_dev);
+	for (i = 0; i < idxd->max_wqs; i++)
+		put_device(&idxd->wqs[i]->conf_dev);
+	destroy_workqueue(idxd->wq);
+}
+
 static int idxd_setup_internals(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
@@ -531,12 +571,12 @@ static int idxd_probe(struct idxd_device *idxd)
 		dev_dbg(dev, "Loading RO device config\n");
 		rc = idxd_device_load_config(idxd);
 		if (rc < 0)
-			goto err;
+			goto err_config;
 	}
 
 	rc = idxd_setup_interrupts(idxd);
 	if (rc)
-		goto err;
+		goto err_config;
 
 	dev_dbg(dev, "IDXD interrupt setup complete.\n");
 
@@ -549,6 +589,8 @@ static int idxd_probe(struct idxd_device *idxd)
 	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
 	return 0;
 
+ err_config:
+	idxd_cleanup_internals(idxd);
  err:
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
@@ -556,6 +598,18 @@ static int idxd_probe(struct idxd_device *idxd)
 	return rc;
 }
 
+static void idxd_cleanup(struct idxd_device *idxd)
+{
+	struct device *dev = &idxd->pdev->dev;
+
+	perfmon_pmu_remove(idxd);
+	idxd_cleanup_interrupts(idxd);
+	idxd_cleanup_internals(idxd);
+	if (device_pasid_enabled(idxd))
+		idxd_disable_system_pasid(idxd);
+	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+}
+
 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
@@ -608,7 +662,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	rc = idxd_register_devices(idxd);
 	if (rc) {
 		dev_err(dev, "IDXD sysfs setup failed\n");
-		goto err;
+		goto err_dev_register;
 	}
 
 	idxd->state = IDXD_DEV_CONF_READY;
@@ -618,6 +672,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	return 0;
 
+ err_dev_register:
+	idxd_cleanup(idxd);
  err:
 	pci_iounmap(pdev, idxd->reg_base);
  err_iomap:
@@ -787,6 +843,7 @@ module_init(idxd_init_module);
 
 static void __exit idxd_exit_module(void)
 {
+	idxd_unregister_driver();
 	pci_unregister_driver(&idxd_pci_driver);
 	idxd_cdev_remove();
 	idxd_unregister_bus_type();
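The idxd changes above follow the standard probe-unwinding discipline: each setup step gains a matching cleanup helper, and the error labels run those cleanups in reverse order of setup, so a failure at any stage releases exactly what was acquired. A reduced sketch of that shape; the foo_* names are hypothetical stand-ins, not the idxd functions themselves:

	struct foo_device;

	int foo_setup_internals(struct foo_device *fd);	/* all hypothetical */
	int foo_setup_interrupts(struct foo_device *fd);
	int foo_register_devices(struct foo_device *fd);
	void foo_cleanup_interrupts(struct foo_device *fd);
	void foo_cleanup_internals(struct foo_device *fd);

	static int foo_probe(struct foo_device *fd)
	{
		int rc;

		rc = foo_setup_internals(fd);	/* e.g. wqs, engines, groups */
		if (rc)
			return rc;

		rc = foo_setup_interrupts(fd);
		if (rc)
			goto err_internals;

		rc = foo_register_devices(fd);
		if (rc)
			goto err_interrupts;

		return 0;

	err_interrupts:				/* unwind in reverse order */
		foo_cleanup_interrupts(fd);
	err_internals:
		foo_cleanup_internals(fd);
		return rc;
	}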
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -230,7 +230,7 @@ out:
 }
 
 /**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
+ * ipu_irq_unmap() - unmap an IPU interrupt source
  * @source: interrupt source bit position (see ipu_irq_map())
  * @return: 0 or negative error code
  */
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
 
 static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
 {
-	struct dma_chan *chan = vd->tx.chan;
-	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
-	kfree(c->desc);
+	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
 }
 
 static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
 
 static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
 {
-	struct mtk_uart_apdma_desc *d = c->desc;
-
 	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
 	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
 	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
-	list_del(&d->vd.node);
-	vchan_cookie_complete(&d->vd);
 }
 
 static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
 
 	c->rx_status = d->avail_len - cnt;
 	mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
 
-	list_del(&d->vd.node);
-	vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+	struct mtk_uart_apdma_desc *d = c->desc;
+
+	if (d) {
+		list_del(&d->vd.node);
+		vchan_cookie_complete(&d->vd);
+		c->desc = NULL;
+	}
 }
 
 static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
 		mtk_uart_apdma_rx_handler(c);
 	else if (c->dir == DMA_MEM_TO_DEV)
 		mtk_uart_apdma_tx_handler(c);
+	mtk_uart_apdma_chan_complete_handler(c);
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 
 	return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
 		return NULL;
 
 	/* Now allocate and setup the descriptor */
-	d = kzalloc(sizeof(*d), GFP_ATOMIC);
+	d = kzalloc(sizeof(*d), GFP_NOWAIT);
 	if (!d)
 		return NULL;
 
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
 	unsigned long flags;
 
 	spin_lock_irqsave(&c->vc.lock, flags);
-	if (vchan_issue_pending(&c->vc)) {
+	if (vchan_issue_pending(&c->vc) && !c->desc) {
 		vd = vchan_next_desc(&c->vc);
 		c->desc = to_mtk_uart_apdma_desc(&vd->tx);
 
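Two of the mediatek changes deserve a note. dmaengine prep_* callbacks can run from non-sleeping contexts, and the subsystem convention is GFP_NOWAIT: like GFP_ATOMIC it never sleeps, but it fails fast instead of dipping into the emergency reserves, which a descriptor allocation does not justify. The new completion helper likewise tolerates an interrupt arriving with no descriptor in flight. A minimal sketch of the allocation convention, using a hypothetical descriptor type:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct my_uart_dma_desc { u32 avail_len; };	/* hypothetical */

	/* prep_* may run in atomic context: allocate with GFP_NOWAIT so
	 * the call never sleeps, and return NULL to the dmaengine core on
	 * failure rather than draining the GFP_ATOMIC emergency pools. */
	static struct my_uart_dma_desc *my_prep_alloc(void)
	{
		return kzalloc(sizeof(struct my_uart_dma_desc), GFP_NOWAIT);
	}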
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 	for (i = 0; i < len / period_len; i++) {
 		desc = pl330_get_desc(pch);
 		if (!desc) {
+			unsigned long iflags;
+
 			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
 				__func__, __LINE__);
 
 			if (!first)
 				return NULL;
 
-			spin_lock_irqsave(&pl330->pool_lock, flags);
+			spin_lock_irqsave(&pl330->pool_lock, iflags);
 
 			while (!list_empty(&first->node)) {
 				desc = list_entry(first->node.next,
@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 
 			list_move_tail(&first->node, &pl330->desc_pool);
 
-			spin_unlock_irqrestore(&pl330->pool_lock, flags);
+			spin_unlock_irqrestore(&pl330->pool_lock, iflags);
 
 			return NULL;
 		}
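The pl330 bug is an instance of reusing one variable for two different purposes: in pl330_prep_dma_cyclic() the `flags` identifier is the caller-supplied `unsigned long flags` argument carrying DMA_PREP_* bits, and passing it to spin_lock_irqsave() silently overwrites it with saved interrupt state. Giving the inner critical section its own variable fixes that. A reduced sketch of the pattern, with hypothetical names (pool_lock, undo_partial_list) rather than the driver's own:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(pool_lock);	/* hypothetical pool lock */

	/* Here "flags" is a caller-supplied bitmask (as with the DMA_PREP_*
	 * argument to a prep callback), not saved IRQ state. */
	static void undo_partial_list(unsigned long flags)
	{
		unsigned long iflags;	/* fix: dedicated IRQ-state variable */

		/* The bug variant, spin_lock_irqsave(&pool_lock, flags),
		 * would clobber the caller's bitmask with IRQ state. */
		spin_lock_irqsave(&pool_lock, iflags);
		/* ... return partially built descriptors to the pool ... */
		spin_unlock_irqrestore(&pool_lock, iflags);
	}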
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
 
 config QCOM_HIDMA_MGMT
 	tristate "Qualcomm Technologies HIDMA Management support"
+	depends on HAS_IOMEM
 	select DMA_ENGINE
 	help
 	  Enable support for the Qualcomm Technologies HIDMA Management.
--- a/drivers/dma/sf-pdma/Kconfig
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -1,5 +1,6 @@
 config SF_PDMA
 	tristate "Sifive PDMA controller driver"
+	depends on HAS_IOMEM
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
 	/* Enable runtime PM and initialize the device. */
 	pm_runtime_enable(&pdev->dev);
-	ret = pm_runtime_get_sync(&pdev->dev);
+	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
 		return ret;
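The pm_runtime conversions in this pull (rcar-dmac here, stm32-mdma and zynqmp_dma below) all fix the same reference leak: pm_runtime_get_sync() increments the device usage count even when the resume fails, so returning the error directly leaks a reference. pm_runtime_resume_and_get() drops the count itself on failure, which makes a bare early return safe. A sketch of the two equivalent forms:

	#include <linux/pm_runtime.h>

	/* Old form: the error path must rebalance the usage count. */
	static int power_up_old(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* easy to forget */
			return ret;
		}
		return 0;
	}

	/* New form: the helper already dropped the count on failure. */
	static int power_up_new(struct device *dev)
	{
		int ret = pm_runtime_resume_and_get(dev);

		if (ret < 0)
			return ret;
		return 0;
	}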
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
 
 		kfree(base->lcla_pool.base_unaligned);
 
+		if (base->lcpa_base)
+			iounmap(base->lcpa_base);
+
 		if (base->phy_lcpa)
 			release_mem_region(base->phy_lcpa,
 					   base->lcpa_size);
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
 		return -ENOMEM;
 	}
 
-	ret = pm_runtime_get_sync(dmadev->ddev.dev);
+	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
 	if (ret < 0)
 		return ret;
 
@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
 	u32 ccr, id;
 	int ret;
 
-	ret = pm_runtime_get_sync(dev);
+	ret = pm_runtime_resume_and_get(dev);
 	if (ret < 0)
 		return ret;
 
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -113,6 +113,7 @@
 #define XILINX_DPDMA_CH_VDO				0x020
 #define XILINX_DPDMA_CH_PYLD_SZ				0x024
 #define XILINX_DPDMA_CH_DESC_ID				0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK			GENMASK(15, 0)
 
 /* DPDMA descriptor fields */
 #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
@@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
 	 * will be used, but it should be enough.
 	 */
 	list_for_each_entry(sw_desc, &desc->descriptors, node)
-		sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
+		sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+				      & XILINX_DPDMA_CH_DESC_ID_MASK;
 
 	sw_desc = list_first_entry(&desc->descriptors,
 				   struct xilinx_dpdma_sw_desc, node);
@@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
 	if (!chan->running || !pending)
 		goto out;
 
-	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
+	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+		& XILINX_DPDMA_CH_DESC_ID_MASK;
 
 	/* If the retrigger raced with vsync, retry at the next frame. */
 	sw_desc = list_first_entry(&pending->descriptors,
@@ -1459,7 +1462,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
  */
 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
 {
-	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
 	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
 }
 
@@ -1596,6 +1599,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
 }
 
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+	unsigned int i;
+	void __iomem *reg;
+
+	/* Disable all interrupts */
+	xilinx_dpdma_disable_irq(xdev);
+
+	/* Stop all channels */
+	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+		reg = xdev->reg + XILINX_DPDMA_CH_BASE
+				+ XILINX_DPDMA_CH_OFFSET * i;
+		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+	}
+
+	/* Clear the interrupt status registers */
+	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
 static int xilinx_dpdma_probe(struct platform_device *pdev)
 {
 	struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1645,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
 	if (IS_ERR(xdev->reg))
 		return PTR_ERR(xdev->reg);
 
+	dpdma_hw_init(xdev);
+
 	xdev->irq = platform_get_irq(pdev, 0);
 	if (xdev->irq < 0) {
 		dev_err(xdev->dev, "failed to get platform irq\n");
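The 16-bit descriptor-ID change is worth spelling out: DMA cookies grow monotonically as 32-bit values, while the DPDMA hardware field holding the descriptor ID is only 16 bits wide. Once cookies pass 0xffff, an unmasked comparison between the cookie kept in software and the ID read back from the register can never match again, so both sides must be reduced to the field width. A small sketch of the invariant; the names are illustrative, not the driver's:

	#include <linux/bits.h>
	#include <linux/types.h>

	#define MY_DESC_ID_MASK		GENMASK(15, 0)	/* 16-bit hw field */

	/* Compare a 32-bit cookie against a hardware descriptor ID: both
	 * sides are masked to the register width, otherwise they diverge
	 * permanently once the cookie counter exceeds 0xffff. */
	static bool desc_id_matches(u32 cookie, u32 hw_id)
	{
		return (cookie & MY_DESC_ID_MASK) == (hw_id & MY_DESC_ID_MASK);
	}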
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
 	struct zynqmp_dma_desc_sw *desc;
 	int i, ret;
 
-	ret = pm_runtime_get_sync(chan->dev);
+	ret = pm_runtime_resume_and_get(chan->dev);
 	if (ret < 0)
 		return ret;
 