dmaengine updates for v5.16-rc1

Merge tag 'dmaengine-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "A bunch of driver updates, no new driver or controller support this
  time though:

   - Another pile of idxd updates

   - pm routines cleanup for at_xdmac driver

   - Correct handling of callback_result for few drivers

   - zynqmp_dma driver updates and descriptor management refinement

   - Hardware handshaking support for dw-axi-dmac

   - Support for remotely powered controllers in Qcom bam dma

   - tegra driver updates"

* tag 'dmaengine-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (69 commits)
  dmaengine: ti: k3-udma: Set r/tchan or rflow to NULL if request fail
  dmaengine: ti: k3-udma: Set bchan to NULL if a channel request fail
  dmaengine: stm32-dma: avoid 64-bit division in stm32_dma_get_max_width
  dmaengine: fsl-edma: support edma memcpy
  dmaengine: idxd: fix resource leak on dmaengine driver disable
  dmaengine: idxd: cleanup completion record allocation
  dmaengine: zynqmp_dma: Correctly handle descriptor callbacks
  dmaengine: xilinx_dma: Correctly handle cyclic descriptor callbacks
  dmaengine: altera-msgdma: Correctly handle descriptor callbacks
  dmaengine: at_xdmac: fix compilation warning
  dmaengine: dw-axi-dmac: Simplify assignment in dma_chan_pause()
  dmaengine: qcom: bam_dma: Add "powered remotely" mode
  dt-bindings: dmaengine: bam_dma: Add "powered remotely" mode
  dmaengine: sa11x0: Mark PM functions as __maybe_unused
  dmaengine: switch from 'pci_' to 'dma_' API
  dmaengine: ioat: switch from 'pci_' to 'dma_' API
  dmaengine: hsu: switch from 'pci_' to 'dma_' API
  dmaengine: hisi_dma: switch from 'pci_' to 'dma_' API
  dmaengine: dw: switch from 'pci_' to 'dma_' API
  dmaengine: dw-edma-pcie: switch from 'pci_' to 'dma_' API
  ...
This commit is contained in:
commit e68a7d35bb
@@ -15,6 +15,8 @@ Required properties:
   the secure world.
 - qcom,controlled-remotely : optional, indicates that the bam is controlled by
   remote proccessor i.e. execution environment.
+- qcom,powered-remotely : optional, indicates that the bam is powered up by
+  a remote processor but must be initialized by the local processor.
 - num-channels : optional, indicates supported number of DMA channels in a
   remotely controlled bam.
 - qcom,num-ees : optional, indicates supported number of Execution Environments
@@ -717,7 +717,7 @@ config XILINX_DMA
 
 config XILINX_ZYNQMP_DMA
 	tristate "Xilinx ZynqMP DMA Engine"
-	depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+	depends on ARCH_ZYNQ || MICROBLAZE || ARM64 || COMPILE_TEST
 	select DMA_ENGINE
 	help
 	  Enable support for Xilinx ZynqMP DMA controller.
@@ -585,16 +585,14 @@ static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
 	struct msgdma_sw_desc *desc, *next;
 
 	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
+		struct dmaengine_desc_callback cb;
 
 		list_del(&desc->node);
 
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
+		if (dmaengine_desc_callback_valid(&cb)) {
 			spin_unlock(&mdev->lock);
-			callback(callback_param);
+			dmaengine_desc_callback_invoke(&cb, NULL);
 			spin_lock(&mdev->lock);
 		}
 
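For reference, the dmaengine_desc_callback helpers adopted above come from
drivers/dma/dmaengine.h. A minimal sketch of the pattern, assuming
hypothetical driver types ("my_dev"/"my_desc" are illustrative, not from
this series):

	/* Sketch only: snapshot the callback under the lock, then invoke
	 * it unlocked; passing NULL means no dmaengine_result is fed to a
	 * callback_result hook. */
	static void my_desc_complete(struct my_dev *mdev, struct my_desc *desc)
	{
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock(&mdev->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock(&mdev->lock);
		}
	}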
@@ -155,7 +155,7 @@
 #define AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
 #define	AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
 #define	AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
-#define AT_XDMAC_CC_PERID(i)	(0x7f & (i) << 24)	/* Channel Peripheral Identifier */
+#define AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
 #define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
 #define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
 #define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */
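The AT_XDMAC_CC_PERID change above is a plain C precedence fix: '<<' binds
tighter than '&', so the old macro masked after shifting and yielded zero
for any in-range peripheral ID. A worked illustration (macro names here are
hypothetical, for demonstration only):

	#define OLD_PERID(i)	(0x7f & (i) << 24)	/* == 0x7f & ((i) << 24) */
	#define NEW_PERID(i)	((0x7f & (i)) << 24)

	/* OLD_PERID(5) == 0x7f & 0x05000000 == 0: the ID is lost.
	 * NEW_PERID(5) == 0x05000000, as intended. */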
@@ -1926,8 +1926,31 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
 		return;
 	}
 
-#ifdef CONFIG_PM
-static int atmel_xdmac_prepare(struct device *dev)
+static void at_xdmac_axi_config(struct platform_device *pdev)
+{
+	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
+	bool dev_m2m = false;
+	u32 dma_requests;
+
+	if (!atxdmac->layout->axi_config)
+		return; /* Not supported */
+
+	if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
+				  &dma_requests)) {
+		dev_info(&pdev->dev, "controller in mem2mem mode.\n");
+		dev_m2m = true;
+	}
+
+	if (dev_m2m) {
+		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
+		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
+	} else {
+		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
+		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
+	}
+}
+
+static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
 {
 	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
 	struct dma_chan *chan, *_chan;
@@ -1941,12 +1964,8 @@ static int atmel_xdmac_prepare(struct device *dev)
 	}
 	return 0;
 }
-#else
-#	define atmel_xdmac_prepare NULL
-#endif
 
-#ifdef CONFIG_PM_SLEEP
-static int atmel_xdmac_suspend(struct device *dev)
+static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
 {
 	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
 	struct dma_chan *chan, *_chan;
@@ -1970,11 +1989,12 @@ static int atmel_xdmac_suspend(struct device *dev)
 	return 0;
 }
 
-static int atmel_xdmac_resume(struct device *dev)
+static int __maybe_unused atmel_xdmac_resume(struct device *dev)
 {
 	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
 	struct at_xdmac_chan *atchan;
 	struct dma_chan *chan, *_chan;
+	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
 	int i;
 	int ret;
 
@@ -1982,6 +2002,8 @@ static int atmel_xdmac_resume(struct device *dev)
 	if (ret)
 		return ret;
 
+	at_xdmac_axi_config(pdev);
+
 	/* Clear pending interrupts. */
 	for (i = 0; i < atxdmac->dma.chancnt; i++) {
 		atchan = &atxdmac->chan[i];
@@ -2005,31 +2027,6 @@ static int atmel_xdmac_resume(struct device *dev)
 	}
 	return 0;
 }
-#endif /* CONFIG_PM_SLEEP */
-
-static void at_xdmac_axi_config(struct platform_device *pdev)
-{
-	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
-	bool dev_m2m = false;
-	u32 dma_requests;
-
-	if (!atxdmac->layout->axi_config)
-		return; /* Not supported */
-
-	if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
-				  &dma_requests)) {
-		dev_info(&pdev->dev, "controller in mem2mem mode.\n");
-		dev_m2m = true;
-	}
-
-	if (dev_m2m) {
-		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
-		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
-	} else {
-		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
-		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
-	}
-}
-
 static int at_xdmac_probe(struct platform_device *pdev)
 {
@@ -2210,7 +2207,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
+static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
 	.prepare	= atmel_xdmac_prepare,
 	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
 };
@@ -2234,7 +2231,7 @@ static struct platform_driver at_xdmac_driver = {
 	.driver = {
 		.name		= "at_xdmac",
 		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
-		.pm		= &atmel_xdmac_dev_pm_ops,
+		.pm		= pm_ptr(&atmel_xdmac_dev_pm_ops),
 	}
 };
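The pm_ptr() conversion above replaces the old #ifdef guards: pm_ptr()
evaluates to the ops pointer when CONFIG_PM is enabled and to NULL
otherwise, while __maybe_unused keeps the compiler quiet about callbacks
that then go unused. A minimal sketch of the same pattern, with
hypothetical "foo" names:

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		/* quiesce hardware */
		return 0;
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		/* restore hardware state */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	};

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.pm	= pm_ptr(&foo_pm_ops), /* NULL if !CONFIG_PM */
		},
	};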
@@ -133,7 +133,7 @@ void bcom_ata_reset_bd(struct bcom_task *tsk)
 	struct bcom_ata_var *var;
 
 	/* Reset all BD */
-	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+	memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
 
 	tsk->index = 0;
 	tsk->outdex = 0;
@@ -95,7 +95,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
 	tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
 	if (!tsk->bd)
 		goto error;
-	memset(tsk->bd, 0x00, bd_count * bd_size);
+	memset_io(tsk->bd, 0x00, bd_count * bd_size);
 
 	tsk->num_bd = bd_count;
 	tsk->bd_size = bd_size;
@@ -186,16 +186,16 @@ bcom_load_image(int task, u32 *task_image)
 	inc = bcom_task_inc(task);
 
 	/* Clear & copy */
-	memset(var, 0x00, BCOM_VAR_SIZE);
-	memset(inc, 0x00, BCOM_INC_SIZE);
+	memset_io(var, 0x00, BCOM_VAR_SIZE);
+	memset_io(inc, 0x00, BCOM_INC_SIZE);
 
 	desc_src = (u32 *)(hdr + 1);
 	var_src = desc_src + hdr->desc_size;
 	inc_src = var_src + hdr->var_size;
 
-	memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
-	memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
-	memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
+	memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32));
+	memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
+	memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32));
 
 	return 0;
 }
@@ -302,13 +302,13 @@ static int bcom_engine_init(void)
 		return -ENOMEM;
 	}
 
-	memset(bcom_eng->tdt, 0x00, tdt_size);
-	memset(bcom_eng->ctx, 0x00, ctx_size);
-	memset(bcom_eng->var, 0x00, var_size);
-	memset(bcom_eng->fdt, 0x00, fdt_size);
+	memset_io(bcom_eng->tdt, 0x00, tdt_size);
+	memset_io(bcom_eng->ctx, 0x00, ctx_size);
+	memset_io(bcom_eng->var, 0x00, var_size);
+	memset_io(bcom_eng->fdt, 0x00, fdt_size);
 
 	/* Copy the FDT for the EU#3 */
-	memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+	memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
 
 	/* Initialize Task base structure */
 	for (task=0; task<BCOM_MAX_TASKS; task++)
@@ -140,7 +140,7 @@ bcom_fec_rx_reset(struct bcom_task *tsk)
 	tsk->index = 0;
 	tsk->outdex = 0;
 
-	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+	memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
 
 	/* Configure some stuff */
 	bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
@@ -241,7 +241,7 @@ bcom_fec_tx_reset(struct bcom_task *tsk)
 	tsk->index = 0;
 	tsk->outdex = 0;
 
-	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+	memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
 
 	/* Configure some stuff */
 	bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
@@ -142,7 +142,7 @@ bcom_gen_bd_rx_reset(struct bcom_task *tsk)
 	tsk->index = 0;
 	tsk->outdex = 0;
 
-	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+	memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
 
 	/* Configure some stuff */
 	bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
@@ -226,7 +226,7 @@ bcom_gen_bd_tx_reset(struct bcom_task *tsk)
 	tsk->index = 0;
 	tsk->outdex = 0;
 
-	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+	memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
 
 	/* Configure some stuff */
 	bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
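All of the bestcomm changes above follow one rule: the task descriptors live
in on-chip SRAM that is mapped as I/O memory, so they must be touched via
the __iomem accessors rather than plain memset()/memcpy(). A hedged sketch
of the accessor family (the function and its inputs are assumptions for
illustration):

	#include <linux/io.h>

	static int init_io_buffer(phys_addr_t phys, size_t size,
				  const void *src, size_t len)
	{
		void __iomem *regs = ioremap(phys, size);

		if (!regs)
			return -ENOMEM;

		memset_io(regs, 0x00, size);	/* not memset() */
		memcpy_toio(regs, src, len);	/* CPU buffer -> I/O memory */
		iounmap(regs);
		return 0;
	}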
@@ -915,6 +915,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->max_sg_burst = JZ_DMA_MAX_DESC;
 
 	/*
 	 * Enable DMA controller, mark all channels as not programmable.
@@ -695,13 +695,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,
  */
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 {
-	int err = -EBUSY;
-
 	/* lock against __dma_request_channel */
 	mutex_lock(&dma_list_mutex);
 
 	if (chan->client_count == 0) {
 		struct dma_device *device = chan->device;
+		int err;
 
 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 		device->privatecnt++;
@@ -176,7 +176,7 @@ dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
 static inline bool
 dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
 {
-	return (cb->callback) ? true : false;
+	return cb->callback || cb->callback_result;
 }
 
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
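The dmaengine_desc_callback_valid() change matters for clients that only set
the newer callback_result hook, which receives a struct dmaengine_result
(status plus residue) instead of a bare completion call; such descriptors
previously counted as having no valid callback. A hedged client-side sketch
("my_done"/"submit_example" are illustrative names):

	static void my_done(void *param, const struct dmaengine_result *result)
	{
		if (result->result != DMA_TRANS_NOERROR)
			pr_err("DMA failed, residue %u\n", result->residue);
	}

	static void submit_example(struct dma_async_tx_descriptor *tx, void *ctx)
	{
		tx->callback = NULL;		/* legacy hook left unset */
		tx->callback_result = my_done;	/* must still count as valid */
		tx->callback_param = ctx;
		dmaengine_submit(tx);
	}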
@@ -79,6 +79,32 @@ axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
 	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
 }
 
+static inline void axi_chan_config_write(struct axi_dma_chan *chan,
+					 struct axi_dma_chan_config *config)
+{
+	u32 cfg_lo, cfg_hi;
+
+	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
+		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
+	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
+			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
+			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
+			 config->src_per << CH_CFG_H_SRC_PER_POS |
+			 config->dst_per << CH_CFG_H_DST_PER_POS |
+			 config->prior << CH_CFG_H_PRIORITY_POS;
+	} else {
+		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
+			  config->dst_per << CH_CFG2_L_DST_PER_POS;
+		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
+			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
+			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
+			 config->prior << CH_CFG2_H_PRIORITY_POS;
+	}
+	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
+	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
+}
+
 static inline void axi_dma_disable(struct axi_dma_chip *chip)
 {
 	u32 val;
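With axi_chan_config_write() in place, callers fill a struct
axi_dma_chan_config and let the helper pick the CH_CFG_H or CH_CFG2 register
layout based on the channel count. A short usage sketch (field values are
illustrative):

	struct axi_dma_chan_config config = {
		.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL,
		.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL,
		.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC,
		.prior = priority,
		.hs_sel_dst = DWAXIDMAC_HS_SEL_HW,
		.hs_sel_src = DWAXIDMAC_HS_SEL_HW,
	};

	axi_chan_config_write(chan, &config);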
@@ -154,7 +180,10 @@ static inline void axi_chan_disable(struct axi_dma_chan *chan)
 
 	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
-	val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	if (chan->chip->dw->hdata->reg_map_8_channels)
+		val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	else
+		val |=   BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
 	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 }
@@ -163,8 +192,12 @@ static inline void axi_chan_enable(struct axi_dma_chan *chan)
 	u32 val;
 
 	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
-	val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
-	       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	if (chan->chip->dw->hdata->reg_map_8_channels)
+		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+	else
+		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
 	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 }
@@ -179,12 +212,16 @@ static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
 
 static void axi_dma_hw_init(struct axi_dma_chip *chip)
 {
+	int ret;
 	u32 i;
 
 	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
 		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
 		axi_chan_disable(&chip->dw->chan[i]);
 	}
+	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
+	if (ret)
+		dev_warn(chip->dev, "Unable to set coherent mask\n");
 }
 
 static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
@@ -336,7 +373,8 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 				      struct axi_dma_desc *first)
 {
 	u32 priority = chan->chip->dw->hdata->priority[chan->id];
-	u32 reg, irq_mask;
+	struct axi_dma_chan_config config;
+	u32 irq_mask;
 	u8 lms = 0; /* Select AXI0 master for LLI fetching */
 
 	if (unlikely(axi_chan_is_hw_enable(chan))) {
@@ -348,36 +386,36 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 
 	axi_dma_enable(chan->chip);
 
-	reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
-	       DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
-	axi_chan_iowrite32(chan, CH_CFG_L, reg);
-
-	reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
-	       priority << CH_CFG_H_PRIORITY_POS |
-	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
-	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
+	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
+	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
+	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
+	config.prior = priority;
+	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
+	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
 	switch (chan->direction) {
 	case DMA_MEM_TO_DEV:
 		dw_axi_dma_set_byte_halfword(chan, true);
-		reg |= (chan->config.device_fc ?
-			DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
-			DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
-			<< CH_CFG_H_TT_FC_POS;
+		config.tt_fc = chan->config.device_fc ?
+				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
+				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
 		if (chan->chip->apb_regs)
-			reg |= (chan->id << CH_CFG_H_DST_PER_POS);
+			config.dst_per = chan->id;
+		else
+			config.dst_per = chan->hw_handshake_num;
 		break;
 	case DMA_DEV_TO_MEM:
-		reg |= (chan->config.device_fc ?
-			DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
-			DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
-			<< CH_CFG_H_TT_FC_POS;
+		config.tt_fc = chan->config.device_fc ?
+				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
+				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
 		if (chan->chip->apb_regs)
-			reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
+			config.src_per = chan->id;
+		else
+			config.src_per = chan->hw_handshake_num;
 		break;
 	default:
 		break;
 	}
-	axi_chan_iowrite32(chan, CH_CFG_H, reg);
+	axi_chan_config_write(chan, &config);
 
 	write_chan_llp(chan, first->hw_desc[0].llp | lms);
@@ -1120,10 +1158,16 @@ static int dma_chan_pause(struct dma_chan *dchan)
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
 
-	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
-	val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
-	       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
-	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
+		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	} else {
+		val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+		      BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+	}
 
 	do {
 		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
@@ -1147,9 +1191,15 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
 	u32 val;
 
 	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
-	val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
-	val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
-	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
+		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
+		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+	} else {
+		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
+		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
+		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+	}
 
 	chan->is_paused = false;
 }
@@ -1241,6 +1291,8 @@ static int parse_device_properties(struct axi_dma_chip *chip)
 		return -EINVAL;
 
 	chip->dw->hdata->nr_channels = tmp;
+	if (tmp <= DMA_REG_MAP_CH_REF)
+		chip->dw->hdata->reg_map_8_channels = true;
 
 	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
 	if (ret)
@@ -18,7 +18,7 @@
 
 #include "../virt-dma.h"
 
-#define DMAC_MAX_CHANNELS	8
+#define DMAC_MAX_CHANNELS	16
 #define DMAC_MAX_MASTERS	2
 #define DMAC_MAX_BLK_SIZE	0x200000
 
@@ -30,6 +30,8 @@ struct dw_axi_dma_hcfg {
 	u32	priority[DMAC_MAX_CHANNELS];
 	/* maximum supported axi burst length */
 	u32	axi_rw_burst_len;
+	/* Register map for DMAX_NUM_CHANNELS <= 8 */
+	bool	reg_map_8_channels;
 	bool	restrict_axi_burst_len;
 };
 
@@ -103,6 +105,17 @@ struct axi_dma_desc {
 	u32				period_len;
 };
 
+struct axi_dma_chan_config {
+	u8 dst_multblk_type;
+	u8 src_multblk_type;
+	u8 dst_per;
+	u8 src_per;
+	u8 tt_fc;
+	u8 prior;
+	u8 hs_sel_dst;
+	u8 hs_sel_src;
+};
+
 static inline struct device *dchan2dev(struct dma_chan *dchan)
 {
 	return &dchan->dev->device;
@@ -139,6 +152,8 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
 #define DMAC_CHEN		0x018 /* R/W DMAC Channel Enable */
 #define DMAC_CHEN_L		0x018 /* R/W DMAC Channel Enable 00-31 */
 #define DMAC_CHEN_H		0x01C /* R/W DMAC Channel Enable 32-63 */
+#define DMAC_CHSUSPREG		0x020 /* R/W DMAC Channel Suspend */
+#define DMAC_CHABORTREG		0x028 /* R/W DMAC Channel Abort */
 #define DMAC_INTSTATUS		0x030 /* R DMAC Interrupt Status */
 #define DMAC_COMMON_INTCLEAR	0x038 /* W DMAC Interrupt Clear */
 #define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */
@@ -187,6 +202,7 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
 #define DMA_APB_HS_SEL_BIT_SIZE	0x08 /* HW handshake bits per channel */
 #define DMA_APB_HS_SEL_MASK	0xFF /* HW handshake select masks */
 #define MAX_BLOCK_SIZE		0x1000 /* 1024 blocks * 4 bytes data width */
+#define DMA_REG_MAP_CH_REF	0x08 /* Channel count to choose register map */
 
 /* DMAC_CFG */
 #define DMAC_EN_POS	0
@@ -195,12 +211,20 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
 #define INT_EN_POS	1
 #define INT_EN_MASK	BIT(INT_EN_POS)
 
+/* DMAC_CHEN */
 #define DMAC_CHAN_EN_SHIFT	0
 #define DMAC_CHAN_EN_WE_SHIFT	8
 
 #define DMAC_CHAN_SUSP_SHIFT	16
 #define DMAC_CHAN_SUSP_WE_SHIFT	24
 
+/* DMAC_CHEN2 */
+#define DMAC_CHAN_EN2_WE_SHIFT	16
+
+/* DMAC_CHSUSP */
+#define DMAC_CHAN_SUSP2_SHIFT	0
+#define DMAC_CHAN_SUSP2_WE_SHIFT	16
+
 /* CH_CTL_H */
 #define CH_CTL_H_ARLEN_EN	BIT(6)
 #define CH_CTL_H_ARLEN_POS	7
@@ -289,6 +313,15 @@ enum {
 	DWAXIDMAC_MBLK_TYPE_LL
 };
 
+/* CH_CFG2 */
+#define CH_CFG2_L_SRC_PER_POS		4
+#define CH_CFG2_L_DST_PER_POS		11
+
+#define CH_CFG2_H_TT_FC_POS		0
+#define CH_CFG2_H_HS_SEL_SRC_POS	3
+#define CH_CFG2_H_HS_SEL_DST_POS	4
+#define CH_CFG2_H_PRIORITY_POS		20
+
 /**
  * DW AXI DMA channel interrupts
  *
@@ -249,7 +249,6 @@ static int dw_edma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
 	int err = 0;
-	LIST_HEAD(head);
 
 	if (!chan->configured) {
 		/* Do nothing */
@@ -186,27 +186,18 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	pci_set_master(pdev);
 
 	/* DMA configuration */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (!err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			pci_err(pdev, "consistent DMA mask 64 set failed\n");
-			return err;
-		}
+		pci_err(pdev, "DMA mask 64 set failed\n");
+		return err;
 	} else {
 		pci_err(pdev, "DMA mask 64 set failed\n");
 
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			pci_err(pdev, "DMA mask 32 set failed\n");
 			return err;
 		}
-
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			pci_err(pdev, "consistent DMA mask 32 set failed\n");
-			return err;
-		}
 	}
 
 	/* Data structure allocation */
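This is part of the tree-wide move away from the pci-dma-compat.h wrappers:
a single dma_set_mask_and_coherent() call now sets both the streaming and
the coherent mask. The usual 64-then-32-bit fallback, as a hedged sketch
(helper name is illustrative):

	static int setup_dma_masks(struct pci_dev *pdev)
	{
		int err;

		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (err) {
			/* 64-bit addressing unavailable, retry with 32-bit */
			err = dma_set_mask_and_coherent(&pdev->dev,
							DMA_BIT_MASK(32));
			if (err)
				pci_err(pdev, "no usable DMA mask\n");
		}
		return err;
	}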
@@ -32,11 +32,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
 
@@ -348,6 +348,7 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
 	struct fsl_edma_engine *edma = fsl_chan->edma;
 	struct edma_regs *regs = &fsl_chan->edma->regs;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
+	u16 csr = 0;
 
 	/*
 	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
@@ -373,6 +374,12 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
 	edma_writel(edma, (s32)tcd->dlast_sga,
 		    &regs->tcd[ch].dlast_sga);
 
+	if (fsl_chan->is_sw) {
+		csr = le16_to_cpu(tcd->csr);
+		csr |= EDMA_TCD_CSR_START;
+		tcd->csr = cpu_to_le16(csr);
+	}
+
 	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
 }
 
@@ -587,6 +594,29 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 }
 EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
 
+struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
+						     dma_addr_t dma_dst, dma_addr_t dma_src,
+						     size_t len, unsigned long flags)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct fsl_edma_desc *fsl_desc;
+
+	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
+	if (!fsl_desc)
+		return NULL;
+	fsl_desc->iscyclic = false;
+
+	fsl_chan->is_sw = true;
+
+	/* To match with copy_align and max_seg_size so 1 tcd is enough */
+	fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+			EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
+			32, len, 0, 1, 1, 32, 0, true, true, false);
+
+	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
+
 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
 {
 	struct virt_dma_desc *vdesc;
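Once the controller advertises DMA_MEMCPY (see the probe changes below), any
client can reach the new prep_memcpy path through the generic dmaengine API.
A hedged client sketch, assuming dst/src are already DMA-mapped addresses
and trimming error handling:

	static int memcpy_via_dmaengine(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -EIO;
		}

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		dma_sync_wait(chan, cookie);	/* polling helper, demo only */
		dma_release_channel(chan);
		return 0;
	}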
@@ -638,12 +668,14 @@ EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
 void fsl_edma_free_chan_resources(struct dma_chan *chan)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	struct fsl_edma_engine *edma = fsl_chan->edma;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
 	fsl_edma_disable_request(fsl_chan);
-	fsl_edma_chan_mux(fsl_chan, 0, false);
+	if (edma->drvdata->dmamuxs)
+		fsl_edma_chan_mux(fsl_chan, 0, false);
 	fsl_chan->edesc = NULL;
 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
 	fsl_edma_unprep_slave_dma(fsl_chan);
@@ -652,6 +684,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
 	dma_pool_destroy(fsl_chan->tcd_pool);
 	fsl_chan->tcd_pool = NULL;
+	fsl_chan->is_sw = false;
 }
 EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
 
@@ -121,6 +121,7 @@ struct fsl_edma_chan {
 	struct fsl_edma_desc	*edesc;
 	struct dma_slave_config	cfg;
 	u32			attr;
+	bool                    is_sw;
 	struct dma_pool		*tcd_pool;
 	dma_addr_t		dma_dev_addr;
 	u32			dma_dev_size;
@@ -240,6 +241,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags, void *context);
+struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
+		size_t len, unsigned long flags);
 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
 void fsl_edma_issue_pending(struct dma_chan *chan);
 int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
@@ -17,6 +17,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_dma.h>
+#include <linux/dma-mapping.h>
 
 #include "fsl-edma-common.h"
 
@@ -372,6 +373,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
 	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
+	dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);
 
 	fsl_edma->dma_dev.dev = &pdev->dev;
 	fsl_edma->dma_dev.device_alloc_chan_resources
@@ -381,6 +383,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
 	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
 	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
+	fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
 	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
 	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
 	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
@@ -392,6 +395,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
 	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 
+	fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
+	/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
+	dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+
 	platform_set_drvdata(pdev, fsl_edma);
 
 	ret = dma_async_device_register(&fsl_edma->dma_dev);
@@ -519,11 +519,7 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return ret;
 	}
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (ret)
 		return ret;
 
@@ -65,11 +65,7 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
 
@@ -135,8 +135,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	int rc, num_descs, i;
-	int align;
-	u64 tmp;
 
 	if (wq->type != IDXD_WQT_KERNEL)
 		return 0;
@@ -148,21 +146,13 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 	if (rc < 0)
 		return rc;
 
-	align = idxd->data->align;
-	wq->compls_size = num_descs * idxd->data->compl_size + align;
-	wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
-					    &wq->compls_addr_raw, GFP_KERNEL);
-	if (!wq->compls_raw) {
+	wq->compls_size = num_descs * idxd->data->compl_size;
+	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
+	if (!wq->compls) {
 		rc = -ENOMEM;
 		goto fail_alloc_compls;
 	}
 
-	/* Adjust alignment */
-	wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
-	tmp = (u64)wq->compls_raw;
-	tmp = (tmp + (align - 1)) & ~(align - 1);
-	wq->compls = (struct dsa_completion_record *)tmp;
-
 	rc = alloc_descs(wq, num_descs);
 	if (rc < 0)
 		goto fail_alloc_descs;
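The manual over-allocate-and-round code removed above is unnecessary because
dma_alloc_coherent() already guarantees alignment: per
Documentation/core-api/dma-api.rst, the returned CPU and DMA addresses are
aligned to the smallest page order covering the requested size. A hedged
one-line illustration (dev/size/dma_handle assumed from context):

	void *cpu = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	/* cpu and dma_handle share an alignment of at least the page order
	 * covering 'size' -- no manual rounding needed */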
@@ -191,8 +181,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 fail_sbitmap_init:
 	free_descs(wq);
 fail_alloc_descs:
-	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
-			  wq->compls_addr_raw);
+	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
 	free_hw_descs(wq);
 	return rc;
@@ -207,8 +196,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
 
 	free_hw_descs(wq);
 	free_descs(wq);
-	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
-			  wq->compls_addr_raw);
+	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 	sbitmap_queue_free(&wq->sbq);
 }
@@ -427,7 +415,6 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
 {
 	percpu_ref_kill(&wq->wq_active);
 	wait_for_completion(&wq->wq_dead);
-	percpu_ref_exit(&wq->wq_active);
 }
 
 /* Device control bits */
@@ -584,6 +571,8 @@ void idxd_device_reset(struct idxd_device *idxd)
 	spin_lock(&idxd->dev_lock);
 	idxd_device_clear_state(idxd);
 	idxd->state = IDXD_DEV_DISABLED;
+	idxd_unmask_error_interrupts(idxd);
+	idxd_msix_perm_setup(idxd);
 	spin_unlock(&idxd->dev_lock);
 }
 
@@ -792,7 +781,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
 	struct device *dev = &idxd->pdev->dev;
 
 	/* Setup bandwidth token limit */
-	if (idxd->token_limit) {
+	if (idxd->hw.gen_cap.config_en && idxd->token_limit) {
 		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
 		reg.token_limit = idxd->token_limit;
 		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -1051,8 +1040,6 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
 
 	wq->size = wq->wqcfg->wq_size;
 	wq->threshold = wq->wqcfg->wq_thresh;
-	if (wq->wqcfg->priv)
-		wq->type = IDXD_WQT_KERNEL;
 
 	/* The driver does not support shared WQ mode in read-only config yet */
 	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
@@ -311,6 +311,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
 
 err_dma:
 	idxd_wq_quiesce(wq);
+	percpu_ref_exit(&wq->wq_active);
 err_ref:
 	idxd_wq_free_resources(wq);
 err_res_alloc:
@@ -328,9 +329,9 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
 	mutex_lock(&wq->wq_lock);
 	idxd_wq_quiesce(wq);
 	idxd_unregister_dma_channel(wq);
-	__drv_disable_wq(wq);
 	idxd_wq_free_resources(wq);
-	wq->type = IDXD_WQT_NONE;
+	__drv_disable_wq(wq);
+	percpu_ref_exit(&wq->wq_active);
 	mutex_unlock(&wq->wq_lock);
 }
@@ -187,9 +187,7 @@ struct idxd_wq {
 		struct dsa_completion_record *compls;
 		struct iax_completion_record *iax_compls;
 	};
-	void *compls_raw;
 	dma_addr_t compls_addr;
-	dma_addr_t compls_addr_raw;
 	int compls_size;
 	struct idxd_desc **descs;
 	struct sbitmap_queue sbq;
@@ -797,11 +797,19 @@ static void idxd_remove(struct pci_dev *pdev)
 	int msixcnt = pci_msix_vec_count(pdev);
 	int i;
 
-	dev_dbg(&pdev->dev, "%s called\n", __func__);
+	idxd_unregister_devices(idxd);
+	/*
+	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
+	 * to the idxd context. The driver still needs those bits in order to do the rest of
+	 * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref
+	 * on the device here to hold off the freeing while allowing the idxd sub-driver
+	 * to unbind.
+	 */
+	get_device(idxd_confdev(idxd));
+	device_unregister(idxd_confdev(idxd));
 	idxd_shutdown(pdev);
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
-	idxd_unregister_devices(idxd);
 
 	for (i = 0; i < msixcnt; i++) {
 		irq_entry = &idxd->irq_entries[i];
 
@@ -815,7 +823,7 @@ static void idxd_remove(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 	destroy_workqueue(idxd->wq);
 	perfmon_pmu_remove(idxd);
-	device_unregister(idxd_confdev(idxd));
+	put_device(idxd_confdev(idxd));
 }
 
 static struct pci_driver idxd_pci_driver = {
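The reordering above leans on the driver-core reference count: get_device()
pins the conf_dev so that device_unregister(), which drops the initial
reference and may trigger ->release(), cannot free the idxd context while
the remaining teardown still dereferences it. The generic shape of the
pattern, as a hedged sketch:

	get_device(dev);	/* extra ref: ->release() is deferred */
	device_unregister(dev);	/* unbind and drop the initial ref */

	/* ... teardown that still touches dev's containing object ... */

	put_device(dev);	/* final ref dropped; ->release() runs */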
@@ -63,6 +63,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 	int i;
 	bool err = false;
 
+	if (cause & IDXD_INTC_HALT_STATE)
+		goto halt;
+
 	if (cause & IDXD_INTC_ERR) {
 		spin_lock(&idxd->dev_lock);
 		for (i = 0; i < 4; i++)
@@ -121,6 +124,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 	if (!err)
 		return 0;
 
+halt:
 	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
 	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
 		idxd->state = IDXD_DEV_HALTED;
@@ -134,6 +138,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 			queue_work(idxd->wq, &idxd->work);
 		} else {
 			spin_lock(&idxd->dev_lock);
+			idxd->state = IDXD_DEV_HALTED;
 			idxd_wqs_quiesce(idxd);
 			idxd_wqs_unmap_portal(idxd);
 			idxd_device_clear_state(idxd);
@@ -221,8 +226,7 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
 
 	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
 		if (desc->completion->status) {
-			list_del(&desc->list);
-			list_add_tail(&desc->list, &flist);
+			list_move_tail(&desc->list, &flist);
 		}
 	}
 
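list_move_tail() used above is the list.h helper that folds the delete and
re-add into one call; its definition is essentially the following
(paraphrased from include/linux/list.h):

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del_entry(list);		/* unlink from current list */
		list_add_tail(list, head);	/* append to the new tail */
	}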
@@ -36,8 +36,7 @@ union gen_cap_reg {
 		u64 max_batch_shift:4;
 		u64 max_ims_mult:6;
 		u64 config_en:1;
-		u64 max_descs_per_engine:8;
-		u64 rsvd3:24;
+		u64 rsvd3:32;
 	};
 	u64 bits;
 } __packed;
@@ -158,6 +157,7 @@ enum idxd_device_reset_type {
 #define IDXD_INTC_CMD			0x02
 #define IDXD_INTC_OCCUPY		0x04
 #define IDXD_INTC_PERFMON_OVFL		0x08
+#define IDXD_INTC_HALT_STATE		0x10
 
 #define IDXD_CMD_OFFSET			0xa0
 union idxd_command_reg {
@@ -741,9 +741,8 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	unsigned long flags;
 
 	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
-	if (!buf_virt) {
+	if (!buf_virt)
 		return -ENOMEM;
-	}
 
 	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
@@ -1227,8 +1226,9 @@ static int sdma_config_channel(struct dma_chan *chan)
 		if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
 		    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
 			sdma_set_watermarklevel_for_p2p(sdmac);
-	} else
+	} else {
 		__set_bit(sdmac->event_id0, sdmac->event_mask);
+	}
 
 	/* Address */
 	sdmac->shp_addr = sdmac->per_address;
@ -1241,7 +1241,7 @@ static int sdma_config_channel(struct dma_chan *chan)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sdma_set_channel_priority(struct sdma_channel *sdmac,
|
static int sdma_set_channel_priority(struct sdma_channel *sdmac,
|
||||||
unsigned int priority)
|
unsigned int priority)
|
||||||
{
|
{
|
||||||
struct sdma_engine *sdma = sdmac->sdma;
|
struct sdma_engine *sdma = sdmac->sdma;
|
||||||
int channel = sdmac->channel;
|
int channel = sdmac->channel;
|
||||||
|
@ -1261,7 +1261,7 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
|
||||||
int ret = -EBUSY;
|
int ret = -EBUSY;
|
||||||
|
|
||||||
sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
|
sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
|
||||||
GFP_NOWAIT);
|
GFP_NOWAIT);
|
||||||
if (!sdma->bd0) {
|
if (!sdma->bd0) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -1284,7 +1284,7 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
|
desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
|
||||||
&desc->bd_phys, GFP_NOWAIT);
|
&desc->bd_phys, GFP_NOWAIT);
|
||||||
if (!desc->bd) {
|
if (!desc->bd) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -1757,7 +1757,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
|
||||||
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46
|
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46
|
||||||
|
|
||||||
static void sdma_add_scripts(struct sdma_engine *sdma,
|
static void sdma_add_scripts(struct sdma_engine *sdma,
|
||||||
const struct sdma_script_start_addrs *addr)
|
const struct sdma_script_start_addrs *addr)
|
||||||
{
|
{
|
||||||
s32 *addr_arr = (u32 *)addr;
|
s32 *addr_arr = (u32 *)addr;
|
||||||
s32 *saddr_arr = (u32 *)sdma->script_addrs;
|
s32 *saddr_arr = (u32 *)sdma->script_addrs;
|
||||||
|
@ -1840,8 +1840,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
|
||||||
clk_enable(sdma->clk_ahb);
|
clk_enable(sdma->clk_ahb);
|
||||||
/* download the RAM image for SDMA */
|
/* download the RAM image for SDMA */
|
||||||
sdma_load_script(sdma, ram_code,
|
sdma_load_script(sdma, ram_code,
|
||||||
header->ram_code_size,
|
header->ram_code_size,
|
||||||
addr->ram_code_start_addr);
|
addr->ram_code_start_addr);
|
||||||
clk_disable(sdma->clk_ipg);
|
clk_disable(sdma->clk_ipg);
|
||||||
clk_disable(sdma->clk_ahb);
|
clk_disable(sdma->clk_ahb);
|
||||||
|
|
||||||
|
@ -1850,8 +1850,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
|
||||||
sdma->fw_loaded = true;
|
sdma->fw_loaded = true;
|
||||||
|
|
||||||
dev_info(sdma->dev, "loaded firmware %d.%d\n",
|
dev_info(sdma->dev, "loaded firmware %d.%d\n",
|
||||||
header->version_major,
|
header->version_major,
|
||||||
header->version_minor);
|
header->version_minor);
|
||||||
|
|
||||||
err_firmware:
|
err_firmware:
|
||||||
release_firmware(fw);
|
release_firmware(fw);
|
||||||
|
@ -1955,7 +1955,7 @@ static int sdma_init(struct sdma_engine *sdma)
|
||||||
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
|
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
|
||||||
|
|
||||||
sdma->channel_control = dma_alloc_coherent(sdma->dev,
|
sdma->channel_control = dma_alloc_coherent(sdma->dev,
|
||||||
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
|
MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
|
||||||
sizeof(struct sdma_context_data),
|
sizeof(struct sdma_context_data),
|
||||||
&ccb_phys, GFP_KERNEL);
|
&ccb_phys, GFP_KERNEL);
|
||||||
|
|
||||||
|
@ -1965,9 +1965,9 @@ static int sdma_init(struct sdma_engine *sdma)
|
||||||
}
|
}
|
||||||
|
|
||||||
sdma->context = (void *)sdma->channel_control +
|
sdma->context = (void *)sdma->channel_control +
|
||||||
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
|
MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
|
||||||
sdma->context_phys = ccb_phys +
|
sdma->context_phys = ccb_phys +
|
||||||
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
|
MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
|
||||||
|
|
||||||
/* disable all channels */
|
/* disable all channels */
|
||||||
for (i = 0; i < sdma->drvdata->num_events; i++)
|
for (i = 0; i < sdma->drvdata->num_events; i++)
|
||||||
|
|
|
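Note: the imx-sdma hunks are checkpatch-style cleanups: braces dropped from a single-statement if (sdma_load_script), braces balanced across the if/else arms (sdma_config_channel), continuation lines re-aligned to the open parenthesis, and the space after sizeof removed. The brace rule in a nutshell, as an illustrative sketch rather than driver code:

    if (cond) {
        do_a();
        do_b();
    } else {
        do_c();    /* braced only because the other arm needs braces */
    }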
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1363,15 +1363,9 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!iomap)
 		return -ENOMEM;
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (err)
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (err)
 		return err;
 
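Note: the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() calls collapse into dma_set_mask_and_coherent() on the underlying struct device, which sets the streaming and coherent masks together. A self-contained sketch of the fallback idiom (the function name here is invented):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int demo_set_dma_masks(struct pci_dev *pdev)
    {
        int err;

        /* prefer the widest mask the hardware supports */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        return err;    /* nonzero: the device cannot DMA on this platform */
    }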
--- a/drivers/dma/milbeaut-hdmac.c
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -269,7 +269,7 @@ milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (!md)
 		return NULL;
 
-	md->sgl = kzalloc(sizeof(*sgl) * sg_len, GFP_NOWAIT);
+	md->sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
 	if (!md->sgl) {
 		kfree(md);
 		return NULL;
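Note: kcalloc(n, size, flags) is the overflow-checked form of the open-coded array allocation it replaces: kzalloc(sizeof(*sgl) * sg_len, ...) can silently wrap for a huge sg_len, while kcalloc() fails cleanly with NULL. Side by side, with illustrative names:

    arr = kzalloc(sizeof(*arr) * n, GFP_KERNEL);    /* multiply may wrap */
    arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);     /* checked, else NULL */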
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1123,6 +1123,7 @@ static int mmp_pdma_probe(struct platform_device *op)
 					 mmp_pdma_dma_xlate, pdev);
 		if (ret < 0) {
 			dev_err(&op->dev, "of_dma_controller_register failed\n");
+			dma_async_device_unregister(&pdev->device);
 			return ret;
 		}
 	}
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -563,15 +563,9 @@ static int plx_dma_probe(struct pci_dev *pdev,
 	if (rc)
 		return rc;
 
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
 	if (rc)
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (rc)
-		return rc;
-
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
-	if (rc)
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;
 
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -388,6 +388,8 @@ struct bam_device
 	/* execution environment ID, from DT */
 	u32 ee;
 	bool controlled_remotely;
+	bool powered_remotely;
+	u32 active_channels;
 
 	const struct reg_offset_data *layout;
 
@@ -415,6 +417,44 @@ static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
 		r.ee_mult * bdev->ee;
 }
 
+/**
+ * bam_reset() - reset and initialize BAM registers
+ * @bdev: bam device
+ */
+static void bam_reset(struct bam_device *bdev)
+{
+	u32 val;
+
+	/* s/w reset bam */
+	/* after reset all pipes are disabled and idle */
+	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
+	val |= BAM_SW_RST;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+	val &= ~BAM_SW_RST;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+
+	/* make sure previous stores are visible before enabling BAM */
+	wmb();
+
+	/* enable bam */
+	val |= BAM_EN;
+	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+
+	/* set descriptor threshhold, start with 4 bytes */
+	writel_relaxed(DEFAULT_CNT_THRSHLD,
+		       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+
+	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
+	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
+
+	/* enable irqs for errors */
+	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
+		       bam_addr(bdev, 0, BAM_IRQ_EN));
+
+	/* unmask global bam interrupt */
+	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+}
+
 /**
  * bam_reset_channel - Reset individual BAM DMA channel
  * @bchan: bam channel
@@ -512,6 +552,9 @@ static int bam_alloc_chan(struct dma_chan *chan)
 		return -ENOMEM;
 	}
 
+	if (bdev->active_channels++ == 0 && bdev->powered_remotely)
+		bam_reset(bdev);
+
 	return 0;
 }
 
@@ -565,6 +608,13 @@ static void bam_free_chan(struct dma_chan *chan)
 	/* disable irq */
 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
 
+	if (--bdev->active_channels == 0 && bdev->powered_remotely) {
+		/* s/w reset bam */
+		val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
+		val |= BAM_SW_RST;
+		writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
+	}
+
 err:
 	pm_runtime_mark_last_busy(bdev->dev);
 	pm_runtime_put_autosuspend(bdev->dev);
@@ -1164,37 +1214,9 @@ static int bam_init(struct bam_device *bdev)
 		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
 	}
 
-	if (bdev->controlled_remotely)
-		return 0;
-
-	/* s/w reset bam */
-	/* after reset all pipes are disabled and idle */
-	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
-	val |= BAM_SW_RST;
-	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
-	val &= ~BAM_SW_RST;
-	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
-
-	/* make sure previous stores are visible before enabling BAM */
-	wmb();
-
-	/* enable bam */
-	val |= BAM_EN;
-	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
-
-	/* set descriptor threshhold, start with 4 bytes */
-	writel_relaxed(DEFAULT_CNT_THRSHLD,
-		       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
-
-	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
-	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
-
-	/* enable irqs for errors */
-	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
-		       bam_addr(bdev, 0, BAM_IRQ_EN));
-
-	/* unmask global bam interrupt */
-	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
+	/* Reset BAM now if fully controlled locally */
+	if (!bdev->controlled_remotely && !bdev->powered_remotely)
+		bam_reset(bdev);
 
 	return 0;
 }
@@ -1257,8 +1279,10 @@ static int bam_dma_probe(struct platform_device *pdev)
 
 	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
 						"qcom,controlled-remotely");
+	bdev->powered_remotely = of_property_read_bool(pdev->dev.of_node,
+						"qcom,powered-remotely");
 
-	if (bdev->controlled_remotely) {
+	if (bdev->controlled_remotely || bdev->powered_remotely) {
 		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
 					   &bdev->num_channels);
 		if (ret)
@@ -1270,7 +1294,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 		dev_err(bdev->dev, "num-ees unspecified in dt\n");
 	}
 
-	if (bdev->controlled_remotely)
+	if (bdev->controlled_remotely || bdev->powered_remotely)
 		bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
 	else
 		bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
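Note: "qcom,controlled-remotely" means another execution environment powers and fully initializes the BAM, so the local driver must leave the global registers alone; the new "qcom,powered-remotely" mode means the remote side only keeps the core powered, and the local processor still has to program it. Because the remote power state is not observable locally, the driver refcounts channel users and re-runs bam_reset() for the first one. A condensed sketch of that gate (the teardown helper name is hypothetical; the driver open-codes it):

    if (bdev->active_channels++ == 0 && bdev->powered_remotely)
        bam_reset(bdev);               /* first user: (re)initialize */

    if (--bdev->active_channels == 0 && bdev->powered_remotely)
        demo_park_in_sw_reset(bdev);   /* last user: leave BAM in reset */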
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -1001,7 +1001,7 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int sa11x0_dma_suspend(struct device *dev)
+static __maybe_unused int sa11x0_dma_suspend(struct device *dev)
 {
 	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
 	unsigned pch;
@@ -1039,7 +1039,7 @@ static int sa11x0_dma_suspend(struct device *dev)
 	return 0;
 }
 
-static int sa11x0_dma_resume(struct device *dev)
+static __maybe_unused int sa11x0_dma_resume(struct device *dev)
 {
 	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
 	unsigned pch;
@@ -1072,12 +1072,7 @@ static int sa11x0_dma_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
-	.suspend_noirq = sa11x0_dma_suspend,
-	.resume_noirq = sa11x0_dma_resume,
-	.freeze_noirq = sa11x0_dma_suspend,
-	.thaw_noirq = sa11x0_dma_resume,
-	.poweroff_noirq = sa11x0_dma_suspend,
-	.restore_noirq = sa11x0_dma_resume,
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sa11x0_dma_suspend, sa11x0_dma_resume)
 };
 
 static struct platform_driver sa11x0_dma_driver = {
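Note: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills all six noirq slots (suspend/resume, freeze/thaw, poweroff/restore) when CONFIG_PM_SLEEP is enabled and expands to nothing otherwise, which is exactly why the callbacks are tagged __maybe_unused instead of being wrapped in an #ifdef. Sketch with invented names:

    #include <linux/pm.h>

    static __maybe_unused int demo_suspend(struct device *dev) { return 0; }
    static __maybe_unused int demo_resume(struct device *dev)  { return 0; }

    static const struct dev_pm_ops demo_pm_ops = {
        /* expands to the six .*_noirq assignments under CONFIG_PM_SLEEP */
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
    };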
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1916,7 +1916,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
-		return ret;
+		goto err_pm_disable;
 	}
 
 	ret = rcar_dmac_init(dmac);
@@ -1924,7 +1924,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
 	if (ret) {
 		dev_err(&pdev->dev, "failed to reset device\n");
-		goto error;
+		goto err_pm_disable;
 	}
 
 	/* Initialize engine */
@@ -1958,14 +1958,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 	for_each_rcar_dmac_chan(i, dmac, chan) {
 		ret = rcar_dmac_chan_probe(dmac, chan);
 		if (ret < 0)
-			goto error;
+			goto err_pm_disable;
 	}
 
 	/* Register the DMAC as a DMA provider for DT. */
 	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
 					 NULL);
 	if (ret < 0)
-		goto error;
+		goto err_pm_disable;
 
 	/*
 	 * Register the DMA engine device.
@@ -1974,12 +1974,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 	 */
 	ret = dma_async_device_register(engine);
 	if (ret < 0)
-		goto error;
+		goto err_dma_free;
 
 	return 0;
 
-error:
+err_dma_free:
 	of_dma_controller_free(pdev->dev.of_node);
+err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
 	return ret;
 }
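Note: replacing the single error: label with an err_dma_free:/err_pm_disable: ladder gives every failure point an unwind that matches what has actually been set up: the early pm_runtime failure no longer leaks pm_runtime_enable(), and only a failure after the OF registration frees it. The general shape, condensed with hypothetical step names:

    pm_runtime_enable(&pdev->dev);
    ret = pm_runtime_resume_and_get(&pdev->dev);
    if (ret < 0)
        goto err_pm_disable;

    ret = demo_register_of_provider(pdev);   /* hypothetical step */
    if (ret < 0)
        goto err_pm_disable;

    ret = demo_register_engine(pdev);        /* hypothetical step */
    if (ret < 0)
        goto err_dma_free;     /* must also undo the OF registration */

    return 0;

    err_dma_free:
        of_dma_controller_free(pdev->dev.of_node);
    err_pm_disable:
        pm_runtime_disable(&pdev->dev);
        return ret;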
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -18,6 +18,7 @@
 #include <linux/of_dma.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
@@ -573,7 +574,7 @@ static void rz_dmac_issue_pending(struct dma_chan *chan)
 static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
 {
 	u8 i;
-	const enum dma_slave_buswidth ds_lut[] = {
+	static const enum dma_slave_buswidth ds_lut[] = {
 		DMA_SLAVE_BUSWIDTH_1_BYTE,
 		DMA_SLAVE_BUSWIDTH_2_BYTES,
 		DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -872,6 +873,13 @@ static int rz_dmac_probe(struct platform_device *pdev)
 	/* Initialize the channels. */
 	INIT_LIST_HEAD(&dmac->engine.channels);
 
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_resume_and_get(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
+		goto err_pm_disable;
+	}
+
 	for (i = 0; i < dmac->n_channels; i++) {
 		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
 		if (ret < 0)
@@ -925,6 +933,10 @@ err:
 				  channel->lmdesc.base_dma);
 	}
 
+	pm_runtime_put(&pdev->dev);
+err_pm_disable:
+	pm_runtime_disable(&pdev->dev);
+
 	return ret;
 }
 
@@ -943,6 +955,8 @@ static int rz_dmac_remove(struct platform_device *pdev)
 	}
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&dmac->engine);
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
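Note: pm_runtime_resume_and_get() is the error-safe variant of pm_runtime_get_sync(): on failure it drops the usage count it took, so no compensating pm_runtime_put() is needed in the error path; remove() then balances probe with a put plus disable. Sketch:

    ret = pm_runtime_resume_and_get(&pdev->dev);
    if (ret < 0)
        goto err_pm_disable;    /* usage count already dropped for us */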
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -270,7 +270,6 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
 						       u32 threshold)
 {
 	enum dma_slave_buswidth max_width;
-	u64 addr = buf_addr;
 
 	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
 		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -281,7 +280,7 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
 	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
 		max_width = max_width >> 1;
 
-	if (do_div(addr, max_width))
+	if (buf_addr & (max_width - 1))
 		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 
 	return max_width;
@@ -497,6 +496,7 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 
 	if (chan->desc) {
+		dma_cookie_complete(&chan->desc->vdesc.tx);
 		vchan_terminate_vdesc(&chan->desc->vdesc);
 		if (chan->busy)
 			stm32_dma_stop(chan);
@@ -753,8 +753,14 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 	if (src_bus_width < 0)
 		return src_bus_width;
 
-	/* Set memory burst size */
-	src_maxburst = STM32_DMA_MAX_BURST;
+	/*
+	 * Set memory burst size - burst not possible if address is not aligned on
+	 * the address boundary equal to the size of the transfer
+	 */
+	if (buf_addr & (buf_len - 1))
+		src_maxburst = 1;
+	else
+		src_maxburst = STM32_DMA_MAX_BURST;
 	src_best_burst = stm32_dma_get_best_burst(buf_len,
 						  src_maxburst,
 						  fifoth,
@@ -803,8 +809,14 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 	if (dst_bus_width < 0)
 		return dst_bus_width;
 
-	/* Set memory burst size */
-	dst_maxburst = STM32_DMA_MAX_BURST;
+	/*
+	 * Set memory burst size - burst not possible if address is not aligned on
+	 * the address boundary equal to the size of the transfer
+	 */
+	if (buf_addr & (buf_len - 1))
+		dst_maxburst = 1;
+	else
+		dst_maxburst = STM32_DMA_MAX_BURST;
 	dst_best_burst = stm32_dma_get_best_burst(buf_len,
 						  dst_maxburst,
 						  fifoth,
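Note: dropping do_div() avoids a 64-bit division on 32-bit ARM. Every DMA_SLAVE_BUSWIDTH_* value here is a power of two, so "address is a multiple of the width" reduces to a bit test, and the burst hunks apply the same trick to buf_len. The identity, worked once:

    /* for power-of-two w:  addr % w  ==  addr & (w - 1)
     * e.g. addr = 0x1006, w = 4:  0x1006 & 0x3 = 0x2, so misaligned */
    if (buf_addr & (max_width - 1))
        max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;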
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1566,7 +1566,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	if (count < 0)
 		count = 0;
 
-	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count,
+	dmadev = devm_kzalloc(&pdev->dev,
+			      struct_size(dmadev, ahb_addr_masks, count),
 			      GFP_KERNEL);
 	if (!dmadev)
 		return -ENOMEM;
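Note: struct_size(p, member, n) from <linux/overflow.h> computes sizeof(*p) plus n trailing elements of p->member and saturates instead of wrapping, replacing the unchecked sizeof(*dmadev) + sizeof(u32) * count. General pattern with an invented struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo {
        int n;
        u32 masks[];    /* flexible array member */
    };

    struct demo *d;

    d = kzalloc(struct_size(d, masks, count), GFP_KERNEL);  /* checked size */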
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -43,10 +43,8 @@
 #define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs)	(reqs << 4)
 
 #define ADMA_CH_FIFO_CTRL				0x2c
-#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0xf) << 8)
-#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0xf)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0x1f) << 8)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0x1f)
+#define ADMA_CH_TX_FIFO_SIZE_SHIFT			8
+#define ADMA_CH_RX_FIFO_SIZE_SHIFT			0
 
 #define ADMA_CH_LOWER_SRC_ADDR				0x34
 #define ADMA_CH_LOWER_TRG_ADDR				0x3c
@@ -61,29 +59,26 @@
 
 #define TEGRA_ADMA_BURST_COMPLETE_TIME			20
 
-#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
-				    TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
-
-#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
-				    TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
-
 #define ADMA_CH_REG_FIELD_VAL(val, mask, shift)	(((val) & mask) << shift)
 
 struct tegra_adma;
 
 /*
  * struct tegra_adma_chip_data - Tegra chip specific data
+ * @adma_get_burst_config: Function callback used to set DMA burst size.
  * @global_reg_offset: Register offset of DMA global register.
  * @global_int_clear: Register offset of DMA global interrupt clear.
  * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
  * @ch_req_rx_shift: Register offset for AHUB receive channel select.
  * @ch_base_offset: Register offset of DMA channel registers.
- * @has_outstanding_reqs: If DMA channel can have outstanding requests.
  * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
  * @ch_req_mask: Mask for Tx or Rx channel select.
  * @ch_req_max: Maximum number of Tx or Rx channels available.
  * @ch_reg_size: Size of DMA channel register space.
  * @nr_channels: Number of DMA channels available.
+ * @ch_fifo_size_mask: Mask for FIFO size field.
+ * @sreq_index_offset: Slave channel index offset.
+ * @has_outstanding_reqs: If DMA channel can have outstanding requests.
  */
 struct tegra_adma_chip_data {
 	unsigned int (*adma_get_burst_config)(unsigned int burst_size);
@@ -97,6 +92,8 @@ struct tegra_adma_chip_data {
 	unsigned int ch_req_max;
 	unsigned int ch_reg_size;
 	unsigned int nr_channels;
+	unsigned int ch_fifo_size_mask;
+	unsigned int sreq_index_offset;
 	bool has_outstanding_reqs;
 };
 
@@ -560,13 +557,14 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
 {
 	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
 	const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
-	unsigned int burst_size, adma_dir;
+	unsigned int burst_size, adma_dir, fifo_size_shift;
 
 	if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
 		return -EINVAL;
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
+		fifo_size_shift = ADMA_CH_TX_FIFO_SIZE_SHIFT;
 		adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
 		burst_size = tdc->sconfig.dst_maxburst;
 		ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
@@ -577,6 +575,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
 		break;
 
 	case DMA_DEV_TO_MEM:
+		fifo_size_shift = ADMA_CH_RX_FIFO_SIZE_SHIFT;
 		adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
 		burst_size = tdc->sconfig.src_maxburst;
 		ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
@@ -598,7 +597,27 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
 	ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
 	if (cdata->has_outstanding_reqs)
 		ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
-	ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
+
+	/*
+	 * 'sreq_index' represents the current ADMAIF channel number and as per
+	 * HW recommendation its FIFO size should match with the corresponding
+	 * ADMA channel.
+	 *
+	 * ADMA FIFO size is set as per below (based on default ADMAIF channel
+	 * FIFO sizes):
+	 *    fifo_size = 0x2 (sreq_index > sreq_index_offset)
+	 *    fifo_size = 0x3 (sreq_index <= sreq_index_offset)
+	 *
+	 */
+	if (tdc->sreq_index > cdata->sreq_index_offset)
+		ch_regs->fifo_ctrl =
+			ADMA_CH_REG_FIELD_VAL(2, cdata->ch_fifo_size_mask,
+					      fifo_size_shift);
+	else
+		ch_regs->fifo_ctrl =
+			ADMA_CH_REG_FIELD_VAL(3, cdata->ch_fifo_size_mask,
+					      fifo_size_shift);
+
 	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
 
 	return tegra_adma_request_alloc(tdc, direction);
@@ -782,12 +801,13 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
 	.ch_req_tx_shift	= 28,
 	.ch_req_rx_shift	= 24,
 	.ch_base_offset		= 0,
-	.has_outstanding_reqs	= false,
-	.ch_fifo_ctrl		= TEGRA210_FIFO_CTRL_DEFAULT,
 	.ch_req_mask		= 0xf,
 	.ch_req_max		= 10,
 	.ch_reg_size		= 0x80,
 	.nr_channels		= 22,
+	.ch_fifo_size_mask	= 0xf,
+	.sreq_index_offset	= 2,
+	.has_outstanding_reqs	= false,
 };
 
 static const struct tegra_adma_chip_data tegra186_chip_data = {
@@ -797,12 +817,13 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
 	.ch_req_tx_shift	= 27,
 	.ch_req_rx_shift	= 22,
 	.ch_base_offset		= 0x10000,
-	.has_outstanding_reqs	= true,
-	.ch_fifo_ctrl		= TEGRA186_FIFO_CTRL_DEFAULT,
 	.ch_req_mask		= 0x1f,
 	.ch_req_max		= 20,
 	.ch_reg_size		= 0x100,
 	.nr_channels		= 32,
+	.ch_fifo_size_mask	= 0x1f,
+	.sreq_index_offset	= 4,
+	.has_outstanding_reqs	= true,
 };
 
 static const struct of_device_id tegra_adma_of_match[] = {
@@ -867,7 +888,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
 
 	pm_runtime_enable(&pdev->dev);
 
-	ret = pm_runtime_get_sync(&pdev->dev);
+	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0)
 		goto rpm_disable;
 
@@ -940,7 +961,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
 	for (i = 0; i < tdma->nr_channels; ++i)
 		irq_dispose_mapping(tdma->channels[i].irq);
 
-	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
 	return 0;
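Note: the fixed TEGRA*_FIFO_CTRL_DEFAULT values give way to per-chip mask/shift data so the FIFO depth can follow the ADMAIF channel in use; ADMA_CH_REG_FIELD_VAL() is plain (val & mask) << shift field packing. For instance, packing an RX FIFO size of 3 on Tegra186 (mask 0x1f, shift 0) yields 0x3 in bits [4:0]:

    ch_regs->fifo_ctrl = ADMA_CH_REG_FIELD_VAL(3, cdata->ch_fifo_size_mask,
                                               fifo_size_shift);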
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1348,6 +1348,7 @@ static int bcdma_get_bchan(struct udma_chan *uc)
 {
 	struct udma_dev *ud = uc->ud;
 	enum udma_tp_level tpl;
+	int ret;
 
 	if (uc->bchan) {
 		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
@@ -1365,8 +1366,11 @@ static int bcdma_get_bchan(struct udma_chan *uc)
 		tpl = ud->bchan_tpl.levels - 1;
 
 	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
-	if (IS_ERR(uc->bchan))
-		return PTR_ERR(uc->bchan);
+	if (IS_ERR(uc->bchan)) {
+		ret = PTR_ERR(uc->bchan);
+		uc->bchan = NULL;
+		return ret;
+	}
 
 	uc->tchan = uc->bchan;
 
@@ -1376,6 +1380,7 @@
 static int udma_get_tchan(struct udma_chan *uc)
 {
 	struct udma_dev *ud = uc->ud;
+	int ret;
 
 	if (uc->tchan) {
 		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
@@ -1390,8 +1395,11 @@ static int udma_get_tchan(struct udma_chan *uc)
 	 */
 	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
 					 uc->config.mapped_channel_id);
-	if (IS_ERR(uc->tchan))
-		return PTR_ERR(uc->tchan);
+	if (IS_ERR(uc->tchan)) {
+		ret = PTR_ERR(uc->tchan);
+		uc->tchan = NULL;
+		return ret;
+	}
 
 	if (ud->tflow_cnt) {
 		int tflow_id;
@@ -1421,6 +1429,7 @@
 static int udma_get_rchan(struct udma_chan *uc)
 {
 	struct udma_dev *ud = uc->ud;
+	int ret;
 
 	if (uc->rchan) {
 		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
@@ -1435,8 +1444,13 @@ static int udma_get_rchan(struct udma_chan *uc)
 	 */
 	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
 					 uc->config.mapped_channel_id);
+	if (IS_ERR(uc->rchan)) {
+		ret = PTR_ERR(uc->rchan);
+		uc->rchan = NULL;
+		return ret;
+	}
 
-	return PTR_ERR_OR_ZERO(uc->rchan);
+	return 0;
 }
 
 static int udma_get_chan_pair(struct udma_chan *uc)
@@ -1490,6 +1504,7 @@ static int udma_get_chan_pair(struct udma_chan *uc)
 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
 {
 	struct udma_dev *ud = uc->ud;
+	int ret;
 
 	if (!uc->rchan) {
 		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
@@ -1503,8 +1518,13 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
 	}
 
 	uc->rflow = __udma_get_rflow(ud, flow_id);
+	if (IS_ERR(uc->rflow)) {
+		ret = PTR_ERR(uc->rflow);
+		uc->rflow = NULL;
+		return ret;
+	}
 
-	return PTR_ERR_OR_ZERO(uc->rflow);
+	return 0;
 }
 
 static void bcdma_put_bchan(struct udma_chan *uc)
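Note: the reservation helpers return ERR_PTR() on failure, but the rest of k3-udma treats a non-NULL uc->bchan/tchan/rchan/rflow as "resource held", so a cached ERR_PTR would be dereferenced by later teardown. The invariant, generically (names invented):

    struct demo_res *r;

    r = demo_reserve(dev);      /* valid pointer or ERR_PTR() */
    if (IS_ERR(r))
        return PTR_ERR(r);      /* cached field stays NULL, i.e. "free" */
    uc->res = r;                /* only ever store known-good pointers */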
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -792,7 +792,7 @@ static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
 }
 
 /**
- * xilinx_dma_tx_descriptor - Allocate transaction descriptor
+ * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
  * @chan: Driver specific DMA channel
  *
  * Return: The allocated descriptor on success and NULL on failure.
@@ -998,14 +998,12 @@ static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
 				 struct xilinx_dma_tx_descriptor *desc,
 				 unsigned long *flags)
 {
-	dma_async_tx_callback callback;
-	void *callback_param;
-
-	callback = desc->async_tx.callback;
-	callback_param = desc->async_tx.callback_param;
-	if (callback) {
+	struct dmaengine_desc_callback cb;
+
+	dmaengine_desc_get_callback(&desc->async_tx, &cb);
+	if (dmaengine_desc_callback_valid(&cb)) {
 		spin_unlock_irqrestore(&chan->lock, *flags);
-		callback(callback_param);
+		dmaengine_desc_callback_invoke(&cb, NULL);
 		spin_lock_irqsave(&chan->lock, *flags);
 	}
 }
@@ -2483,7 +2481,7 @@ static void xilinx_dma_synchronize(struct dma_chan *dchan)
 }
 
 /**
- * xilinx_dma_channel_set_config - Configure VDMA channel
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
  * Run-time configuration for Axi VDMA, supports:
  * . halt the channel
  * . configure interrupt coalescing and inter-packet delay threshold
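Note: dmaengine_desc_get_callback(), dmaengine_desc_callback_valid() and dmaengine_desc_callback_invoke() are provider-side helpers from the internal drivers/dma/dmaengine.h. They snapshot both the legacy callback and the newer callback_result under the channel lock and invoke whichever one the client installed, so clients that only set callback_result are no longer skipped; that is what the "Correctly handle descriptor callbacks" patches fix. Usage shape as in the hunk:

    struct dmaengine_desc_callback cb;

    dmaengine_desc_get_callback(&desc->async_tx, &cb);  /* snapshot under lock */
    if (dmaengine_desc_callback_valid(&cb)) {
        spin_unlock(&chan->lock);
        dmaengine_desc_callback_invoke(&cb, NULL);  /* NULL: no result payload */
        spin_lock(&chan->lock);
    }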
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -271,9 +271,6 @@ struct xilinx_dpdma_device {
 /* -----------------------------------------------------------------------------
  * DebugFS
  */
-
-#ifdef CONFIG_DEBUG_FS
-
 #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
 #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"
 
@@ -299,7 +296,7 @@ struct xilinx_dpdma_debugfs_request {
 
 static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
 {
-	if (chan->id == dpdma_debugfs.chan_id)
+	if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id)
 		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
 }
 
@@ -462,16 +459,6 @@ static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
 		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
 }
 
-#else
-static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
-{
-}
-
-static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
 /* -----------------------------------------------------------------------------
  * I/O Accessors
  */
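Note: IS_ENABLED(CONFIG_DEBUG_FS) folds to a compile-time 0 or 1, so the branch disappears from non-debugfs builds while the code is still parsed and type-checked in every configuration; that is what allows the #ifdef/#else stub definitions to be deleted outright. Pattern:

    if (IS_ENABLED(CONFIG_DEBUG_FS))
        demo_update_stats(chan);    /* hypothetical helper, compiled always */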
--- a/drivers/dma/zynqmp_dma.c
+++ b/drivers/dma/zynqmp_dma.c
@@ -6,15 +6,12 @@
  */
 
 #include <linux/bitops.h>
-#include <linux/dmapool.h>
-#include <linux/dma/xilinx_dma.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
 #include <linux/of_dma.h>
-#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
@@ -603,22 +600,25 @@ static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
 {
 	struct zynqmp_dma_desc_sw *desc, *next;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&chan->lock, irqflags);
 
 	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
+		struct dmaengine_desc_callback cb;
 
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
-			spin_unlock(&chan->lock);
-			callback(callback_param);
-			spin_lock(&chan->lock);
+		dmaengine_desc_get_callback(&desc->async_tx, &cb);
+		if (dmaengine_desc_callback_valid(&cb)) {
+			spin_unlock_irqrestore(&chan->lock, irqflags);
+			dmaengine_desc_callback_invoke(&cb, NULL);
+			spin_lock_irqsave(&chan->lock, irqflags);
 		}
 
 		/* Run any dependencies, then free the descriptor */
 		zynqmp_dma_free_descriptor(chan, desc);
 	}
+
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -658,9 +658,13 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
  */
 static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 {
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_desc_list(chan, &chan->active_list);
 	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
 	zynqmp_dma_free_desc_list(chan, &chan->done_list);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -670,11 +674,8 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_irqrestore(&chan->lock, irqflags);
 	dma_free_coherent(chan->dev,
 		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
 		chan->desc_pool_v, chan->desc_pool_p);
@@ -689,11 +690,16 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
  */
 static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
 {
+	unsigned long irqflags;
+
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_complete_descriptor(chan);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 	zynqmp_dma_chan_desc_cleanup(chan);
 	zynqmp_dma_free_descriptors(chan);
 
 	zynqmp_dma_init(chan);
 }
@@ -749,27 +755,27 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
 	u32 count;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
-
 	if (chan->err) {
 		zynqmp_dma_reset(chan);
 		chan->err = false;
-		goto unlock;
+		return;
 	}
 
+	spin_lock_irqsave(&chan->lock, irqflags);
 	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
-
 	while (count) {
 		zynqmp_dma_complete_descriptor(chan);
-		zynqmp_dma_chan_desc_cleanup(chan);
 		count--;
 	}
-
-	if (chan->idle)
-		zynqmp_dma_start_transfer(chan);
-
-unlock:
 	spin_unlock_irqrestore(&chan->lock, irqflags);
+
+	zynqmp_dma_chan_desc_cleanup(chan);
+
+	if (chan->idle) {
+		spin_lock_irqsave(&chan->lock, irqflags);
+		zynqmp_dma_start_transfer(chan);
+		spin_unlock_irqrestore(&chan->lock, irqflags);
+	}
 }
 
 /**
@@ -781,12 +787,9 @@ unlock:
 static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	return 0;
 }
@@ -1061,16 +1064,14 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	p->dev = &pdev->dev;
 
 	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
-	if (IS_ERR(zdev->clk_main)) {
-		dev_err(&pdev->dev, "main clock not found.\n");
-		return PTR_ERR(zdev->clk_main);
-	}
+	if (IS_ERR(zdev->clk_main))
+		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main),
+				     "main clock not found.\n");
 
 	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
-	if (IS_ERR(zdev->clk_apb)) {
-		dev_err(&pdev->dev, "apb clock not found.\n");
-		return PTR_ERR(zdev->clk_apb);
-	}
+	if (IS_ERR(zdev->clk_apb))
+		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb),
+				     "apb clock not found.\n");
 
 	platform_set_drvdata(pdev, zdev);
 	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
@@ -1085,7 +1086,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 
 	ret = zynqmp_dma_chan_probe(zdev, pdev);
 	if (ret) {
-		dev_err(&pdev->dev, "Probing channel failed\n");
+		dev_err_probe(&pdev->dev, ret, "Probing channel failed\n");
 		goto err_disable_pm;
 	}
 
@@ -1097,7 +1098,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	ret = of_dma_controller_register(pdev->dev.of_node,
 					 of_zynqmp_dma_xlate, zdev);
 	if (ret) {
-		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+		dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n");
 		dma_async_device_unregister(&zdev->common);
 		goto free_chan_resources;
 	}
@@ -1105,8 +1106,6 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	pm_runtime_mark_last_busy(zdev->dev);
 	pm_runtime_put_sync_autosuspend(zdev->dev);
 
-	dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
-
 	return 0;
 
 free_chan_resources:
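Note: dev_err_probe() merges "log and return the error" into one expression and treats -EPROBE_DEFER specially, logging at debug level and recording the deferral reason instead of spamming the console, which makes it the idiomatic form for clock lookups that may defer. The same series also pushes the chan->lock acquisition down into zynqmp_dma_free_descriptors() and the descriptor cleanup itself, so callers no longer hold the lock across them. Shape of the clk conversion:

    clk = devm_clk_get(&pdev->dev, "clk_main");
    if (IS_ERR(clk))
        return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                             "main clock not found.\n");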
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -944,10 +944,8 @@ struct dma_device
 	void (*device_issue_pending)(struct dma_chan *chan);
 	void (*device_release)(struct dma_device *dev);
 	/* debugfs support */
-#ifdef CONFIG_DEBUG_FS
 	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
 	struct dentry *dbg_dev_root;
-#endif
 };
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,