dmaengine updates for v5.7-rc1

- Core:
  - Some code cleanup and optimization in core by Andy
  - Debugfs support for displaying dmaengine channels by Peter
- Drivers:
  - New driver for uniphier-xdmac controller
  - Updates to stm32 dma, mdma and dmamux drivers and PM support
  - More updates to idxd drivers
  - Bunch of changes in tegra-apb driver and cleaning up of pm functions
  - Bunch of spelling fixes and Replace zero-length array patches
  - Shutdown hook for fsl-dpaa2-qdma driver
  - Support for interleaved transfers for ti-edma and virtualization
    support for k3-dma driver
  - Support for reset and updates in xilinx_dma driver
  - Improvements and locking updates in at_hdma driver

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAl6FyokACgkQfBQHDyUj
g0dnFRAAj9lvpflrL+b9eWBZkY1ElV1jAdxsTs4HnYdXQM3ijw8yOosVVSqiuiOy
2qMfSRTP7qU9gqZ7oa1fnh05DqPmuTc3OF2IZlvGzkU9CiGQ735WGGGG8FfK/dZe
F4OgQGwA45b47hNIbvM4acwWZYPL+pBuYusKdjdHkouqVM4SORiNM8aRrCJ59xIn
P9TER//sMpdMEASuRuUIQnXb+OzSNPn1mLiP3zT0XHSM/nBMTAm7AnCDNT/Tjs9f
hwk2j8rLrwllHGqeZln8cWLhUCPrZFNe5pBWtWyV3MyY/nxlrcUX0ndJUGJIDtsb
nfXc4QKemOeF1RsC8DsQ/AY8jl6HFvRzWEEkq742IrLPCu/nTnxia4dbXW9MJ0Dp
BI7IPwoaOoYqBdRkBnSVS2F4x3813egsEReznlu/sUorTIG2g9sWtmuzv6eRt4ow
HczGgfdJXfCvIKbRg5TIXpbaJogbbB+1YrUlWq9vrZyhVw0ULtfxlWVKDy5VI1cL
0Kiz/ZIGuoQ9h6E4G3jCpaQTV49tNbYp+vimU9kizmcm+WXrTXR7rgD4AI5tH2DQ
pxYXNEl4gm1NRtWL1zzJ+B1C0MPXpc1Xafl92W39D6rphEGOdVVzay8meVIaQKDU
qQaZ1dEK4uuSxwj8NrF7sXHSClafF888FFJBEMArde1HVql/HRU=
=+UJ7
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-5.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 "Core:
   - Some code cleanup and optimization in core by Andy
   - Debugfs support for displaying dmaengine channels by Peter

  Drivers:
   - New driver for uniphier-xdmac controller
   - Updates to stm32 dma, mdma and dmamux drivers and PM support
   - More updates to idxd drivers
   - Bunch of changes in tegra-apb driver and cleaning up of pm functions
   - Bunch of spelling fixes and Replace zero-length array patches
   - Shutdown hook for fsl-dpaa2-qdma driver
   - Support for interleaved transfers for ti-edma and virtualization
     support for k3-dma driver
   - Support for reset and updates in xilinx_dma driver
   - Improvements and locking updates in at_hdma driver"

* tag 'dmaengine-5.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (89 commits)
  dt-bindings: dma: renesas,usb-dmac: add r8a77961 support
  dmaengine: uniphier-xdmac: Remove redandant error log for platform_get_irq
  dmaengine: tegra-apb: Improve DMA synchronization
  dmaengine: tegra-apb: Don't save/restore IRQ flags in interrupt handler
  dmaengine: tegra-apb: mark PM functions as __maybe_unused
  dmaengine: fix spelling mistake "exceds" -> "exceeds"
  dmaengine: sprd: Set request pending flag when DMA controller is active
  dmaengine: ppc4xx: Use scnprintf() for avoiding potential buffer overflow
  dmaengine: idxd: remove global token limit check
  dmaengine: idxd: reflect shadow copy of traffic class programming
  dmaengine: idxd: Merge definition of dsa_batch_desc into dsa_hw_desc
  dmaengine: Create debug directories for DMA devices
  dmaengine: ti: k3-udma: Implement custom dbg_summary_show for debugfs
  dmaengine: Add basic debugfs support
  dmaengine: fsl-dpaa2-qdma: remove set but not used variable 'dpaa2_qdma'
  dmaengine: ti: edma: fix null dereference because of a typo in pointer name
  dmaengine: fsl-dpaa2-qdma: Adding shutdown hook
  dmaengine: uniphier-xdmac: Add UniPhier external DMA controller driver
  dt-bindings: dmaengine: Add UniPhier external DMA controller bindings
  dmaengine: ti: k3-udma: Implement support for atype (for virtualization)
  ...
commit e964f1e04a
@@ -16,6 +16,7 @@ Required Properties:
      - "renesas,r8a7794-usb-dmac" (R-Car E2)
      - "renesas,r8a7795-usb-dmac" (R-Car H3)
      - "renesas,r8a7796-usb-dmac" (R-Car M3-W)
      - "renesas,r8a77961-usb-dmac" (R-Car M3-W+)
      - "renesas,r8a77965-usb-dmac" (R-Car M3-N)
      - "renesas,r8a77990-usb-dmac" (R-Car E3)
      - "renesas,r8a77995-usb-dmac" (R-Car D3)

@@ -0,0 +1,63 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/socionext,uniphier-xdmac.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Socionext UniPhier external DMA controller

description: |
  This describes the devicetree bindings for an external DMA engine to perform
  memory-to-memory or peripheral-to-memory data transfer capable of supporting
  16 channels, implemented in Socionext UniPhier SoCs.

maintainers:
  - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  compatible:
    const: socionext,uniphier-xdmac

  reg:
    items:
      - description: XDMAC base register region (offset and length)
      - description: XDMAC extension register region (offset and length)

  interrupts:
    maxItems: 1

  "#dma-cells":
    const: 2
    description: |
      DMA request from clients consists of 2 cells:
        1. Channel index
        2. Transfer request factor number, If no transfer factor, use 0.
           The number is SoC-specific, and this should be specified with
           relation to the device to use the DMA controller.

  dma-channels:
    minimum: 1
    maximum: 16

additionalProperties: false

required:
  - compatible
  - reg
  - interrupts
  - "#dma-cells"

examples:
  - |
    xdmac: dma-controller@5fc10000 {
        compatible = "socionext,uniphier-xdmac";
        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
        interrupts = <0 188 4>;
        #dma-cells = <2>;
        dma-channels = <16>;
    };

...

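Seen from the client side, the two-cell specifier defined in this binding carries the channel index and the transfer request factor. The following is only a hypothetical consumer sketch: the node name, unit address, compatible string, channel number (2) and factor (18) are placeholders, not values from any real UniPhier devicetree.

    /* Placeholder client of the xdmac controller from the example above. */
    dma-client@54006800 {
        compatible = "vendor,example-peripheral";  /* illustrative only */
        reg = <0x54006800 0x100>;
        dmas = <&xdmac 2 18>;
        dma-names = "rx-tx";
    };
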
@@ -180,7 +180,7 @@ edma1_tptc0: tptc@27b0000 {
};

edma1_tptc1: tptc@27b8000 {
    compatible = "ti, k2g-edma3-tptc", "ti,edma3-tptc";
    compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc";
    reg = <0x027b8000 0x400>;
    power-domains = <&k2g_pds 0x4f>;
};

@@ -45,7 +45,8 @@ allOf:

properties:
  "#dma-cells":
    const: 1
    minimum: 1
    maximum: 2
    description: |
      The cell is the PSI-L thread ID of the remote (to UDMAP) end.
      Valid ranges for thread ID depends on the data movement direction:

@@ -55,6 +56,8 @@ properties:
      Please refer to the device documentation for the PSI-L thread map and also
      the PSI-L peripheral chapter for the correct thread ID.

      When #dma-cells is 2, the second parameter is the channel ATYPE.

  compatible:
    enum:
      - ti,am654-navss-main-udmap

@@ -131,6 +134,20 @@ required:
  - ti,sci-rm-range-rchan
  - ti,sci-rm-range-rflow

if:
  properties:
    "#dma-cells":
      const: 2
then:
  properties:
    ti,udma-atype:
      description: ATYPE value which should be used by non slave channels
      allOf:
        - $ref: /schemas/types.yaml#/definitions/uint32

  required:
    - ti,udma-atype

examples:
  - |+
    cbass_main {

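On the consumer side, the optional second cell introduced here carries the channel ATYPE next to the PSI-L thread ID in each dmas entry, while the controller node itself gains #dma-cells = <2> and the new ti,udma-atype property for its non-slave channels. A hypothetical fragment follows; the thread IDs (0xc400, 0x4400), the ATYPE value (0) and the client node are placeholders, not taken from a real board file.

    /* Placeholder consumer of a UDMAP instance using the two-cell form. */
    dma-client@2b00000 {
        compatible = "vendor,example-peripheral";  /* illustrative only */
        dmas = <&main_udmap 0xc400 0>, <&main_udmap 0x4400 0>;
        dma-names = "tx", "rx";
    };
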
@@ -616,8 +616,8 @@ config TXX9_DMAC
      integrated in chips such as the Toshiba TX4927/38/39.

config TEGRA20_APB_DMA
    bool "NVIDIA Tegra20 APB DMA support"
    depends on ARCH_TEGRA
    tristate "NVIDIA Tegra20 APB DMA support"
    depends on ARCH_TEGRA || COMPILE_TEST
    select DMA_ENGINE
    help
      Support for the NVIDIA Tegra20 APB DMA controller driver. The

@@ -658,6 +658,17 @@ config UNIPHIER_MDMAC
      UniPhier platform. This DMA controller is used as the external
      DMA engine of the SD/eMMC controllers of the LD4, Pro4, sLD8 SoCs.

config UNIPHIER_XDMAC
    tristate "UniPhier XDMAC support"
    depends on ARCH_UNIPHIER || COMPILE_TEST
    depends on OF
    select DMA_ENGINE
    select DMA_VIRTUAL_CHANNELS
    help
      Enable support for the XDMAC (external DMA controller) on the
      UniPhier platform. This DMA controller can transfer data from
      memory to memory, memory to peripheral and peripheral to memory.

config XGENE_DMA
    tristate "APM X-Gene DMA support"
    depends on ARCH_XGENE || COMPILE_TEST

@@ -78,6 +78,7 @@ obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o

@ -146,17 +146,8 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
|
|||
"scanned %u descriptors on freelist\n", i);
|
||||
|
||||
/* no more descriptor available in initial pool: create one more */
|
||||
if (!ret) {
|
||||
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
|
||||
if (ret) {
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
atchan->descs_allocated++;
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
} else {
|
||||
dev_err(chan2dev(&atchan->chan_common),
|
||||
"not enough descriptors available\n");
|
||||
}
|
||||
}
|
||||
if (!ret)
|
||||
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -435,17 +426,19 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
|
|||
* atc_chain_complete - finish work for one transaction chain
|
||||
* @atchan: channel we work on
|
||||
* @desc: descriptor at the head of the chain we want do complete
|
||||
*
|
||||
* Called with atchan->lock held and bh disabled */
|
||||
*/
|
||||
static void
|
||||
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
|
||||
{
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common),
|
||||
"descriptor %u complete\n", txd->cookie);
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
|
||||
/* mark the descriptor as complete for non cyclic cases only */
|
||||
if (!atc_chan_is_cyclic(atchan))
|
||||
dma_cookie_complete(txd);
|
||||
|
@ -462,16 +455,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
|
|||
/* move myself to free_list */
|
||||
list_move(&desc->desc_node, &atchan->free_list);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
dma_descriptor_unmap(txd);
|
||||
/* for cyclic transfers,
|
||||
* no need to replay callback function while stopping */
|
||||
if (!atc_chan_is_cyclic(atchan)) {
|
||||
/*
|
||||
* The API requires that no submissions are done from a
|
||||
* callback, so we don't need to drop the lock here
|
||||
*/
|
||||
if (!atc_chan_is_cyclic(atchan))
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
}
|
||||
|
||||
dma_run_dependencies(txd);
|
||||
}
|
||||
|
@ -489,9 +479,12 @@ static void atc_complete_all(struct at_dma_chan *atchan)
|
|||
{
|
||||
struct at_desc *desc, *_desc;
|
||||
LIST_HEAD(list);
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
|
||||
/*
|
||||
* Submit queued descriptors ASAP, i.e. before we go through
|
||||
* the completed ones.
|
||||
|
@ -503,6 +496,8 @@ static void atc_complete_all(struct at_dma_chan *atchan)
|
|||
/* empty queue list by moving descriptors (if any) to active_list */
|
||||
list_splice_init(&atchan->queue, &atchan->active_list);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
atc_chain_complete(atchan, desc);
|
||||
}
|
||||
|
@ -510,38 +505,44 @@ static void atc_complete_all(struct at_dma_chan *atchan)
|
|||
/**
|
||||
* atc_advance_work - at the end of a transaction, move forward
|
||||
* @atchan: channel where the transaction ended
|
||||
*
|
||||
* Called with atchan->lock held and bh disabled
|
||||
*/
|
||||
static void atc_advance_work(struct at_dma_chan *atchan)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
|
||||
|
||||
if (atc_chan_is_enabled(atchan))
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
ret = atc_chan_is_enabled(atchan);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
if (list_empty(&atchan->active_list) ||
|
||||
list_is_singular(&atchan->active_list)) {
|
||||
atc_complete_all(atchan);
|
||||
} else {
|
||||
atc_chain_complete(atchan, atc_first_active(atchan));
|
||||
/* advance work */
|
||||
atc_dostart(atchan, atc_first_active(atchan));
|
||||
}
|
||||
list_is_singular(&atchan->active_list))
|
||||
return atc_complete_all(atchan);
|
||||
|
||||
atc_chain_complete(atchan, atc_first_active(atchan));
|
||||
|
||||
/* advance work */
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
atc_dostart(atchan, atc_first_active(atchan));
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* atc_handle_error - handle errors reported by DMA controller
|
||||
* @atchan: channel where error occurs
|
||||
*
|
||||
* Called with atchan->lock held and bh disabled
|
||||
*/
|
||||
static void atc_handle_error(struct at_dma_chan *atchan)
|
||||
{
|
||||
struct at_desc *bad_desc;
|
||||
struct at_desc *child;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
/*
|
||||
* The descriptor currently at the head of the active list is
|
||||
* broked. Since we don't have any way to report errors, we'll
|
||||
|
@ -573,6 +574,8 @@ static void atc_handle_error(struct at_dma_chan *atchan)
|
|||
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
|
||||
atc_dump_lli(atchan, &child->lli);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
/* Pretend the descriptor completed successfully */
|
||||
atc_chain_complete(atchan, bad_desc);
|
||||
}
|
||||
|
@ -580,8 +583,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
|
|||
/**
|
||||
* atc_handle_cyclic - at the end of a period, run callback function
|
||||
* @atchan: channel used for cyclic operations
|
||||
*
|
||||
* Called with atchan->lock held and bh disabled
|
||||
*/
|
||||
static void atc_handle_cyclic(struct at_dma_chan *atchan)
|
||||
{
|
||||
|
@ -600,17 +601,14 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
|
|||
static void atc_tasklet(unsigned long data)
|
||||
{
|
||||
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
|
||||
atc_handle_error(atchan);
|
||||
else if (atc_chan_is_cyclic(atchan))
|
||||
atc_handle_cyclic(atchan);
|
||||
else
|
||||
atc_advance_work(atchan);
|
||||
return atc_handle_error(atchan);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
if (atc_chan_is_cyclic(atchan))
|
||||
return atc_handle_cyclic(atchan);
|
||||
|
||||
atc_advance_work(atchan);
|
||||
}
|
||||
|
||||
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
|
||||
|
@ -940,7 +938,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
|
||||
vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
|
||||
if (!vaddr) {
|
||||
dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
|
||||
__func__);
|
||||
|
@ -998,7 +996,7 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
|
||||
vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
|
||||
if (!vaddr) {
|
||||
dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
|
||||
__func__);
|
||||
|
@ -1446,6 +1444,8 @@ static int atc_terminate_all(struct dma_chan *chan)
|
|||
list_splice_init(&atchan->queue, &list);
|
||||
list_splice_init(&atchan->active_list, &list);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
/* Flush all pending and queued descriptors */
|
||||
list_for_each_entry_safe(desc, _desc, &list, desc_node)
|
||||
atc_chain_complete(atchan, desc);
|
||||
|
@ -1454,8 +1454,6 @@ static int atc_terminate_all(struct dma_chan *chan)
|
|||
/* if channel dedicated to cyclic operations, free it */
|
||||
clear_bit(ATC_IS_CYCLIC, &atchan->status);
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1516,7 +1514,6 @@ atc_tx_status(struct dma_chan *chan,
|
|||
static void atc_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "issue_pending\n");
|
||||
|
||||
|
@ -1524,15 +1521,12 @@ static void atc_issue_pending(struct dma_chan *chan)
|
|||
if (atc_chan_is_cyclic(atchan))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
atc_advance_work(atchan);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* atc_alloc_chan_resources - allocate resources for DMA channel
|
||||
* @chan: allocate descriptor resources for this channel
|
||||
* @client: current client requesting the channel be ready for requests
|
||||
*
|
||||
* return - the number of allocated descriptors
|
||||
*/
|
||||
|
@ -1542,10 +1536,8 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
|
|||
struct at_dma *atdma = to_at_dma(chan->device);
|
||||
struct at_desc *desc;
|
||||
struct at_dma_slave *atslave;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
u32 cfg;
|
||||
LIST_HEAD(tmp_list);
|
||||
|
||||
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
|
||||
|
||||
|
@ -1555,6 +1547,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
if (!list_empty(&atchan->free_list)) {
|
||||
dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
cfg = ATC_DEFAULT_CFG;
|
||||
|
||||
atslave = chan->private;
|
||||
|
@ -1570,11 +1567,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
|
|||
cfg = atslave->cfg;
|
||||
}
|
||||
|
||||
/* have we already been set up?
|
||||
* reconfigure channel but no need to reallocate descriptors */
|
||||
if (!list_empty(&atchan->free_list))
|
||||
return atchan->descs_allocated;
|
||||
|
||||
/* Allocate initial pool of descriptors */
|
||||
for (i = 0; i < init_nr_desc_per_channel; i++) {
|
||||
desc = atc_alloc_descriptor(chan, GFP_KERNEL);
|
||||
|
@ -1583,23 +1575,18 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
|
|||
"Only %d initial descriptors\n", i);
|
||||
break;
|
||||
}
|
||||
list_add_tail(&desc->desc_node, &tmp_list);
|
||||
list_add_tail(&desc->desc_node, &atchan->free_list);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
atchan->descs_allocated = i;
|
||||
list_splice(&tmp_list, &atchan->free_list);
|
||||
dma_cookie_init(chan);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
/* channel parameters */
|
||||
channel_writel(atchan, CFG, cfg);
|
||||
|
||||
dev_dbg(chan2dev(chan),
|
||||
"alloc_chan_resources: allocated %d descriptors\n",
|
||||
atchan->descs_allocated);
|
||||
"alloc_chan_resources: allocated %d descriptors\n", i);
|
||||
|
||||
return atchan->descs_allocated;
|
||||
return i;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1613,9 +1600,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
|
|||
struct at_desc *desc, *_desc;
|
||||
LIST_HEAD(list);
|
||||
|
||||
dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
|
||||
atchan->descs_allocated);
|
||||
|
||||
/* ASSERT: channel is idle */
|
||||
BUG_ON(!list_empty(&atchan->active_list));
|
||||
BUG_ON(!list_empty(&atchan->queue));
|
||||
|
@ -1628,7 +1612,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
|
|||
dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
|
||||
}
|
||||
list_splice_init(&atchan->free_list, &list);
|
||||
atchan->descs_allocated = 0;
|
||||
atchan->status = 0;
|
||||
|
||||
/*
|
||||
|
@ -1671,7 +1654,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
|
|||
dma_cap_zero(mask);
|
||||
dma_cap_set(DMA_SLAVE, mask);
|
||||
|
||||
atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
|
||||
atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
|
||||
if (!atslave)
|
||||
return NULL;
|
||||
|
||||
|
|
|
@ -243,7 +243,6 @@ enum atc_status {
|
|||
* @active_list: list of descriptors dmaengine is being running on
|
||||
* @queue: list of descriptors ready to be submitted to engine
|
||||
* @free_list: list of descriptors usable by the channel
|
||||
* @descs_allocated: records the actual size of the descriptor pool
|
||||
*/
|
||||
struct at_dma_chan {
|
||||
struct dma_chan chan_common;
|
||||
|
@ -264,7 +263,6 @@ struct at_dma_chan {
|
|||
struct list_head active_list;
|
||||
struct list_head queue;
|
||||
struct list_head free_list;
|
||||
unsigned int descs_allocated;
|
||||
};
|
||||
|
||||
#define channel_readl(atchan, name) \
|
||||
|
|
|
@ -1543,9 +1543,6 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
|
|||
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
|
||||
{
|
||||
struct at_xdmac_desc *desc;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
|
||||
/*
|
||||
* If channel is enabled, do nothing, advance_work will be triggered
|
||||
|
@ -1559,8 +1556,6 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
|
|||
if (!desc->active_xfer)
|
||||
at_xdmac_start_xfer(atchan, desc);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
}
|
||||
|
||||
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
|
||||
|
@ -1596,7 +1591,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
|
|||
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
|
||||
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
|
||||
|
||||
spin_lock_bh(&atchan->lock);
|
||||
spin_lock_irq(&atchan->lock);
|
||||
|
||||
/* Channel must be disabled first as it's not done automatically */
|
||||
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
|
||||
|
@ -1607,7 +1602,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
|
|||
struct at_xdmac_desc,
|
||||
xfer_node);
|
||||
|
||||
spin_unlock_bh(&atchan->lock);
|
||||
spin_unlock_irq(&atchan->lock);
|
||||
|
||||
/* Print bad descriptor's details if needed */
|
||||
dev_dbg(chan2dev(&atchan->chan),
|
||||
|
@ -1640,31 +1635,31 @@ static void at_xdmac_tasklet(unsigned long data)
|
|||
if (atchan->irq_status & error_mask)
|
||||
at_xdmac_handle_error(atchan);
|
||||
|
||||
spin_lock(&atchan->lock);
|
||||
spin_lock_irq(&atchan->lock);
|
||||
desc = list_first_entry(&atchan->xfers_list,
|
||||
struct at_xdmac_desc,
|
||||
xfer_node);
|
||||
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
|
||||
if (!desc->active_xfer) {
|
||||
dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
|
||||
spin_unlock(&atchan->lock);
|
||||
spin_unlock_irq(&atchan->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
txd = &desc->tx_dma_desc;
|
||||
|
||||
at_xdmac_remove_xfer(atchan, desc);
|
||||
spin_unlock(&atchan->lock);
|
||||
spin_unlock_irq(&atchan->lock);
|
||||
|
||||
if (!at_xdmac_chan_is_cyclic(atchan)) {
|
||||
dma_cookie_complete(txd);
|
||||
if (txd->flags & DMA_PREP_INTERRUPT)
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
}
|
||||
dma_cookie_complete(txd);
|
||||
if (txd->flags & DMA_PREP_INTERRUPT)
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
|
||||
dma_run_dependencies(txd);
|
||||
|
||||
spin_lock_irq(&atchan->lock);
|
||||
at_xdmac_advance_work(atchan);
|
||||
spin_unlock_irq(&atchan->lock);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1725,11 +1720,15 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
|
|||
static void at_xdmac_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||||
unsigned long flags;
|
||||
|
||||
dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
|
||||
|
||||
if (!at_xdmac_chan_is_cyclic(atchan))
|
||||
if (!at_xdmac_chan_is_cyclic(atchan)) {
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
at_xdmac_advance_work(atchan);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -1822,26 +1821,21 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
|
|||
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
|
||||
struct at_xdmac_desc *desc;
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
|
||||
if (at_xdmac_chan_is_enabled(atchan)) {
|
||||
dev_err(chan2dev(chan),
|
||||
"can't allocate channel resources (channel enabled)\n");
|
||||
i = -EIO;
|
||||
goto spin_unlock;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (!list_empty(&atchan->free_descs_list)) {
|
||||
dev_err(chan2dev(chan),
|
||||
"can't allocate channel resources (channel not free from a previous use)\n");
|
||||
i = -EIO;
|
||||
goto spin_unlock;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
for (i = 0; i < init_nr_desc_per_channel; i++) {
|
||||
desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
|
||||
desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
|
||||
if (!desc) {
|
||||
dev_warn(chan2dev(chan),
|
||||
"only %d descriptors have been allocated\n", i);
|
||||
|
@ -1854,8 +1848,6 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
|
|||
|
||||
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
|
||||
|
||||
spin_unlock:
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
return i;
|
||||
}
|
||||
|
||||
|
|
|
@ -120,7 +120,7 @@ struct sba_request {
|
|||
struct brcm_message msg;
|
||||
struct dma_async_tx_descriptor tx;
|
||||
/* SBA commands */
|
||||
struct brcm_sba_command cmds[0];
|
||||
struct brcm_sba_command cmds[];
|
||||
};
|
||||
|
||||
enum sba_version {
|
||||
|
|
|
@@ -58,6 +58,87 @@ static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
    dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
                                               rootdir);
    if (IS_ERR(dma_dev->dbg_dev_root))
        dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
    debugfs_remove_recursive(dma_dev->dbg_dev_root);
    dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
                                       struct dma_device *dma_dev)
{
    struct dma_chan *chan;

    list_for_each_entry(chan, &dma_dev->channels, device_node) {
        if (chan->client_count) {
            seq_printf(s, " %-13s| %s", dma_chan_name(chan),
                       chan->dbg_client_name ?: "in-use");

            if (chan->router)
                seq_printf(s, " (via router: %s)\n",
                           dev_name(chan->router->dev));
            else
                seq_puts(s, "\n");
        }
    }
}

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
    struct dma_device *dma_dev = NULL;

    mutex_lock(&dma_list_mutex);
    list_for_each_entry(dma_dev, &dma_device_list, global_node) {
        seq_printf(s, "dma%d (%s): number of channels: %u\n",
                   dma_dev->dev_id, dev_name(dma_dev->dev),
                   dma_dev->chancnt);

        if (dma_dev->dbg_summary_show)
            dma_dev->dbg_summary_show(s, dma_dev);
        else
            dmaengine_dbg_summary_show(s, dma_dev);

        if (!list_is_last(&dma_dev->global_node, &dma_device_list))
            seq_puts(s, "\n");
    }
    mutex_unlock(&dma_list_mutex);

    return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
    rootdir = debugfs_create_dir("dmaengine", NULL);

    /* /sys/kernel/debug/dmaengine/summary */
    debugfs_create_file("summary", 0444, rootdir, NULL,
                        &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
    return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* DEBUG_FS */

/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME "slave"

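The summary file created here lives at /sys/kernel/debug/dmaengine/summary, and dmaengine_summary_show() falls back to the generic per-channel listing above unless a controller driver supplies its own dbg_summary_show callback (k3-udma does so elsewhere in this series). Below is only a rough, hypothetical sketch of such a callback: struct foo_chan, to_foo_chan() and the busy field are invented for illustration, while the dbg_summary_show hook, the seq_file helpers and the channel list walk mirror the core code above.

    /* Hypothetical driver-specific summary callback; everything prefixed
     * with "foo_" is made up for illustration. */
    static void foo_dma_dbg_summary_show(struct seq_file *s,
                                         struct dma_device *dma_dev)
    {
        struct dma_chan *chan;

        list_for_each_entry(chan, &dma_dev->channels, device_node) {
            struct foo_chan *fc = to_foo_chan(chan);  /* invented helper */

            /* one line per channel, following the core's "name | state" layout */
            seq_printf(s, " %-13s| busy: %d\n", dma_chan_name(chan), fc->busy);
        }
    }

    /* in the driver's probe path, before dma_async_device_register():
     *     dd->dbg_summary_show = foo_dma_dbg_summary_show;
     */
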
@ -760,6 +841,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
|
|||
return chan ? chan : ERR_PTR(-EPROBE_DEFER);
|
||||
|
||||
found:
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
|
||||
name);
|
||||
#endif
|
||||
|
||||
chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
|
||||
if (!chan->name)
|
||||
return chan;
|
||||
|
@ -837,6 +923,11 @@ void dma_release_channel(struct dma_chan *chan)
|
|||
chan->name = NULL;
|
||||
chan->slave = NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
kfree(chan->dbg_client_name);
|
||||
chan->dbg_client_name = NULL;
|
||||
#endif
|
||||
mutex_unlock(&dma_list_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_release_channel);
|
||||
|
@ -1196,6 +1287,8 @@ int dma_async_device_register(struct dma_device *device)
|
|||
dma_channel_rebalance();
|
||||
mutex_unlock(&dma_list_mutex);
|
||||
|
||||
dmaengine_debug_register(device);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
|
@ -1229,6 +1322,8 @@ void dma_async_device_unregister(struct dma_device *device)
|
|||
{
|
||||
struct dma_chan *chan, *n;
|
||||
|
||||
dmaengine_debug_unregister(device);
|
||||
|
||||
list_for_each_entry_safe(chan, n, &device->channels, device_node)
|
||||
__dma_async_device_channel_unregister(device, chan);
|
||||
|
||||
|
@ -1559,6 +1654,11 @@ static int __init dma_bus_init(void)
|
|||
|
||||
if (err)
|
||||
return err;
|
||||
return class_register(&dma_devclass);
|
||||
|
||||
err = class_register(&dma_devclass);
|
||||
if (!err)
|
||||
dmaengine_debugfs_init();
|
||||
|
||||
return err;
|
||||
}
|
||||
arch_initcall(dma_bus_init);
|
||||
|
|
|
@@ -182,4 +182,20 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev) {
    return dma_dev->dbg_dev_root;
}
#else
struct dentry;
static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
    return NULL;
}
#endif /* CONFIG_DEBUG_FS */

#endif

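This helper sits in the dmaengine core's private header, so it is only reachable from drivers under drivers/dma/. A hedged sketch of how a driver might use it follows; struct foo_dma_dev, foo_dma_regs_fops and the "regs" file are invented, and only dmaengine_get_debugfs_root() and debugfs_create_file() are real APIs here. The NULL check covers both !CONFIG_DEBUG_FS and a failed directory creation.

    /* Hypothetical: add a driver-private "regs" file under the directory that
     * dmaengine_debug_register() created for this dma_device. */
    static void foo_dma_debugfs_init(struct foo_dma_dev *fd)
    {
        struct dentry *root = dmaengine_get_debugfs_root(&fd->ddev);

        if (!root)
            return;

        debugfs_create_file("regs", 0444, root, fd, &foo_dma_regs_fops);
    }
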
@ -790,6 +790,20 @@ static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
|
||||
{
|
||||
struct dpaa2_qdma_priv *priv;
|
||||
struct device *dev;
|
||||
|
||||
dev = &ls_dev->dev;
|
||||
priv = dev_get_drvdata(dev);
|
||||
|
||||
dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
|
||||
dpaa2_dpdmai_dpio_unbind(priv);
|
||||
dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
|
||||
dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
|
||||
}
|
||||
|
||||
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
|
||||
{
|
||||
.vendor = FSL_MC_VENDOR_FREESCALE,
|
||||
|
@ -805,6 +819,7 @@ static struct fsl_mc_driver dpaa2_qdma_driver = {
|
|||
},
|
||||
.probe = dpaa2_qdma_probe,
|
||||
.remove = dpaa2_qdma_remove,
|
||||
.shutdown = dpaa2_qdma_shutdown,
|
||||
.match_id_table = dpaa2_qdma_id_table
|
||||
};
|
||||
|
||||
|
|
|
@ -159,6 +159,27 @@ int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPDMAI object
|
||||
*
|
||||
* Return: '0' on Success; error code otherwise.
|
||||
*/
|
||||
int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
|
||||
/* prepare command */
|
||||
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
|
||||
cmd_flags, token);
|
||||
|
||||
/* send command to mc*/
|
||||
return mc_send_command(mc_io, &cmd);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dpdmai_destroy);
|
||||
|
||||
/**
|
||||
* dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
|
||||
#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E)
|
||||
#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E)
|
||||
#define DPDMAI_CMDID_DESTROY DPDMAI_CMDID_FORMAT(0x900)
|
||||
|
||||
#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002)
|
||||
#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003)
|
||||
|
@ -160,6 +161,7 @@ struct dpdmai_rx_queue_attr {
|
|||
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
|
||||
int dpdmai_id, u16 *token);
|
||||
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
|
||||
int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
|
||||
int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
|
||||
const struct dpdmai_cfg *cfg, u16 *token);
|
||||
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
|
||||
|
|
|
@ -74,12 +74,10 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
|
|||
struct idxd_device *idxd;
|
||||
struct idxd_wq *wq;
|
||||
struct device *dev;
|
||||
struct idxd_cdev *idxd_cdev;
|
||||
|
||||
wq = inode_wq(inode);
|
||||
idxd = wq->idxd;
|
||||
dev = &idxd->pdev->dev;
|
||||
idxd_cdev = &wq->idxd_cdev;
|
||||
|
||||
dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
|
||||
|
||||
|
@ -139,6 +137,8 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
|
||||
dev_dbg(&pdev->dev, "%s called\n", __func__);
|
||||
rc = check_vma(wq, vma, __func__);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
vma->vm_flags |= VM_DONTCOPY;
|
||||
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
|
||||
|
|
|
@ -584,11 +584,11 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
|
|||
struct idxd_group *group = &idxd->groups[i];
|
||||
|
||||
if (group->tc_a == -1)
|
||||
group->grpcfg.flags.tc_a = 0;
|
||||
group->tc_a = group->grpcfg.flags.tc_a = 0;
|
||||
else
|
||||
group->grpcfg.flags.tc_a = group->tc_a;
|
||||
if (group->tc_b == -1)
|
||||
group->grpcfg.flags.tc_b = 1;
|
||||
group->tc_b = group->grpcfg.flags.tc_b = 1;
|
||||
else
|
||||
group->grpcfg.flags.tc_b = group->tc_b;
|
||||
group->grpcfg.flags.use_token_limit = group->use_token_limit;
|
||||
|
|
|
@ -419,7 +419,7 @@ static ssize_t engine_group_id_store(struct device *dev,
|
|||
struct idxd_device *idxd = engine->idxd;
|
||||
long id;
|
||||
int rc;
|
||||
struct idxd_group *prevg, *group;
|
||||
struct idxd_group *prevg;
|
||||
|
||||
rc = kstrtol(buf, 10, &id);
|
||||
if (rc < 0)
|
||||
|
@ -439,7 +439,6 @@ static ssize_t engine_group_id_store(struct device *dev,
|
|||
return count;
|
||||
}
|
||||
|
||||
group = &idxd->groups[id];
|
||||
prevg = engine->group;
|
||||
|
||||
if (prevg)
|
||||
|
@ -513,9 +512,6 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
|
|||
if (idxd->state == IDXD_DEV_ENABLED)
|
||||
return -EPERM;
|
||||
|
||||
if (idxd->token_limit == 0)
|
||||
return -EPERM;
|
||||
|
||||
if (val > idxd->max_tokens)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -561,8 +557,6 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
|
|||
if (idxd->state == IDXD_DEV_ENABLED)
|
||||
return -EPERM;
|
||||
|
||||
if (idxd->token_limit == 0)
|
||||
return -EPERM;
|
||||
if (val < 4 * group->num_engines ||
|
||||
val > group->tokens_reserved + idxd->nr_tokens)
|
||||
return -EINVAL;
|
||||
|
@ -1180,6 +1174,16 @@ static ssize_t op_cap_show(struct device *dev,
|
|||
}
|
||||
static DEVICE_ATTR_RO(op_cap);
|
||||
|
||||
static ssize_t gen_cap_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct idxd_device *idxd =
|
||||
container_of(dev, struct idxd_device, conf_dev);
|
||||
|
||||
return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
|
||||
}
|
||||
static DEVICE_ATTR_RO(gen_cap);
|
||||
|
||||
static ssize_t configurable_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
|
@ -1317,6 +1321,7 @@ static struct attribute *idxd_device_attributes[] = {
|
|||
&dev_attr_max_batch_size.attr,
|
||||
&dev_attr_max_transfer_size.attr,
|
||||
&dev_attr_op_cap.attr,
|
||||
&dev_attr_gen_cap.attr,
|
||||
&dev_attr_configurable.attr,
|
||||
&dev_attr_clients.attr,
|
||||
&dev_attr_state.attr,
|
||||
|
|
|
@ -102,7 +102,7 @@ struct ioat_dca_priv {
|
|||
int max_requesters;
|
||||
int requester_count;
|
||||
u8 tag_map[IOAT_TAG_MAP_LEN];
|
||||
struct ioat_dca_slot req_slots[0];
|
||||
struct ioat_dca_slot req_slots[];
|
||||
};
|
||||
|
||||
static int ioat_dca_dev_managed(struct dca_provider *dca,
|
||||
|
|
|
@ -4303,7 +4303,7 @@ static ssize_t devices_show(struct device_driver *dev, char *buf)
|
|||
for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
|
||||
if (ppc440spe_adma_devices[i] == -1)
|
||||
continue;
|
||||
size += snprintf(buf + size, PAGE_SIZE - size,
|
||||
size += scnprintf(buf + size, PAGE_SIZE - size,
|
||||
"PPC440SP(E)-ADMA.%d: %s\n", i,
|
||||
ppc_adma_errors[ppc440spe_adma_devices[i]]);
|
||||
}
|
||||
|
|
|
@ -78,7 +78,7 @@ struct sa11x0_dma_desc {
|
|||
bool cyclic;
|
||||
|
||||
unsigned sglen;
|
||||
struct sa11x0_dma_sg sg[0];
|
||||
struct sa11x0_dma_sg sg[];
|
||||
};
|
||||
|
||||
struct sa11x0_dma_phy;
|
||||
|
|
|
@ -1219,7 +1219,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
|
|||
sg_len = buf_len / period_len;
|
||||
if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
|
||||
dev_err(chan->device->dev,
|
||||
"chan%u: sg length %d exceds limit %d",
|
||||
"chan%u: sg length %d exceeds limit %d",
|
||||
rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -709,7 +709,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
|
|||
BUG_ON(!schan->desc_num);
|
||||
|
||||
if (sg_len > SHDMA_MAX_SG_LEN) {
|
||||
dev_err(schan->dev, "sg length %d exceds limit %d",
|
||||
dev_err(schan->dev, "sg length %d exceeds limit %d",
|
||||
sg_len, SHDMA_MAX_SG_LEN);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -212,7 +212,7 @@ struct sprd_dma_dev {
|
|||
struct clk *ashb_clk;
|
||||
int irq;
|
||||
u32 total_chns;
|
||||
struct sprd_dma_chn channels[0];
|
||||
struct sprd_dma_chn channels[];
|
||||
};
|
||||
|
||||
static void sprd_dma_free_desc(struct virt_dma_desc *vd);
|
||||
|
@ -486,6 +486,28 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
|
||||
{
|
||||
struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
|
||||
u32 reg, val, req_id;
|
||||
|
||||
if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
|
||||
return;
|
||||
|
||||
/* The DMA request id always starts from 0. */
|
||||
req_id = schan->dev_id - 1;
|
||||
|
||||
if (req_id < 32) {
|
||||
reg = SPRD_DMA_GLB_REQ_PEND0_EN;
|
||||
val = BIT(req_id);
|
||||
} else {
|
||||
reg = SPRD_DMA_GLB_REQ_PEND1_EN;
|
||||
val = BIT(req_id - 32);
|
||||
}
|
||||
|
||||
sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
|
||||
}
|
||||
|
||||
static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
|
||||
struct sprd_dma_desc *sdesc)
|
||||
{
|
||||
|
@ -532,6 +554,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
|
|||
*/
|
||||
sprd_dma_set_chn_config(schan, schan->cur_desc);
|
||||
sprd_dma_set_uid(schan);
|
||||
sprd_dma_set_pending(schan, true);
|
||||
sprd_dma_enable_chn(schan);
|
||||
|
||||
if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
|
||||
|
@ -543,6 +566,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
|
|||
static void sprd_dma_stop(struct sprd_dma_chn *schan)
|
||||
{
|
||||
sprd_dma_stop_and_disable(schan);
|
||||
sprd_dma_set_pending(schan, false);
|
||||
sprd_dma_unset_uid(schan);
|
||||
sprd_dma_clear_int(schan);
|
||||
schan->cur_desc = NULL;
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/dma-mapping.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -207,7 +208,6 @@ struct stm32_dma_device {
|
|||
struct dma_device ddev;
|
||||
void __iomem *base;
|
||||
struct clk *clk;
|
||||
struct reset_control *rst;
|
||||
bool mem2mem;
|
||||
struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
|
||||
};
|
||||
|
@ -422,29 +422,19 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
|
|||
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
|
||||
{
|
||||
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
|
||||
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
|
||||
u32 dma_scr, id;
|
||||
u32 dma_scr, id, reg;
|
||||
|
||||
id = chan->id;
|
||||
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
|
||||
reg = STM32_DMA_SCR(id);
|
||||
dma_scr = stm32_dma_read(dmadev, reg);
|
||||
|
||||
if (dma_scr & STM32_DMA_SCR_EN) {
|
||||
dma_scr &= ~STM32_DMA_SCR_EN;
|
||||
stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);
|
||||
stm32_dma_write(dmadev, reg, dma_scr);
|
||||
|
||||
do {
|
||||
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
|
||||
dma_scr &= STM32_DMA_SCR_EN;
|
||||
if (!dma_scr)
|
||||
break;
|
||||
|
||||
if (time_after_eq(jiffies, timeout)) {
|
||||
dev_err(chan2dev(chan), "%s: timeout!\n",
|
||||
__func__);
|
||||
return -EBUSY;
|
||||
}
|
||||
cond_resched();
|
||||
} while (1);
|
||||
return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
|
||||
dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
|
||||
10, 1000000);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -488,8 +478,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
|
|||
|
||||
spin_lock_irqsave(&chan->vchan.lock, flags);
|
||||
|
||||
if (chan->busy) {
|
||||
stm32_dma_stop(chan);
|
||||
if (chan->desc) {
|
||||
vchan_terminate_vdesc(&chan->desc->vdesc);
|
||||
if (chan->busy)
|
||||
stm32_dma_stop(chan);
|
||||
chan->desc = NULL;
|
||||
}
|
||||
|
||||
|
@ -545,6 +537,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
|
|||
if (!vdesc)
|
||||
return;
|
||||
|
||||
list_del(&vdesc->node);
|
||||
|
||||
chan->desc = to_stm32_dma_desc(vdesc);
|
||||
chan->next_sg = 0;
|
||||
}
|
||||
|
@ -555,6 +549,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
|
|||
sg_req = &chan->desc->sg_req[chan->next_sg];
|
||||
reg = &sg_req->chan_reg;
|
||||
|
||||
reg->dma_scr &= ~STM32_DMA_SCR_EN;
|
||||
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
|
||||
stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
|
||||
stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
|
||||
|
@ -622,7 +617,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
|
|||
} else {
|
||||
chan->busy = false;
|
||||
if (chan->next_sg == chan->desc->num_sgs) {
|
||||
list_del(&chan->desc->vdesc.node);
|
||||
vchan_cookie_complete(&chan->desc->vdesc);
|
||||
chan->desc = NULL;
|
||||
}
|
||||
|
@ -1275,6 +1269,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
|
|||
struct dma_device *dd;
|
||||
const struct of_device_id *match;
|
||||
struct resource *res;
|
||||
struct reset_control *rst;
|
||||
int i, ret;
|
||||
|
||||
match = of_match_device(stm32_dma_of_match, &pdev->dev);
|
||||
|
@ -1296,8 +1291,10 @@ static int stm32_dma_probe(struct platform_device *pdev)
|
|||
|
||||
dmadev->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(dmadev->clk)) {
|
||||
dev_err(&pdev->dev, "Error: Missing controller clock\n");
|
||||
return PTR_ERR(dmadev->clk);
|
||||
ret = PTR_ERR(dmadev->clk);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev, "Can't get clock\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(dmadev->clk);
|
||||
|
@ -1309,13 +1306,19 @@ static int stm32_dma_probe(struct platform_device *pdev)
|
|||
dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
|
||||
"st,mem2mem");
|
||||
|
||||
dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
|
||||
if (!IS_ERR(dmadev->rst)) {
|
||||
reset_control_assert(dmadev->rst);
|
||||
rst = devm_reset_control_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(rst)) {
|
||||
ret = PTR_ERR(rst);
|
||||
if (ret == -EPROBE_DEFER)
|
||||
goto clk_free;
|
||||
} else {
|
||||
reset_control_assert(rst);
|
||||
udelay(2);
|
||||
reset_control_deassert(dmadev->rst);
|
||||
reset_control_deassert(rst);
|
||||
}
|
||||
|
||||
dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
|
||||
|
||||
dma_cap_set(DMA_SLAVE, dd->cap_mask);
|
||||
dma_cap_set(DMA_PRIVATE, dd->cap_mask);
|
||||
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
|
||||
|
@ -1336,7 +1339,9 @@ static int stm32_dma_probe(struct platform_device *pdev)
|
|||
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
|
||||
dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
|
||||
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
|
||||
dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
|
||||
dd->max_burst = STM32_DMA_MAX_BURST;
|
||||
dd->descriptor_reuse = true;
|
||||
dd->dev = &pdev->dev;
|
||||
INIT_LIST_HEAD(&dd->channels);
|
||||
|
||||
|
@ -1427,7 +1432,39 @@ static int stm32_dma_runtime_resume(struct device *dev)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int stm32_dma_suspend(struct device *dev)
|
||||
{
|
||||
struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
|
||||
int id, ret, scr;
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
|
||||
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
|
||||
if (scr & STM32_DMA_SCR_EN) {
|
||||
dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
|
||||
return -EBUSY;
|
||||
}
|
||||
}
|
||||
|
||||
pm_runtime_put_sync(dev);
|
||||
|
||||
pm_runtime_force_suspend(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int stm32_dma_resume(struct device *dev)
|
||||
{
|
||||
return pm_runtime_force_resume(dev);
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct dev_pm_ops stm32_dma_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
|
||||
SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
|
||||
stm32_dma_runtime_resume, NULL)
|
||||
};
|
||||
|
@ -1438,10 +1475,11 @@ static struct platform_driver stm32_dma_driver = {
|
|||
.of_match_table = stm32_dma_of_match,
|
||||
.pm = &stm32_dma_pm_ops,
|
||||
},
|
||||
.probe = stm32_dma_probe,
|
||||
};
|
||||
|
||||
static int __init stm32_dma_init(void)
|
||||
{
|
||||
return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
|
||||
return platform_driver_register(&stm32_dma_driver);
|
||||
}
|
||||
subsys_initcall(stm32_dma_init);
|
||||
|
|
|
@ -35,12 +35,14 @@ struct stm32_dmamux {
|
|||
struct stm32_dmamux_data {
|
||||
struct dma_router dmarouter;
|
||||
struct clk *clk;
|
||||
struct reset_control *rst;
|
||||
void __iomem *iomem;
|
||||
u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
|
||||
u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
|
||||
spinlock_t lock; /* Protects register access */
|
||||
unsigned long *dma_inuse; /* Used DMA channel */
|
||||
u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
|
||||
* in suspend
|
||||
*/
|
||||
u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
|
||||
* [0] holds number of DMA Masters.
|
||||
* To be kept at very end end of this structure
|
||||
|
@ -179,6 +181,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
|
|||
struct stm32_dmamux_data *stm32_dmamux;
|
||||
struct resource *res;
|
||||
void __iomem *iomem;
|
||||
struct reset_control *rst;
|
||||
int i, count, ret;
|
||||
u32 dma_req;
|
||||
|
||||
|
@ -251,16 +254,26 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
|
|||
stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(stm32_dmamux->clk)) {
|
||||
ret = PTR_ERR(stm32_dmamux->clk);
|
||||
if (ret == -EPROBE_DEFER)
|
||||
dev_info(&pdev->dev, "Missing controller clock\n");
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev, "Missing clock controller\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
|
||||
if (!IS_ERR(stm32_dmamux->rst)) {
|
||||
reset_control_assert(stm32_dmamux->rst);
|
||||
ret = clk_prepare_enable(stm32_dmamux->clk);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
rst = devm_reset_control_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(rst)) {
|
||||
ret = PTR_ERR(rst);
|
||||
if (ret == -EPROBE_DEFER)
|
||||
goto err_clk;
|
||||
} else {
|
||||
reset_control_assert(rst);
|
||||
udelay(2);
|
||||
reset_control_deassert(stm32_dmamux->rst);
|
||||
reset_control_deassert(rst);
|
||||
}
|
||||
|
||||
stm32_dmamux->iomem = iomem;
|
||||
|
@ -271,14 +284,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
|
|||
pm_runtime_set_active(&pdev->dev);
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
if (!IS_ERR(stm32_dmamux->clk)) {
|
||||
ret = clk_prepare_enable(stm32_dmamux->clk);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
pm_runtime_get_noresume(&pdev->dev);
|
||||
|
||||
/* Reset the dmamux */
|
||||
|
@ -287,8 +292,17 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
|
|||
|
||||
pm_runtime_put(&pdev->dev);
|
||||
|
||||
return of_dma_router_register(node, stm32_dmamux_route_allocate,
|
||||
ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
|
||||
&stm32_dmamux->dmarouter);
|
||||
if (ret)
|
||||
goto err_clk;
|
||||
|
||||
return 0;
|
||||
|
||||
err_clk:
|
||||
clk_disable_unprepare(stm32_dmamux->clk);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
@ -318,7 +332,54 @@ static int stm32_dmamux_runtime_resume(struct device *dev)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int stm32_dmamux_suspend(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
|
||||
int i, ret;
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < stm32_dmamux->dma_requests; i++)
|
||||
stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
|
||||
STM32_DMAMUX_CCR(i));
|
||||
|
||||
pm_runtime_put_sync(dev);
|
||||
|
||||
pm_runtime_force_suspend(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int stm32_dmamux_resume(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
|
||||
int i, ret;
|
||||
|
||||
ret = pm_runtime_force_resume(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < stm32_dmamux->dma_requests; i++)
|
||||
stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
|
||||
stm32_dmamux->ccr[i]);
|
||||
|
||||
pm_runtime_put_sync(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct dev_pm_ops stm32_dmamux_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
|
||||
SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
|
||||
stm32_dmamux_runtime_resume, NULL)
|
||||
};
|
||||
|
|
|
@ -273,7 +273,6 @@ struct stm32_mdma_device {
|
|||
void __iomem *base;
|
||||
struct clk *clk;
|
||||
int irq;
|
||||
-	struct reset_control *rst;
 	u32 nr_channels;
 	u32 nr_requests;
 	u32 nr_ahb_addr_masks;

@@ -1127,6 +1126,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
 		return;
 	}
 
+	list_del(&vdesc->node);
+
 	chan->desc = to_stm32_mdma_desc(vdesc);
 	hwdesc = chan->desc->node[0].hwdesc;
 	chan->curr_hwdesc = 0;

@@ -1242,8 +1243,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
-	if (chan->busy) {
-		stm32_mdma_stop(chan);
+	if (chan->desc) {
+		vchan_terminate_vdesc(&chan->desc->vdesc);
+		if (chan->busy)
+			stm32_mdma_stop(chan);
 		chan->desc = NULL;
 	}
 	vchan_get_all_descriptors(&chan->vchan, &head);

@@ -1331,7 +1334,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
 
 static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
 {
-	list_del(&chan->desc->vdesc.node);
 	vchan_cookie_complete(&chan->desc->vdesc);
 	chan->desc = NULL;
 	chan->busy = false;
 

@@ -1532,6 +1534,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	struct dma_device *dd;
 	struct device_node *of_node;
 	struct resource *res;
+	struct reset_control *rst;
 	u32 nr_channels, nr_requests;
 	int i, count, ret;
 

@@ -1579,8 +1582,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(dmadev->clk)) {
 		ret = PTR_ERR(dmadev->clk);
-		if (ret == -EPROBE_DEFER)
-			dev_info(&pdev->dev, "Missing controller clock\n");
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Missing clock controller\n");
 		return ret;
 	}
 

@@ -1590,11 +1593,15 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
-	if (!IS_ERR(dmadev->rst)) {
-		reset_control_assert(dmadev->rst);
+	rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto err_clk;
+	} else {
+		reset_control_assert(rst);
 		udelay(2);
-		reset_control_deassert(dmadev->rst);
+		reset_control_deassert(rst);
 	}
 
 	dd = &dmadev->ddev;

@@ -1614,6 +1621,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	dd->device_resume = stm32_mdma_resume;
 	dd->device_terminate_all = stm32_mdma_terminate_all;
 	dd->device_synchronize = stm32_mdma_synchronize;
+	dd->descriptor_reuse = true;
+
 	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
 		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
 		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |

@@ -1637,25 +1646,27 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	}
 
 	dmadev->irq = platform_get_irq(pdev, 0);
-	if (dmadev->irq < 0)
-		return dmadev->irq;
+	if (dmadev->irq < 0) {
+		ret = dmadev->irq;
+		goto err_clk;
+	}
 
 	ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
			       0, dev_name(&pdev->dev), dmadev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request IRQ\n");
-		return ret;
+		goto err_clk;
 	}
 
 	ret = dmaenginem_async_device_register(dd);
 	if (ret)
-		return ret;
+		goto err_clk;
 
 	ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
 	if (ret < 0) {
 		dev_err(&pdev->dev,
 			"STM32 MDMA DMA OF registration failed %d\n", ret);
-		goto err_unregister;
+		goto err_clk;
 	}
 
 	platform_set_drvdata(pdev, dmadev);

@@ -1668,7 +1679,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 
 	return 0;
 
-err_unregister:
+err_clk:
+	clk_disable_unprepare(dmadev->clk);
+
 	return ret;
 }
 

@@ -1697,7 +1710,40 @@ static int stm32_mdma_runtime_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM_SLEEP
+static int stm32_mdma_pm_suspend(struct device *dev)
+{
+	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+	u32 ccr, id;
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		return ret;
+
+	for (id = 0; id < dmadev->nr_channels; id++) {
+		ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+		if (ccr & STM32_MDMA_CCR_EN) {
+			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_put_sync(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+static int stm32_mdma_pm_resume(struct device *dev)
+{
+	return pm_runtime_force_resume(dev);
+}
+#endif
+
 static const struct dev_pm_ops stm32_mdma_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
 	SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
 			   stm32_mdma_runtime_resume, NULL)
 };
@@ -697,11 +697,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
 		dest = sconfig->dst_addr;
 		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
 			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
-			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
 	} else {
 		src = sconfig->src_addr;
 		dest = buf;
 		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
 			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
 			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
 	}
 
File diff suppressed because it is too large
@@ -164,7 +164,7 @@ struct tegra_adma {
 	const struct tegra_adma_chip_data *cdata;
 
 	/* Last member of the structure */
-	struct tegra_adma_chan channels[0];
+	struct tegra_adma_chan channels[];
 };
 
 static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
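The channels[0] to channels[] change above (and the matching omap_sg and uniphier-mdmac hunks further down) converts a GCC zero-length array into a C99 flexible array member. A minimal sketch of the allocation pattern these structures rely on; the struct and field names here are invented for illustration, not taken from any of the drivers in this series:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_chan {
	void __iomem *regs;
};

/* Driver-private structure ending in a flexible array member. */
struct example_ctrl {
	int nr_chans;
	struct example_chan chans[];	/* was "chans[0]" in the old idiom */
};

static struct example_ctrl *example_alloc(int nr_chans)
{
	struct example_ctrl *ctrl;

	/*
	 * struct_size() computes sizeof(*ctrl) + nr_chans * sizeof(ctrl->chans[0])
	 * with overflow checking; the flexible member itself contributes no size.
	 */
	ctrl = kzalloc(struct_size(ctrl, chans, nr_chans), GFP_KERNEL);
	if (!ctrl)
		return NULL;

	ctrl->nr_chans = nr_chans;
	return ctrl;
}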
@@ -133,7 +133,6 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
 	const struct of_device_id *match;
 	struct device_node *dma_node;
 	struct ti_am335x_xbar_data *xbar;
-	struct resource *res;
 	void __iomem *iomem;
 	int i, ret;
 

@@ -173,8 +172,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
 		xbar->xbar_events = TI_AM335X_XBAR_LINES;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iomem = devm_ioremap_resource(&pdev->dev, res);
+	iomem = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(iomem))
 		return PTR_ERR(iomem);
 

@@ -323,7 +321,6 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 	struct device_node *dma_node;
 	struct ti_dra7_xbar_data *xbar;
 	struct property *prop;
-	struct resource *res;
 	u32 safe_val;
 	int sz;
 	void __iomem *iomem;

@@ -403,8 +400,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 		kfree(rsv_events);
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iomem = devm_ioremap_resource(&pdev->dev, res);
+	iomem = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(iomem))
 		return PTR_ERR(iomem);
 
@ -1275,6 +1275,81 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
|
|||
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
edma_prep_dma_interleaved(struct dma_chan *chan,
|
||||
struct dma_interleaved_template *xt,
|
||||
unsigned long tx_flags)
|
||||
{
|
||||
struct device *dev = chan->device->dev;
|
||||
struct edma_chan *echan = to_edma_chan(chan);
|
||||
struct edmacc_param *param;
|
||||
struct edma_desc *edesc;
|
||||
size_t src_icg, dst_icg;
|
||||
int src_bidx, dst_bidx;
|
||||
|
||||
/* Slave mode is not supported */
|
||||
if (is_slave_direction(xt->dir))
|
||||
return NULL;
|
||||
|
||||
if (xt->frame_size != 1 || xt->numf == 0)
|
||||
return NULL;
|
||||
|
||||
if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
|
||||
return NULL;
|
||||
|
||||
src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
|
||||
if (src_icg) {
|
||||
src_bidx = src_icg + xt->sgl[0].size;
|
||||
} else if (xt->src_inc) {
|
||||
src_bidx = xt->sgl[0].size;
|
||||
} else {
|
||||
dev_err(dev, "%s: SRC constant addressing is not supported\n",
|
||||
__func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
|
||||
if (dst_icg) {
|
||||
dst_bidx = dst_icg + xt->sgl[0].size;
|
||||
} else if (xt->dst_inc) {
|
||||
dst_bidx = xt->sgl[0].size;
|
||||
} else {
|
||||
dev_err(dev, "%s: DST constant addressing is not supported\n",
|
||||
__func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
|
||||
return NULL;
|
||||
|
||||
edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
|
||||
if (!edesc)
|
||||
return NULL;
|
||||
|
||||
edesc->direction = DMA_MEM_TO_MEM;
|
||||
edesc->echan = echan;
|
||||
edesc->pset_nr = 1;
|
||||
|
||||
param = &edesc->pset[0].param;
|
||||
|
||||
param->src = xt->src_start;
|
||||
param->dst = xt->dst_start;
|
||||
param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
|
||||
param->ccnt = 1;
|
||||
param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
|
||||
param->src_dst_cidx = 0;
|
||||
|
||||
param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
|
||||
param->opt |= ITCCHEN;
|
||||
/* Enable transfer complete interrupt if requested */
|
||||
if (tx_flags & DMA_PREP_INTERRUPT)
|
||||
param->opt |= TCINTEN;
|
||||
else
|
||||
edesc->polled = true;
|
||||
|
||||
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
|
||||
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_transfer_direction direction,
|
||||
|
@ -1917,7 +1992,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
|
|||
"Legacy memcpy is enabled, things might not work\n");
|
||||
|
||||
dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
|
||||
dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
|
||||
s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
|
||||
s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
|
||||
s_ddev->directions = BIT(DMA_MEM_TO_MEM);
|
||||
}
|
||||
|
||||
|
@ -1953,8 +2030,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
|
|||
|
||||
dma_cap_zero(m_ddev->cap_mask);
|
||||
dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
|
||||
dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
|
||||
|
||||
m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
|
||||
m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
|
||||
m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
|
||||
m_ddev->device_free_chan_resources = edma_free_chan_resources;
|
||||
m_ddev->device_issue_pending = edma_issue_pending;
|
||||
|
|
|
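The ti-edma hunks above add DMA_INTERLEAVE capability and an edma_prep_dma_interleaved() callback that accepts single-frame mem-to-mem templates. A hedged client-side sketch of driving that path through the generic dmaengine API; the channel request by capability mask, the sizes, and the blocking wait are illustrative assumptions, not taken from this series:

#include <linux/dmaengine.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Copy 64 "lines" of 256 bytes, skipping 64 bytes between lines on both sides. */
static int example_interleaved_copy(dma_addr_t dst, dma_addr_t src)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_INTERLEAVE, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt) {
		ret = -ENOMEM;
		goto out_release;
	}

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = 64;		/* number of lines */
	xt->frame_size = 1;	/* exactly one chunk per line, as edma requires */
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;
	xt->dst_sgl = true;
	xt->sgl[0].size = 256;	/* bytes copied per line */
	xt->sgl[0].icg = 64;	/* gap skipped between lines */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EINVAL;
		goto out_free;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	ret = dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;

out_free:
	kfree(xt);
out_release:
	dma_release_channel(chan);
	return ret;
}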
@ -32,6 +32,7 @@ struct k3_udma_glue_common {
|
|||
bool epib;
|
||||
u32 psdata_size;
|
||||
u32 swdata_size;
|
||||
u32 atype;
|
||||
};
|
||||
|
||||
struct k3_udma_glue_tx_channel {
|
||||
|
@ -121,6 +122,15 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
|
|||
return -ENOENT;
|
||||
|
||||
thread_id = dma_spec.args[0];
|
||||
if (dma_spec.args_count == 2) {
|
||||
if (dma_spec.args[1] > 2) {
|
||||
dev_err(common->dev, "Invalid channel atype: %u\n",
|
||||
dma_spec.args[1]);
|
||||
ret = -EINVAL;
|
||||
goto out_put_spec;
|
||||
}
|
||||
common->atype = dma_spec.args[1];
|
||||
}
|
||||
|
||||
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
|
||||
ret = -EINVAL;
|
||||
|
@ -202,7 +212,8 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
|
|||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
|
||||
req.nav_id = tisci_rm->tisci_dev_id;
|
||||
req.index = tx_chn->udma_tchan_id;
|
||||
if (tx_chn->tx_pause_on_err)
|
||||
|
@ -216,6 +227,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
|
|||
req.tx_supr_tdpkt = 1;
|
||||
req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
|
||||
req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
|
||||
req.tx_atype = tx_chn->common.atype;
|
||||
|
||||
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
|
||||
}
|
||||
|
@ -502,7 +514,8 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
|
|||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
|
||||
|
||||
req.nav_id = tisci_rm->tisci_dev_id;
|
||||
req.index = rx_chn->udma_rchan_id;
|
||||
|
@ -519,6 +532,7 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
|
|||
req.flowid_cnt = rx_chn->flow_num;
|
||||
}
|
||||
req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
|
||||
req.rx_atype = rx_chn->common.atype;
|
||||
|
||||
ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
|
||||
if (ret)
|
||||
|
|
|
@ -149,6 +149,7 @@ struct udma_dev {
|
|||
|
||||
struct udma_chan *channels;
|
||||
u32 psil_base;
|
||||
u32 atype;
|
||||
};
|
||||
|
||||
struct udma_desc {
|
||||
|
@ -192,6 +193,7 @@ struct udma_chan_config {
|
|||
u32 hdesc_size; /* Size of a packet descriptor in packet mode */
|
||||
bool notdpkt; /* Suppress sending TDC packet */
|
||||
int remote_thread_id;
|
||||
u32 atype;
|
||||
u32 src_thread;
|
||||
u32 dst_thread;
|
||||
enum psil_endpoint_type ep_type;
|
||||
|
@ -1569,7 +1571,8 @@ err_rflow:
|
|||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
|
||||
|
||||
#define TISCI_RCHAN_VALID_PARAMS ( \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
|
||||
|
@ -1579,7 +1582,8 @@ err_rflow:
|
|||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
|
||||
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
|
||||
|
||||
static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
|
||||
{
|
||||
|
@ -1601,6 +1605,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
|
|||
req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
|
||||
req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
|
||||
req_tx.txcq_qnum = tc_ring;
|
||||
req_tx.tx_atype = ud->atype;
|
||||
|
||||
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
|
||||
if (ret) {
|
||||
|
@ -1614,6 +1619,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
|
|||
req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
|
||||
req_rx.rxcq_qnum = tc_ring;
|
||||
req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
|
||||
req_rx.rx_atype = ud->atype;
|
||||
|
||||
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
|
||||
if (ret)
|
||||
|
@ -1649,6 +1655,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
|
|||
req_tx.tx_supr_tdpkt = uc->config.notdpkt;
|
||||
req_tx.tx_fetch_size = fetch_size >> 2;
|
||||
req_tx.txcq_qnum = tc_ring;
|
||||
req_tx.tx_atype = uc->config.atype;
|
||||
|
||||
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
|
||||
if (ret)
|
||||
|
@ -1685,6 +1692,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
|
|||
req_rx.rx_fetch_size = fetch_size >> 2;
|
||||
req_rx.rxcq_qnum = rx_ring;
|
||||
req_rx.rx_chan_type = mode;
|
||||
req_rx.rx_atype = uc->config.atype;
|
||||
|
||||
ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
|
||||
if (ret) {
|
||||
|
@ -3063,13 +3071,18 @@ static void udma_free_chan_resources(struct dma_chan *chan)
|
|||
|
||||
static struct platform_driver udma_driver;
|
||||
|
||||
struct udma_filter_param {
|
||||
int remote_thread_id;
|
||||
u32 atype;
|
||||
};
|
||||
|
||||
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
|
||||
{
|
||||
struct udma_chan_config *ucc;
|
||||
struct psil_endpoint_config *ep_config;
|
||||
struct udma_filter_param *filter_param;
|
||||
struct udma_chan *uc;
|
||||
struct udma_dev *ud;
|
||||
u32 *args;
|
||||
|
||||
if (chan->device->dev->driver != &udma_driver.driver)
|
||||
return false;
|
||||
|
@ -3077,9 +3090,16 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
|
|||
uc = to_udma_chan(chan);
|
||||
ucc = &uc->config;
|
||||
ud = uc->ud;
|
||||
args = param;
|
||||
filter_param = param;
|
||||
|
||||
ucc->remote_thread_id = args[0];
|
||||
if (filter_param->atype > 2) {
|
||||
dev_err(ud->dev, "Invalid channel atype: %u\n",
|
||||
filter_param->atype);
|
||||
return false;
|
||||
}
|
||||
|
||||
ucc->remote_thread_id = filter_param->remote_thread_id;
|
||||
ucc->atype = filter_param->atype;
|
||||
|
||||
if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
|
||||
ucc->dir = DMA_MEM_TO_DEV;
|
||||
|
@ -3092,6 +3112,7 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
|
|||
ucc->remote_thread_id);
|
||||
ucc->dir = DMA_MEM_TO_MEM;
|
||||
ucc->remote_thread_id = -1;
|
||||
ucc->atype = 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -3130,13 +3151,20 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
|
|||
{
|
||||
struct udma_dev *ud = ofdma->of_dma_data;
|
||||
dma_cap_mask_t mask = ud->ddev.cap_mask;
|
||||
struct udma_filter_param filter_param;
|
||||
struct dma_chan *chan;
|
||||
|
||||
if (dma_spec->args_count != 1)
|
||||
if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
|
||||
return NULL;
|
||||
|
||||
chan = __dma_request_channel(&mask, udma_dma_filter_fn,
|
||||
&dma_spec->args[0], ofdma->of_node);
|
||||
filter_param.remote_thread_id = dma_spec->args[0];
|
||||
if (dma_spec->args_count == 2)
|
||||
filter_param.atype = dma_spec->args[1];
|
||||
else
|
||||
filter_param.atype = 0;
|
||||
|
||||
chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
|
||||
ofdma->of_node);
|
||||
if (!chan) {
|
||||
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
@ -3473,6 +3501,66 @@ static int udma_setup_rx_flush(struct udma_dev *ud)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
static void udma_dbg_summary_show_chan(struct seq_file *s,
|
||||
struct dma_chan *chan)
|
||||
{
|
||||
struct udma_chan *uc = to_udma_chan(chan);
|
||||
struct udma_chan_config *ucc = &uc->config;
|
||||
|
||||
seq_printf(s, " %-13s| %s", dma_chan_name(chan),
|
||||
chan->dbg_client_name ?: "in-use");
|
||||
seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
|
||||
|
||||
switch (uc->config.dir) {
|
||||
case DMA_MEM_TO_MEM:
|
||||
seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
|
||||
ucc->src_thread, ucc->dst_thread);
|
||||
break;
|
||||
case DMA_DEV_TO_MEM:
|
||||
seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
|
||||
ucc->src_thread, ucc->dst_thread);
|
||||
break;
|
||||
case DMA_MEM_TO_DEV:
|
||||
seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
|
||||
ucc->src_thread, ucc->dst_thread);
|
||||
break;
|
||||
default:
|
||||
seq_printf(s, ")\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (ucc->ep_type == PSIL_EP_NATIVE) {
|
||||
seq_printf(s, "PSI-L Native");
|
||||
if (ucc->metadata_size) {
|
||||
seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
|
||||
if (ucc->psd_size)
|
||||
seq_printf(s, " PSDsize:%u", ucc->psd_size);
|
||||
seq_printf(s, " ]");
|
||||
}
|
||||
} else {
|
||||
seq_printf(s, "PDMA");
|
||||
if (ucc->enable_acc32 || ucc->enable_burst)
|
||||
seq_printf(s, "[%s%s ]",
|
||||
ucc->enable_acc32 ? " ACC32" : "",
|
||||
ucc->enable_burst ? " BURST" : "");
|
||||
}
|
||||
|
||||
seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
|
||||
}
|
||||
|
||||
static void udma_dbg_summary_show(struct seq_file *s,
|
||||
struct dma_device *dma_dev)
|
||||
{
|
||||
struct dma_chan *chan;
|
||||
|
||||
list_for_each_entry(chan, &dma_dev->channels, device_node) {
|
||||
if (chan->client_count)
|
||||
udma_dbg_summary_show_chan(s, chan);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_DEBUG_FS */
|
||||
|
||||
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
|
||||
|
@ -3519,6 +3607,12 @@ static int udma_probe(struct platform_device *pdev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
|
||||
if (!ret && ud->atype > 2) {
|
||||
dev_err(dev, "Invalid atype: %u\n", ud->atype);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
|
||||
ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
|
||||
|
||||
|
@ -3553,6 +3647,9 @@ static int udma_probe(struct platform_device *pdev)
|
|||
ud->ddev.device_resume = udma_resume;
|
||||
ud->ddev.device_terminate_all = udma_terminate_all;
|
||||
ud->ddev.device_synchronize = udma_synchronize;
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
ud->ddev.dbg_summary_show = udma_dbg_summary_show;
|
||||
#endif
|
||||
|
||||
ud->ddev.device_free_chan_resources = udma_free_chan_resources;
|
||||
ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
|
||||
|
|
|
@@ -124,7 +124,7 @@ struct omap_desc {
 	uint32_t csdp;		/* CSDP value */
 
 	unsigned sglen;
-	struct omap_sg sg[0];
+	struct omap_sg sg[];
 };
 
 enum {
@@ -68,7 +68,7 @@ struct uniphier_mdmac_device {
 	struct dma_device ddev;
 	struct clk *clk;
 	void __iomem *reg_base;
-	struct uniphier_mdmac_chan channels[0];
+	struct uniphier_mdmac_chan channels[];
 };
 
 static struct uniphier_mdmac_chan *
@ -0,0 +1,609 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* External DMA controller driver for UniPhier SoCs
|
||||
* Copyright 2019 Socionext Inc.
|
||||
* Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include "dmaengine.h"
|
||||
#include "virt-dma.h"
|
||||
|
||||
#define XDMAC_CH_WIDTH 0x100
|
||||
|
||||
#define XDMAC_TFA 0x08
|
||||
#define XDMAC_TFA_MCNT_MASK GENMASK(23, 16)
|
||||
#define XDMAC_TFA_MASK GENMASK(5, 0)
|
||||
#define XDMAC_SADM 0x10
|
||||
#define XDMAC_SADM_STW_MASK GENMASK(25, 24)
|
||||
#define XDMAC_SADM_SAM BIT(4)
|
||||
#define XDMAC_SADM_SAM_FIXED XDMAC_SADM_SAM
|
||||
#define XDMAC_SADM_SAM_INC 0
|
||||
#define XDMAC_DADM 0x14
|
||||
#define XDMAC_DADM_DTW_MASK XDMAC_SADM_STW_MASK
|
||||
#define XDMAC_DADM_DAM XDMAC_SADM_SAM
|
||||
#define XDMAC_DADM_DAM_FIXED XDMAC_SADM_SAM_FIXED
|
||||
#define XDMAC_DADM_DAM_INC XDMAC_SADM_SAM_INC
|
||||
#define XDMAC_EXSAD 0x18
|
||||
#define XDMAC_EXDAD 0x1c
|
||||
#define XDMAC_SAD 0x20
|
||||
#define XDMAC_DAD 0x24
|
||||
#define XDMAC_ITS 0x28
|
||||
#define XDMAC_ITS_MASK GENMASK(25, 0)
|
||||
#define XDMAC_TNUM 0x2c
|
||||
#define XDMAC_TNUM_MASK GENMASK(15, 0)
|
||||
#define XDMAC_TSS 0x30
|
||||
#define XDMAC_TSS_REQ BIT(0)
|
||||
#define XDMAC_IEN 0x34
|
||||
#define XDMAC_IEN_ERRIEN BIT(1)
|
||||
#define XDMAC_IEN_ENDIEN BIT(0)
|
||||
#define XDMAC_STAT 0x40
|
||||
#define XDMAC_STAT_TENF BIT(0)
|
||||
#define XDMAC_IR 0x44
|
||||
#define XDMAC_IR_ERRF BIT(1)
|
||||
#define XDMAC_IR_ENDF BIT(0)
|
||||
#define XDMAC_ID 0x48
|
||||
#define XDMAC_ID_ERRIDF BIT(1)
|
||||
#define XDMAC_ID_ENDIDF BIT(0)
|
||||
|
||||
#define XDMAC_MAX_CHANS 16
|
||||
#define XDMAC_INTERVAL_CLKS 20
|
||||
#define XDMAC_MAX_WORDS XDMAC_TNUM_MASK
|
||||
|
||||
/* cut lower bit for maintain alignment of maximum transfer size */
|
||||
#define XDMAC_MAX_WORD_SIZE (XDMAC_ITS_MASK & ~GENMASK(3, 0))
|
||||
|
||||
#define UNIPHIER_XDMAC_BUSWIDTHS \
|
||||
(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
|
||||
|
||||
struct uniphier_xdmac_desc_node {
|
||||
dma_addr_t src;
|
||||
dma_addr_t dst;
|
||||
u32 burst_size;
|
||||
u32 nr_burst;
|
||||
};
|
||||
|
||||
struct uniphier_xdmac_desc {
|
||||
struct virt_dma_desc vd;
|
||||
|
||||
unsigned int nr_node;
|
||||
unsigned int cur_node;
|
||||
enum dma_transfer_direction dir;
|
||||
struct uniphier_xdmac_desc_node nodes[];
|
||||
};
|
||||
|
||||
struct uniphier_xdmac_chan {
|
||||
struct virt_dma_chan vc;
|
||||
struct uniphier_xdmac_device *xdev;
|
||||
struct uniphier_xdmac_desc *xd;
|
||||
void __iomem *reg_ch_base;
|
||||
struct dma_slave_config sconfig;
|
||||
int id;
|
||||
unsigned int req_factor;
|
||||
};
|
||||
|
||||
struct uniphier_xdmac_device {
|
||||
struct dma_device ddev;
|
||||
void __iomem *reg_base;
|
||||
int nr_chans;
|
||||
struct uniphier_xdmac_chan channels[];
|
||||
};
|
||||
|
||||
static struct uniphier_xdmac_chan *
|
||||
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
|
||||
{
|
||||
return container_of(vc, struct uniphier_xdmac_chan, vc);
|
||||
}
|
||||
|
||||
static struct uniphier_xdmac_desc *
|
||||
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
|
||||
{
|
||||
return container_of(vd, struct uniphier_xdmac_desc, vd);
|
||||
}
|
||||
|
||||
/* xc->vc.lock must be held by caller */
|
||||
static struct uniphier_xdmac_desc *
|
||||
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
|
||||
{
|
||||
struct virt_dma_desc *vd;
|
||||
|
||||
vd = vchan_next_desc(&xc->vc);
|
||||
if (!vd)
|
||||
return NULL;
|
||||
|
||||
list_del(&vd->node);
|
||||
|
||||
return to_uniphier_xdmac_desc(vd);
|
||||
}
|
||||
|
||||
/* xc->vc.lock must be held by caller */
|
||||
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
|
||||
struct uniphier_xdmac_desc *xd)
|
||||
{
|
||||
u32 src_mode, src_addr, src_width;
|
||||
u32 dst_mode, dst_addr, dst_width;
|
||||
u32 val, its, tnum;
|
||||
enum dma_slave_buswidth buswidth;
|
||||
|
||||
src_addr = xd->nodes[xd->cur_node].src;
|
||||
dst_addr = xd->nodes[xd->cur_node].dst;
|
||||
its = xd->nodes[xd->cur_node].burst_size;
|
||||
tnum = xd->nodes[xd->cur_node].nr_burst;
|
||||
|
||||
/*
|
||||
* The width of MEM side must be 4 or 8 bytes, that does not
|
||||
* affect that of DEV side and transfer size.
|
||||
*/
|
||||
if (xd->dir == DMA_DEV_TO_MEM) {
|
||||
src_mode = XDMAC_SADM_SAM_FIXED;
|
||||
buswidth = xc->sconfig.src_addr_width;
|
||||
} else {
|
||||
src_mode = XDMAC_SADM_SAM_INC;
|
||||
buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
|
||||
}
|
||||
src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));
|
||||
|
||||
if (xd->dir == DMA_MEM_TO_DEV) {
|
||||
dst_mode = XDMAC_DADM_DAM_FIXED;
|
||||
buswidth = xc->sconfig.dst_addr_width;
|
||||
} else {
|
||||
dst_mode = XDMAC_DADM_DAM_INC;
|
||||
buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
|
||||
}
|
||||
dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));
|
||||
|
||||
/* setup transfer factor */
|
||||
val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
|
||||
val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
|
||||
writel(val, xc->reg_ch_base + XDMAC_TFA);
|
||||
|
||||
/* setup the channel */
|
||||
writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
|
||||
writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);
|
||||
|
||||
writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
|
||||
writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);
|
||||
|
||||
src_mode |= src_width;
|
||||
dst_mode |= dst_width;
|
||||
writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
|
||||
writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);
|
||||
|
||||
writel(its, xc->reg_ch_base + XDMAC_ITS);
|
||||
writel(tnum, xc->reg_ch_base + XDMAC_TNUM);
|
||||
|
||||
/* enable interrupt */
|
||||
writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
|
||||
xc->reg_ch_base + XDMAC_IEN);
|
||||
|
||||
/* start XDMAC */
|
||||
val = readl(xc->reg_ch_base + XDMAC_TSS);
|
||||
val |= XDMAC_TSS_REQ;
|
||||
writel(val, xc->reg_ch_base + XDMAC_TSS);
|
||||
}
|
||||
|
||||
/* xc->vc.lock must be held by caller */
|
||||
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/* disable interrupt */
|
||||
val = readl(xc->reg_ch_base + XDMAC_IEN);
|
||||
val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
|
||||
writel(val, xc->reg_ch_base + XDMAC_IEN);
|
||||
|
||||
/* stop XDMAC */
|
||||
val = readl(xc->reg_ch_base + XDMAC_TSS);
|
||||
val &= ~XDMAC_TSS_REQ;
|
||||
writel(0, xc->reg_ch_base + XDMAC_TSS);
|
||||
|
||||
/* wait until transfer is stopped */
|
||||
return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
|
||||
!(val & XDMAC_STAT_TENF), 100, 1000);
|
||||
}
|
||||
|
||||
/* xc->vc.lock must be held by caller */
|
||||
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
|
||||
{
|
||||
struct uniphier_xdmac_desc *xd;
|
||||
|
||||
xd = uniphier_xdmac_next_desc(xc);
|
||||
if (xd)
|
||||
uniphier_xdmac_chan_start(xc, xd);
|
||||
|
||||
/* set desc to chan regardless of xd is null */
|
||||
xc->xd = xd;
|
||||
}
|
||||
|
||||
static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
|
||||
{
|
||||
u32 stat;
|
||||
int ret;
|
||||
|
||||
spin_lock(&xc->vc.lock);
|
||||
|
||||
stat = readl(xc->reg_ch_base + XDMAC_ID);
|
||||
|
||||
if (stat & XDMAC_ID_ERRIDF) {
|
||||
ret = uniphier_xdmac_chan_stop(xc);
|
||||
if (ret)
|
||||
dev_err(xc->xdev->ddev.dev,
|
||||
"DMA transfer error with aborting issue\n");
|
||||
else
|
||||
dev_err(xc->xdev->ddev.dev,
|
||||
"DMA transfer error\n");
|
||||
|
||||
} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
|
||||
xc->xd->cur_node++;
|
||||
if (xc->xd->cur_node >= xc->xd->nr_node) {
|
||||
vchan_cookie_complete(&xc->xd->vd);
|
||||
uniphier_xdmac_start(xc);
|
||||
} else {
|
||||
uniphier_xdmac_chan_start(xc, xc->xd);
|
||||
}
|
||||
}
|
||||
|
||||
/* write bits to clear */
|
||||
writel(stat, xc->reg_ch_base + XDMAC_IR);
|
||||
|
||||
spin_unlock(&xc->vc.lock);
|
||||
}
|
||||
|
||||
static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct uniphier_xdmac_device *xdev = dev_id;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < xdev->nr_chans; i++)
|
||||
uniphier_xdmac_chan_irq(&xdev->channels[i]);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
vchan_free_chan_resources(to_virt_chan(chan));
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
|
||||
dma_addr_t src, size_t len, unsigned long flags)
|
||||
{
|
||||
struct virt_dma_chan *vc = to_virt_chan(chan);
|
||||
struct uniphier_xdmac_desc *xd;
|
||||
unsigned int nr;
|
||||
size_t burst_size, tlen;
|
||||
int i;
|
||||
|
||||
if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
|
||||
return NULL;
|
||||
|
||||
nr = 1 + len / XDMAC_MAX_WORD_SIZE;
|
||||
|
||||
xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
|
||||
if (!xd)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
|
||||
xd->nodes[i].src = src;
|
||||
xd->nodes[i].dst = dst;
|
||||
xd->nodes[i].burst_size = burst_size;
|
||||
xd->nodes[i].nr_burst = len / burst_size;
|
||||
tlen = rounddown(len, burst_size);
|
||||
src += tlen;
|
||||
dst += tlen;
|
||||
len -= tlen;
|
||||
}
|
||||
|
||||
xd->dir = DMA_MEM_TO_MEM;
|
||||
xd->nr_node = nr;
|
||||
xd->cur_node = 0;
|
||||
|
||||
return vchan_tx_prep(vc, &xd->vd, flags);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len,
|
||||
enum dma_transfer_direction direction,
|
||||
unsigned long flags, void *context)
|
||||
{
|
||||
struct virt_dma_chan *vc = to_virt_chan(chan);
|
||||
struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
|
||||
struct uniphier_xdmac_desc *xd;
|
||||
struct scatterlist *sg;
|
||||
enum dma_slave_buswidth buswidth;
|
||||
u32 maxburst;
|
||||
int i;
|
||||
|
||||
if (!is_slave_direction(direction))
|
||||
return NULL;
|
||||
|
||||
if (direction == DMA_DEV_TO_MEM) {
|
||||
buswidth = xc->sconfig.src_addr_width;
|
||||
maxburst = xc->sconfig.src_maxburst;
|
||||
} else {
|
||||
buswidth = xc->sconfig.dst_addr_width;
|
||||
maxburst = xc->sconfig.dst_maxburst;
|
||||
}
|
||||
|
||||
if (!maxburst)
|
||||
maxburst = 1;
|
||||
if (maxburst > xc->xdev->ddev.max_burst) {
|
||||
dev_err(xc->xdev->ddev.dev,
|
||||
"Exceed maximum number of burst words\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
|
||||
if (!xd)
|
||||
return NULL;
|
||||
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
|
||||
? xc->sconfig.src_addr : sg_dma_address(sg);
|
||||
xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
|
||||
? xc->sconfig.dst_addr : sg_dma_address(sg);
|
||||
xd->nodes[i].burst_size = maxburst * buswidth;
|
||||
xd->nodes[i].nr_burst =
|
||||
sg_dma_len(sg) / xd->nodes[i].burst_size;
|
||||
|
||||
/*
|
||||
* Currently transfer that size doesn't align the unit size
|
||||
* (the number of burst words * bus-width) is not allowed,
|
||||
* because the driver does not support the way to transfer
|
||||
* residue size. As a matter of fact, in order to transfer
|
||||
* arbitrary size, 'src_maxburst' or 'dst_maxburst' of
|
||||
* dma_slave_config must be 1.
|
||||
*/
|
||||
if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
|
||||
dev_err(xc->xdev->ddev.dev,
|
||||
"Unaligned transfer size: %d", sg_dma_len(sg));
|
||||
kfree(xd);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
|
||||
dev_err(xc->xdev->ddev.dev,
|
||||
"Exceed maximum transfer size");
|
||||
kfree(xd);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
xd->dir = direction;
|
||||
xd->nr_node = sg_len;
|
||||
xd->cur_node = 0;
|
||||
|
||||
return vchan_tx_prep(vc, &xd->vd, flags);
|
||||
}
|
||||
|
||||
static int uniphier_xdmac_slave_config(struct dma_chan *chan,
|
||||
struct dma_slave_config *config)
|
||||
{
|
||||
struct virt_dma_chan *vc = to_virt_chan(chan);
|
||||
struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
|
||||
|
||||
memcpy(&xc->sconfig, config, sizeof(*config));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
|
||||
{
|
||||
struct virt_dma_chan *vc = to_virt_chan(chan);
|
||||
struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
LIST_HEAD(head);
|
||||
|
||||
spin_lock_irqsave(&vc->lock, flags);
|
||||
|
||||
if (xc->xd) {
|
||||
vchan_terminate_vdesc(&xc->xd->vd);
|
||||
xc->xd = NULL;
|
||||
ret = uniphier_xdmac_chan_stop(xc);
|
||||
}
|
||||
|
||||
vchan_get_all_descriptors(vc, &head);
|
||||
|
||||
spin_unlock_irqrestore(&vc->lock, flags);
|
||||
|
||||
vchan_dma_desc_free_list(vc, &head);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void uniphier_xdmac_synchronize(struct dma_chan *chan)
|
||||
{
|
||||
vchan_synchronize(to_virt_chan(chan));
|
||||
}
|
||||
|
||||
static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
struct virt_dma_chan *vc = to_virt_chan(chan);
|
||||
struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&vc->lock, flags);
|
||||
|
||||
if (vchan_issue_pending(vc) && !xc->xd)
|
||||
uniphier_xdmac_start(xc);
|
||||
|
||||
spin_unlock_irqrestore(&vc->lock, flags);
|
||||
}
|
||||
|
||||
static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
|
||||
{
|
||||
kfree(to_uniphier_xdmac_desc(vd));
|
||||
}
|
||||
|
||||
static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
|
||||
int ch)
|
||||
{
|
||||
struct uniphier_xdmac_chan *xc = &xdev->channels[ch];
|
||||
|
||||
xc->xdev = xdev;
|
||||
xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
|
||||
xc->vc.desc_free = uniphier_xdmac_desc_free;
|
||||
|
||||
vchan_init(&xc->vc, &xdev->ddev);
|
||||
}
|
||||
|
||||
static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
|
||||
struct of_dma *ofdma)
|
||||
{
|
||||
struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
|
||||
int chan_id = dma_spec->args[0];
|
||||
|
||||
if (chan_id >= xdev->nr_chans)
|
||||
return NULL;
|
||||
|
||||
xdev->channels[chan_id].id = chan_id;
|
||||
xdev->channels[chan_id].req_factor = dma_spec->args[1];
|
||||
|
||||
return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
|
||||
}
|
||||
|
||||
static int uniphier_xdmac_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct uniphier_xdmac_device *xdev;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct dma_device *ddev;
|
||||
int irq;
|
||||
int nr_chans;
|
||||
int i, ret;
|
||||
|
||||
if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
|
||||
return -EINVAL;
|
||||
if (nr_chans > XDMAC_MAX_CHANS)
|
||||
nr_chans = XDMAC_MAX_CHANS;
|
||||
|
||||
xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
|
||||
GFP_KERNEL);
|
||||
if (!xdev)
|
||||
return -ENOMEM;
|
||||
|
||||
xdev->nr_chans = nr_chans;
|
||||
xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(xdev->reg_base))
|
||||
return PTR_ERR(xdev->reg_base);
|
||||
|
||||
ddev = &xdev->ddev;
|
||||
ddev->dev = dev;
|
||||
dma_cap_zero(ddev->cap_mask);
|
||||
dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
|
||||
dma_cap_set(DMA_SLAVE, ddev->cap_mask);
|
||||
ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
|
||||
ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
|
||||
ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
|
||||
BIT(DMA_MEM_TO_MEM);
|
||||
ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
|
||||
ddev->max_burst = XDMAC_MAX_WORDS;
|
||||
ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
|
||||
ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
|
||||
ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
|
||||
ddev->device_config = uniphier_xdmac_slave_config;
|
||||
ddev->device_terminate_all = uniphier_xdmac_terminate_all;
|
||||
ddev->device_synchronize = uniphier_xdmac_synchronize;
|
||||
ddev->device_tx_status = dma_cookie_status;
|
||||
ddev->device_issue_pending = uniphier_xdmac_issue_pending;
|
||||
INIT_LIST_HEAD(&ddev->channels);
|
||||
|
||||
for (i = 0; i < nr_chans; i++)
|
||||
uniphier_xdmac_chan_init(xdev, i);
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
|
||||
IRQF_SHARED, "xdmac", xdev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to request IRQ\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = dma_async_device_register(ddev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to register XDMA device\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = of_dma_controller_register(dev->of_node,
|
||||
of_dma_uniphier_xlate, xdev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to register XDMA controller\n");
|
||||
goto out_unregister_dmac;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, xdev);
|
||||
|
||||
dev_info(&pdev->dev, "UniPhier XDMAC driver (%d channels)\n",
|
||||
nr_chans);
|
||||
|
||||
return 0;
|
||||
|
||||
out_unregister_dmac:
|
||||
dma_async_device_unregister(ddev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int uniphier_xdmac_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
|
||||
struct dma_device *ddev = &xdev->ddev;
|
||||
struct dma_chan *chan;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Before reaching here, almost all descriptors have been freed by the
|
||||
* ->device_free_chan_resources() hook. However, each channel might
|
||||
* be still holding one descriptor that was on-flight at that moment.
|
||||
* Terminate it to make sure this hardware is no longer running. Then,
|
||||
* free the channel resources once again to avoid memory leak.
|
||||
*/
|
||||
list_for_each_entry(chan, &ddev->channels, device_node) {
|
||||
ret = dmaengine_terminate_sync(chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
uniphier_xdmac_free_chan_resources(chan);
|
||||
}
|
||||
|
||||
of_dma_controller_free(pdev->dev.of_node);
|
||||
dma_async_device_unregister(ddev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id uniphier_xdmac_match[] = {
|
||||
{ .compatible = "socionext,uniphier-xdmac" },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
|
||||
|
||||
static struct platform_driver uniphier_xdmac_driver = {
|
||||
.probe = uniphier_xdmac_probe,
|
||||
.remove = uniphier_xdmac_remove,
|
||||
.driver = {
|
||||
.name = "uniphier-xdmac",
|
||||
.of_match_table = uniphier_xdmac_match,
|
||||
},
|
||||
};
|
||||
module_platform_driver(uniphier_xdmac_driver);
|
||||
|
||||
MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
|
||||
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
|
||||
MODULE_LICENSE("GPL v2");
|
|
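The new uniphier-xdmac driver above exposes the standard slave-DMA callbacks (device_config, prep_slave_sg, terminate_all). A hedged sketch of the consumer side through the generic dmaengine slave API; the "tx" channel name, the FIFO address, bus width and the blocking wait are placeholder assumptions, not taken from this driver:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical peripheral driver pushing one already-mapped buffer to its device FIFO. */
static int example_xdmac_tx(struct device *dev, dma_addr_t buf, size_t len,
			    dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,	/* keeps any length aligned, per the driver comment above */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "tx");	/* name comes from the consumer's "dma-names" */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	ret = dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
out:
	dma_release_channel(chan);
	return ret;
}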
@ -125,7 +125,9 @@
|
|||
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
|
||||
|
||||
/* HW specific definitions */
|
||||
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
|
||||
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
|
||||
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
|
||||
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
|
||||
|
||||
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
|
||||
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
|
||||
|
@ -468,6 +470,7 @@ struct xilinx_dma_config {
|
|||
struct clk **tx_clk, struct clk **txs_clk,
|
||||
struct clk **rx_clk, struct clk **rxs_clk);
|
||||
irqreturn_t (*irq_handler)(int irq, void *data);
|
||||
const int max_channels;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -485,16 +488,15 @@ struct xilinx_dma_config {
|
|||
* @txs_clk: DMA mm2s stream clock
|
||||
* @rx_clk: DMA s2mm clock
|
||||
* @rxs_clk: DMA s2mm stream clock
|
||||
* @nr_channels: Number of channels DMA device supports
|
||||
* @chan_id: DMA channel identifier
|
||||
* @s2mm_chan_id: DMA s2mm channel identifier
|
||||
* @mm2s_chan_id: DMA mm2s channel identifier
|
||||
* @max_buffer_len: Max buffer length
|
||||
* @s2mm_index: S2MM channel index
|
||||
*/
|
||||
struct xilinx_dma_device {
|
||||
void __iomem *regs;
|
||||
struct device *dev;
|
||||
struct dma_device common;
|
||||
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
|
||||
struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
|
||||
u32 flush_on_fsync;
|
||||
bool ext_addr;
|
||||
struct platform_device *pdev;
|
||||
|
@ -504,10 +506,9 @@ struct xilinx_dma_device {
|
|||
struct clk *txs_clk;
|
||||
struct clk *rx_clk;
|
||||
struct clk *rxs_clk;
|
||||
u32 nr_channels;
|
||||
u32 chan_id;
|
||||
u32 s2mm_chan_id;
|
||||
u32 mm2s_chan_id;
|
||||
u32 max_buffer_len;
|
||||
u32 s2mm_index;
|
||||
};
|
||||
|
||||
/* Macros */
|
||||
|
@ -1745,7 +1746,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
|
|||
return IRQ_NONE;
|
||||
|
||||
if (chan->direction == DMA_DEV_TO_MEM)
|
||||
chan_offset = chan->xdev->s2mm_index;
|
||||
chan_offset = chan->xdev->dma_config->max_channels / 2;
|
||||
|
||||
chan_offset = chan_offset + (chan_id - 1);
|
||||
chan = chan->xdev->chan[chan_offset];
|
||||
|
@ -2404,16 +2405,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
|
|||
u32 reg;
|
||||
int err;
|
||||
|
||||
if (chan->cyclic)
|
||||
xilinx_dma_chan_reset(chan);
|
||||
|
||||
err = chan->stop_transfer(chan);
|
||||
if (err) {
|
||||
dev_err(chan->dev, "Cannot stop channel %p: %x\n",
|
||||
chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
|
||||
chan->err = true;
|
||||
if (!chan->cyclic) {
|
||||
err = chan->stop_transfer(chan);
|
||||
if (err) {
|
||||
dev_err(chan->dev, "Cannot stop channel %p: %x\n",
|
||||
chan, dma_ctrl_read(chan,
|
||||
XILINX_DMA_REG_DMASR));
|
||||
chan->err = true;
|
||||
}
|
||||
}
|
||||
|
||||
xilinx_dma_chan_reset(chan);
|
||||
/* Remove and free all of the descriptors in the lists */
|
||||
xilinx_dma_free_descriptors(chan);
|
||||
chan->idle = true;
|
||||
|
@ -2730,12 +2732,11 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
|
|||
*
|
||||
* @xdev: Driver specific device structure
|
||||
* @node: Device node
|
||||
* @chan_id: DMA Channel id
|
||||
*
|
||||
* Return: '0' on success and failure value on error
|
||||
*/
|
||||
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
|
||||
struct device_node *node, int chan_id)
|
||||
struct device_node *node)
|
||||
{
|
||||
struct xilinx_dma_chan *chan;
|
||||
bool has_dre = false;
|
||||
|
@ -2787,8 +2788,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
|
|||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
|
||||
of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
|
||||
chan->direction = DMA_MEM_TO_DEV;
|
||||
chan->id = chan_id;
|
||||
chan->tdest = chan_id;
|
||||
chan->id = xdev->mm2s_chan_id++;
|
||||
chan->tdest = chan->id;
|
||||
|
||||
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
|
||||
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
|
||||
|
@ -2804,9 +2805,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
|
|||
of_device_is_compatible(node,
|
||||
"xlnx,axi-dma-s2mm-channel")) {
|
||||
chan->direction = DMA_DEV_TO_MEM;
|
||||
chan->id = chan_id;
|
||||
xdev->s2mm_index = xdev->nr_channels;
|
||||
chan->tdest = chan_id - xdev->nr_channels;
|
||||
chan->id = xdev->s2mm_chan_id++;
|
||||
chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
|
||||
chan->has_vflip = of_property_read_bool(node,
|
||||
"xlnx,enable-vert-flip");
|
||||
if (chan->has_vflip) {
|
||||
|
@ -2908,9 +2908,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
|
|||
dev_warn(xdev->dev, "missing dma-channels property\n");
|
||||
|
||||
for (i = 0; i < nr_channels; i++)
|
||||
xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
|
||||
|
||||
xdev->nr_channels += nr_channels;
|
||||
xilinx_dma_chan_probe(xdev, node);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2928,7 +2926,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
|
|||
struct xilinx_dma_device *xdev = ofdma->of_dma_data;
|
||||
int chan_id = dma_spec->args[0];
|
||||
|
||||
if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
|
||||
if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
|
||||
return NULL;
|
||||
|
||||
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
|
||||
|
@ -2938,23 +2936,27 @@ static const struct xilinx_dma_config axidma_config = {
|
|||
.dmatype = XDMA_TYPE_AXIDMA,
|
||||
.clk_init = axidma_clk_init,
|
||||
.irq_handler = xilinx_dma_irq_handler,
|
||||
.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
|
||||
};
|
||||
|
||||
static const struct xilinx_dma_config aximcdma_config = {
|
||||
.dmatype = XDMA_TYPE_AXIMCDMA,
|
||||
.clk_init = axidma_clk_init,
|
||||
.irq_handler = xilinx_mcdma_irq_handler,
|
||||
.max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
|
||||
};
|
||||
static const struct xilinx_dma_config axicdma_config = {
|
||||
.dmatype = XDMA_TYPE_CDMA,
|
||||
.clk_init = axicdma_clk_init,
|
||||
.irq_handler = xilinx_dma_irq_handler,
|
||||
.max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
|
||||
};
|
||||
|
||||
static const struct xilinx_dma_config axivdma_config = {
|
||||
.dmatype = XDMA_TYPE_VDMA,
|
||||
.clk_init = axivdma_clk_init,
|
||||
.irq_handler = xilinx_dma_irq_handler,
|
||||
.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
|
||||
};
|
||||
|
||||
static const struct of_device_id xilinx_dma_of_ids[] = {
|
||||
|
@ -3011,6 +3013,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
|
|||
|
||||
/* Retrieve the DMA engine properties from the device tree */
|
||||
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
|
||||
xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
|
||||
|
||||
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
|
||||
xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
|
||||
|
@ -3104,7 +3107,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
|
||||
for (i = 0; i < xdev->nr_channels; i++)
|
||||
for (i = 0; i < xdev->dma_config->max_channels; i++)
|
||||
if (xdev->chan[i])
|
||||
xdev->chan[i]->num_frms = num_frames;
|
||||
}
|
||||
|
@ -3134,7 +3137,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
|
|||
disable_clks:
|
||||
xdma_disable_allclks(xdev);
|
||||
error:
|
||||
for (i = 0; i < xdev->nr_channels; i++)
|
||||
for (i = 0; i < xdev->dma_config->max_channels; i++)
|
||||
if (xdev->chan[i])
|
||||
xilinx_dma_chan_remove(xdev->chan[i]);
|
||||
|
||||
|
@ -3156,7 +3159,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)
|
|||
|
||||
dma_async_device_unregister(&xdev->common);
|
||||
|
||||
for (i = 0; i < xdev->nr_channels; i++)
|
||||
for (i = 0; i < xdev->dma_config->max_channels; i++)
|
||||
if (xdev->chan[i])
|
||||
xilinx_dma_chan_remove(xdev->chan[i]);
|
||||
|
||||
|
|
|
@@ -300,6 +300,8 @@ struct dma_router {
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
  * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ *	dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client_count: how many clients are using this channel

@@ -318,6 +320,9 @@ struct dma_chan {
 	int chan_id;
 	struct dma_chan_dev *dev;
 	const char *name;
+#ifdef CONFIG_DEBUG_FS
+	char *dbg_client_name;
+#endif
 
 	struct list_head device_node;
 	struct dma_chan_percpu __percpu *local;

@@ -618,10 +623,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
 
 static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
 {
-	if (tx->unmap) {
-		dmaengine_unmap_put(tx->unmap);
-		tx->unmap = NULL;
-	}
+	if (!tx->unmap)
+		return;
+
+	dmaengine_unmap_put(tx->unmap);
+	tx->unmap = NULL;
 }
 
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH

@@ -805,7 +811,9 @@ struct dma_filter {
  *	called and there are no further references to this structure. This
  *	must be implemented to free resources however many existing drivers
  *	do not and are therefore not safe to unbind while in use.
- *
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ *	will be used when this is omitted, but custom code can show extra,
+ *	controller specific information.
  */
 struct dma_device {
 	struct kref ref;

@@ -891,6 +899,11 @@ struct dma_device {
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
 	void (*device_release)(struct dma_device *dev);
+	/* debugfs support */
+#ifdef CONFIG_DEBUG_FS
+	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+	struct dentry *dbg_dev_root;
+#endif
 };
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,

@@ -1155,14 +1168,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
 static inline bool dmaengine_check_align(enum dmaengine_alignment align,
 					 size_t off1, size_t off2, size_t len)
 {
-	size_t mask;
-
-	if (!align)
-		return true;
-	mask = (1 << align) - 1;
-	if (mask & (off1 | off2 | len))
-		return false;
-	return true;
+	return !(((1 << align) - 1) & (off1 | off2 | len));
 }
 
 static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,

@@ -1236,9 +1242,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
 {
 	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
 		return dma_dev_to_maxpq(dma);
-	else if (dmaf_p_disabled_continue(flags))
+	if (dmaf_p_disabled_continue(flags))
 		return dma_dev_to_maxpq(dma) - 1;
-	else if (dmaf_continue(flags))
+	if (dmaf_continue(flags))
 		return dma_dev_to_maxpq(dma) - 3;
 	BUG();
 }

@@ -1249,7 +1255,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
 	if (inc) {
 		if (dir_icg)
 			return dir_icg;
-		else if (sgl)
+		if (sgl)
 			return icg;
 	}
 

@@ -1415,11 +1421,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 static inline void
 dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
 {
-	if (st) {
-		st->last = last;
-		st->used = used;
-		st->residue = residue;
-	}
+	if (!st)
+		return;
+
+	st->last = last;
+	st->used = used;
+	st->residue = residue;
 }
 
 #ifdef CONFIG_DMA_ENGINE

@@ -1496,12 +1503,11 @@ static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 	if (ret)
 		return ret;
 
-	if (caps.descriptor_reuse) {
-		tx->flags |= DMA_CTRL_REUSE;
-		return 0;
-	} else {
+	if (!caps.descriptor_reuse)
 		return -EPERM;
-	}
+
+	tx->flags |= DMA_CTRL_REUSE;
+	return 0;
 }
 
 static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)

@@ -1517,10 +1523,10 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
 static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 {
 	/* this is supported for reusable desc, so check that */
-	if (dmaengine_desc_test_reuse(desc))
-		return desc->desc_free(desc);
-	else
+	if (!dmaengine_desc_test_reuse(desc))
 		return -EPERM;
+
+	return desc->desc_free(desc);
 }
 
 /* --- DMA device --- */

@@ -1566,9 +1572,7 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir)
 	case DMA_DEV_TO_DEV:
 		return "DEV_TO_DEV";
 	default:
-		break;
+		return "invalid";
 	}
-
-	return "invalid";
 }
 #endif /* DMAENGINE_H */
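The dbg_summary_show hook added above is optional: per the kernel-doc, the core falls back to a default per-channel summary when a driver omits it, and the k3-udma hunks earlier in this series show a full custom implementation. A minimal hedged sketch of the shape such a hook takes (the driver name is invented, and only fields visible in these hunks are used):

#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>

/* Hypothetical driver: print one extra line for each channel that has a client. */
static void example_dbg_summary_show(struct seq_file *s, struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (!chan->client_count)
			continue;
		seq_printf(s, " %-13s| %s\n", dma_chan_name(chan),
			   chan->dbg_client_name ?: "in-use");
	}
}
#endif /* CONFIG_DEBUG_FS */

The driver then assigns the function to its dma_device in probe, guarded by CONFIG_DEBUG_FS, exactly as the udma probe hunk above does for udma_dbg_summary_show.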
@@ -83,21 +83,6 @@ enum dsa_completion_status {
 #define DSA_COMP_STATUS_MASK	0x7f
 #define DSA_COMP_STATUS_WRITE	0x80
 
-struct dsa_batch_desc {
-	uint32_t	pasid:20;
-	uint32_t	rsvd:11;
-	uint32_t	priv:1;
-	uint32_t	flags:24;
-	uint32_t	opcode:8;
-	uint64_t	completion_addr;
-	uint64_t	desc_list_addr;
-	uint64_t	rsvd1;
-	uint32_t	desc_count;
-	uint16_t	interrupt_handle;
-	uint16_t	rsvd2;
-	uint8_t		rsvd3[24];
-} __attribute__((packed));
-
 struct dsa_hw_desc {
 	uint32_t	pasid:20;
 	uint32_t	rsvd:11;

@@ -109,6 +94,7 @@ struct dsa_hw_desc {
 		uint64_t	src_addr;
 		uint64_t	rdback_addr;
 		uint64_t	pattern;
+		uint64_t	desc_list_addr;
 	};
 	union {
 		uint64_t	dst_addr;

@@ -116,7 +102,10 @@ struct dsa_hw_desc {
 		uint64_t	src2_addr;
 		uint64_t	comp_pattern;
 	};
-	uint32_t	xfer_size;
+	union {
+		uint32_t	xfer_size;
+		uint32_t	desc_count;
+	};
 	uint16_t	int_handle;
 	uint16_t	rsvd1;
 	union {
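With dsa_batch_desc folded into dsa_hw_desc above, a batch is expressed as an ordinary descriptor whose unions carry the descriptor-list address and count instead of a source address and transfer size. A hedged user-space sketch of filling one in; the opcode and flag constants are assumed to come from elsewhere in this uapi header, and the addresses are placeholders:

#include <linux/idxd.h>		/* uapi: struct dsa_hw_desc, DSA_OPCODE_*, IDXD_OP_FLAG_* */
#include <string.h>

/* Build one batch descriptor pointing at an array of 'count' work descriptors. */
static void example_build_batch(struct dsa_hw_desc *batch,
				uint64_t desc_list_addr, uint32_t count,
				uint64_t completion_addr)
{
	memset(batch, 0, sizeof(*batch));
	batch->opcode = DSA_OPCODE_BATCH;
	/* request a completion record at completion_addr */
	batch->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	batch->completion_addr = completion_addr;
	batch->desc_list_addr = desc_list_addr;	/* union member shared with src_addr et al. */
	batch->desc_count = count;		/* union member shared with xfer_size */
}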