dmaengine updates for 4.10-rc1
Fairly routine update this time around with all changes specific to drivers:

 - New driver for STMicroelectronics FDMA
 - Memory-to-memory transfers on dw dmac
 - Support for slave maps on pl08x devices
 - Bunch of driver fixes to use dma_pool_zalloc
 - Bunch of compile and warning fixes spread across drivers

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJYUg7NAAoJEHwUBw8lI4NH5/gP/j81+2RzCUX8PiLQxNUt0Vj+
tVJEizpWCwN1cnhc8ibZdI1DAwyj+GbN2lghYTjqqEng4yOm3czPzUl99grBrpQl
t+Qylr9PSpck/paRhd2lgZzG8Nk+B5HJDcxBQbW4pwmbc69YAbqYzt44i4bDpR5K
u3mBve1Ulng7peP45EZB8BA32ffCpOEAC/9SdkaPokrSv6XxxPEFvzewy+mLtioU
a0zY0iuHqVGpOTABK65fXO/zkGiZLPXJ1T5vK7Iz8mOwuvtYVif0yktQSrx3BWbc
9r64W7Si633wWt/C9LkuMMSmQ7nI/PyHk811cDOcxp3SA79JV5SWwdQl+5QPdtoP
hyToaISfAY0BiNI9ltdscx3MPjlwSp08xXvi46RjSs8E2TXnbHUw+J5mTsxYuocl
Yi61nlL5ClhCbySf9Q3GFsuAJ3O2Nq9WkCTNRIvJtrMhe3NeqDDTfBZJRD4Bfg1G
q8RAc5oqGZDtqKHtLfwULr7Ec2Ru0hIZAyN907OwW+4jBR/eBJB1y+nGrNPtTWPT
OOcvrxe85/+ZNROGCZKr0L8UA/MBBMZtjvMY8RMXjBE4YJbakq7tV+7l5VolKeNH
G6I/1CC06qVPHrnetM6YejhtnmOQ4F8P1sE0wvpG0QTyHJoFq+aOhHNKJC8F9Eln
CQM2apvL4BHvS7OHt9XL
=Pf0d
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "Fairly routine update this time around with all changes specific to
  drivers:

   - New driver for STMicroelectronics FDMA
   - Memory-to-memory transfers on dw dmac
   - Support for slave maps on pl08x devices
   - Bunch of driver fixes to use dma_pool_zalloc
   - Bunch of compile and warning fixes spread across drivers"

[ The ST FDMA driver already came in earlier through the remoteproc tree ]

* tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
  dmaengine: sirf-dma: remove unused 'sdesc'
  dmaengine: pl330: remove unused 'regs'
  dmaengine: s3c24xx: remove unused 'cdata'
  dmaengine: stm32-dma: remove unused 'src_addr'
  dmaengine: stm32-dma: remove unused 'dst_addr'
  dmaengine: stm32-dma: remove unused 'sfcr'
  dmaengine: pch_dma: remove unused 'cookie'
  dmaengine: mic_x100_dma: remove unused 'data'
  dmaengine: img-mdc: remove unused 'prev_phys'
  dmaengine: usb-dmac: remove unused 'uchan'
  dmaengine: ioat: remove unused 'res'
  dmaengine: ioat: remove unused 'ioat_dma'
  dmaengine: ioat: remove unused 'is_raid_device'
  dmaengine: pl330: do not generate unaligned access
  dmaengine: k3dma: move to dma_pool_zalloc
  dmaengine: at_hdmac: move to dma_pool_zalloc
  dmaengine: at_xdmac: don't restore unsaved status
  dmaengine: ioat: set error code on failures
  dmaengine: ioat: set error code on failures
  dmaengine: DW DMAC: add multi-block property to device tree
  ...
This commit is contained in: commit e3842cbfe0
@@ -23,6 +23,14 @@ Required properties
 #define NBPF_SLAVE_RQ_LEVEL	4

 Optional properties:
+- max-burst-mem-read: limit burst size for memory reads
+  (DMA_MEM_TO_MEM/DMA_MEM_TO_DEV) to this value, specified in bytes, rather
+  than using the maximum burst size allowed by the hardware's buffer size.
+- max-burst-mem-write: limit burst size for memory writes
+  (DMA_DEV_TO_MEM/DMA_MEM_TO_MEM) to this value, specified in bytes, rather
+  than using the maximum burst size allowed by the hardware's buffer size.
+  If both max-burst-mem-read and max-burst-mem-write are set, DMA_MEM_TO_MEM
+  will use the lower value.

 You can use dma-channels and dma-requests as described in dma.txt, although they
 won't be used, this information is derived from the compatibility string.

@@ -5,13 +5,13 @@ memcpy and memset capabilities. It has been designed for virtualized
 environments.

 Each HIDMA HW instance consists of multiple DMA channels. These channels
-share the same bandwidth. The bandwidth utilization can be parititioned
+share the same bandwidth. The bandwidth utilization can be partitioned
 among channels based on the priority and weight assignments.

 There are only two priority levels and 15 weigh assignments possible.

 Other parameters here determine how much of the system bus this HIDMA
-instance can use like maximum read/write request and and number of bytes to
+instance can use like maximum read/write request and number of bytes to
 read/write in a single burst.

 Main node required properties:
@@ -47,12 +47,18 @@ When the OS is not in control of the management interface (i.e. it's a guest),
 the channel nodes appear on their own, not under a management node.

 Required properties:
-- compatible: must contain "qcom,hidma-1.0"
+- compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1"
+  for MSI capable HW.
 - reg: Addresses for the transfer and event channel
 - interrupts: Should contain the event interrupt
 - desc-count: Number of asynchronous requests this channel can handle
 - iommus: required a iommu node

+Optional properties for MSI:
+- msi-parent : See the generic MSI binding described in
+  devicetree/bindings/interrupt-controller/msi.txt for a description of the
+  msi-parent property.
+
 Example:

 Hypervisor OS configuration:

@@ -24,6 +24,7 @@ Required Properties:
 		- "renesas,dmac-r8a7793" (R-Car M2-N)
 		- "renesas,dmac-r8a7794" (R-Car E2)
 		- "renesas,dmac-r8a7795" (R-Car H3)
+		- "renesas,dmac-r8a7796" (R-Car M3-W)

 - reg: base address and length of the registers block for the DMAC

@@ -27,6 +27,8 @@ Optional properties:
   that services interrupts for this device
 - is_private: The device channels should be marked as private and not for by the
   general purpose DMA channel allocator. False if not passed.
+- multi-block: Multi block transfers supported by hardware. Array property with
+  one cell per channel. 0: not supported, 1 (default): supported.

 Example:

@@ -37,8 +37,8 @@ The slave DMA usage consists of following steps:
 2. Set slave and controller specific parameters

    Next step is always to pass some specific information to the DMA
    driver. Most of the generic information which a slave DMA can use
    is in struct dma_slave_config. This allows the clients to specify
    DMA direction, DMA addresses, bus widths, DMA burst lengths etc
    for the peripheral.

@@ -52,7 +52,7 @@ The slave DMA usage consists of following steps:
		struct dma_slave_config *config)

    Please see the dma_slave_config structure definition in dmaengine.h
    for a detailed explanation of the struct members. Please note
    that the 'direction' member will be going away as it duplicates the
    direction given in the prepare call.

@@ -101,7 +101,7 @@ The slave DMA usage consists of following steps:
	desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);

    Once a descriptor has been obtained, the callback information can be
    added and the descriptor must then be submitted. Some DMA engine
    drivers may hold a spinlock between a successful preparation and
    submission so it is important that these two operations are closely
    paired.

@@ -138,7 +138,7 @@ The slave DMA usage consists of following steps:
    activity via other DMA engine calls not covered in this document.

    dmaengine_submit() will not start the DMA operation, it merely adds
    it to the pending queue. For this, see step 5, dma_async_issue_pending.

 5. Issue pending DMA requests and wait for callback notification

@@ -184,13 +184,13 @@ Further APIs:

 3. int dmaengine_resume(struct dma_chan *chan)

    Resume a previously paused DMA channel. It is invalid to resume a
    channel which is not currently paused.

 4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)

    This can be used to check the status of the channel. Please see
    the documentation in include/linux/dmaengine.h for a more complete
    description of this API.

@@ -200,7 +200,7 @@ Further APIs:

    Note:
	Not all DMA engine drivers can return reliable information for
	a running DMA channel. It is recommended that DMA engine users
	pause or stop (via dmaengine_terminate_all()) the channel before
	using this API.

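For illustration, the client flow documented above (request a channel, set the
slave parameters, prepare a descriptor, submit it, issue pending requests) can
be sketched as below. This is a minimal, hypothetical client, not taken from
any driver in this series; the device, the "rx" channel name, the FIFO address
and the already-mapped scatterlist are assumptions.

    /* Minimal sketch of the slave DMA client flow; error handling abridged. */
    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>

    static int example_start_rx(struct device *dev, struct scatterlist *sgl,
                                unsigned int nr_sg, dma_addr_t fifo_addr)
    {
            struct dma_slave_config cfg = { };
            struct dma_async_tx_descriptor *desc;
            struct dma_chan *chan;
            dma_cookie_t cookie;

            /* 1. Request a channel ("rx" is a hypothetical slave name) */
            chan = dma_request_chan(dev, "rx");
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            /* 2. Set slave and controller specific parameters */
            cfg.direction = DMA_DEV_TO_MEM;
            cfg.src_addr = fifo_addr;
            cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
            cfg.src_maxburst = 8;
            if (dmaengine_slave_config(chan, &cfg))
                    goto err_release;

            /* 3. Get a descriptor for the transaction */
            desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
            if (!desc)
                    goto err_release;

            /* 4. Submit the descriptor (a callback could be set on desc first) */
            cookie = dmaengine_submit(desc);

            /* 5. Issue pending requests; nothing runs until this is called */
            dma_async_issue_pending(chan);

            return dma_submit_error(cookie) ? -EIO : 0;

    err_release:
            dma_release_channel(chan);
            return -EINVAL;
    }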
@@ -34,7 +34,7 @@ command:
	% ls -1 /sys/class/dma/

 Once started a message like "dmatest: Started 1 threads using dma0chan0" is
 emitted. After that only test failure messages are reported until the test
 stops.

 Note that running a new test will not stop any in progress test.

@@ -43,11 +43,11 @@ The following command returns the state of the test.
	% cat /sys/module/dmatest/parameters/run

 To wait for test completion userpace can poll 'run' until it is false, or use
 the wait parameter. Specifying 'wait=1' when loading the module causes module
 initialization to pause until a test run has completed, while reading
 /sys/module/dmatest/parameters/wait waits for any running test to complete
 before returning. For example, the following scripts wait for 42 tests
 to complete before exiting. Note that if 'iterations' is set to 'infinite' then
 waiting is disabled.

 Example:

@@ -81,7 +81,7 @@ Example of output:

 The message format is unified across the different types of errors. A number in
 the parens represents additional information, e.g. error code, error counter,
 or status. A test thread also emits a summary line at completion listing the
 number of tests executed, number that failed, and a result code.

 Example:

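As a hypothetical illustration of the wait behaviour described above (the
module parameters are as documented; the channel name is only an example):

	% modprobe dmatest channel=dma0chan0 iterations=42 run=1 wait=1
	% cat /sys/module/dmatest/parameters/run    # reads "N" once the run stopped
	% dmesg | grep dmatest                      # per-thread summary lines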
@@ -384,7 +384,7 @@ where to put them)
 - The descriptor should be prepared for reuse by invoking
   dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE.
 - dmaengine_desc_set_reuse() will succeed only when channel support
-  reusable descriptor as exhibited by capablities
+  reusable descriptor as exhibited by capabilities
 - As a consequence, if a device driver wants to skip the dma_map_sg() and
   dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
   it can resubmit the transfer right after its completion.

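For context, a minimal sketch of the reuse flow mentioned above, assuming the
channel advertises descriptor reuse; everything except the dmaengine helpers
is illustrative.

    /* Reuse one descriptor across repeated, identical transfers. */
    static void example_resubmit(struct dma_chan *chan,
                                 struct dma_async_tx_descriptor *desc)
    {
            if (dmaengine_desc_set_reuse(desc))     /* fails if unsupported */
                    return;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);

            /* ...after completion the same desc may be submitted again,
             * skipping dma_map_sg()/dma_unmap_sg() for unchanged buffers.
             */
            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
    }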
@@ -29,7 +29,7 @@ Constraints

   d) Bandwidth guarantee
      The PXA architecture has 4 levels of DMAs priorities : high, normal, low.
-     The high prorities get twice as much bandwidth as the normal, which get twice
+     The high priorities get twice as much bandwidth as the normal, which get twice
      as much as the low priorities.
      A driver should be able to request a priority, especially the real-time
      ones such as pxa_camera with (big) throughputs.

@@ -129,6 +129,7 @@
		data-width = <4>;
		clocks = <&ahb_clk>;
		clock-names = "hclk";
+		multi-block = <1 1 1 1 1 1>;
	};

	i2c0: i2c@FF120000 {

@@ -118,6 +118,7 @@
		block_size = <0xfff>;
		dma-masters = <2>;
		data-width = <8 8>;
+		multi-block = <1 1 1 1 1 1 1 1>;
	};

	dma@eb000000 {
@@ -134,6 +135,7 @@
		chan_priority = <1>;
		block_size = <0xfff>;
		data-width = <8 8>;
+		multi-block = <1 1 1 1 1 1 1 1>;
	};

	fsmc: flash@b0000000 {

@@ -117,6 +117,25 @@ static struct pl08x_channel_data s3c64xx_dma0_info[] = {
	}
 };

+static const struct dma_slave_map s3c64xx_dma0_slave_map[] = {
+	{ "s3c6400-uart.0", "tx", &s3c64xx_dma0_info[0] },
+	{ "s3c6400-uart.0", "rx", &s3c64xx_dma0_info[1] },
+	{ "s3c6400-uart.1", "tx", &s3c64xx_dma0_info[2] },
+	{ "s3c6400-uart.1", "rx", &s3c64xx_dma0_info[3] },
+	{ "s3c6400-uart.2", "tx", &s3c64xx_dma0_info[4] },
+	{ "s3c6400-uart.2", "rx", &s3c64xx_dma0_info[5] },
+	{ "s3c6400-uart.3", "tx", &s3c64xx_dma0_info[6] },
+	{ "s3c6400-uart.3", "rx", &s3c64xx_dma0_info[7] },
+	{ "samsung-pcm.0", "tx", &s3c64xx_dma0_info[8] },
+	{ "samsung-pcm.0", "rx", &s3c64xx_dma0_info[9] },
+	{ "samsung-i2s.0", "tx", &s3c64xx_dma0_info[10] },
+	{ "samsung-i2s.0", "rx", &s3c64xx_dma0_info[11] },
+	{ "s3c6410-spi.0", "tx", &s3c64xx_dma0_info[12] },
+	{ "s3c6410-spi.0", "rx", &s3c64xx_dma0_info[13] },
+	{ "samsung-i2s.2", "tx", &s3c64xx_dma0_info[14] },
+	{ "samsung-i2s.2", "rx", &s3c64xx_dma0_info[15] },
+};
+
 struct pl08x_platform_data s3c64xx_dma0_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
@@ -134,6 +153,8 @@ struct pl08x_platform_data s3c64xx_dma0_plat_data = {
	.put_xfer_signal = pl08x_put_xfer_signal,
	.slave_channels = s3c64xx_dma0_info,
	.num_slave_channels = ARRAY_SIZE(s3c64xx_dma0_info),
+	.slave_map = s3c64xx_dma0_slave_map,
+	.slave_map_len = ARRAY_SIZE(s3c64xx_dma0_slave_map),
 };

 static AMBA_AHB_DEVICE(s3c64xx_dma0, "dma-pl080s.0", 0,
@@ -207,6 +228,15 @@ static struct pl08x_channel_data s3c64xx_dma1_info[] = {
	},
 };

+static const struct dma_slave_map s3c64xx_dma1_slave_map[] = {
+	{ "samsung-pcm.1", "tx", &s3c64xx_dma1_info[0] },
+	{ "samsung-pcm.1", "rx", &s3c64xx_dma1_info[1] },
+	{ "samsung-i2s.1", "tx", &s3c64xx_dma1_info[2] },
+	{ "samsung-i2s.1", "rx", &s3c64xx_dma1_info[3] },
+	{ "s3c6410-spi.1", "tx", &s3c64xx_dma1_info[4] },
+	{ "s3c6410-spi.1", "rx", &s3c64xx_dma1_info[5] },
+};
+
 struct pl08x_platform_data s3c64xx_dma1_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
@@ -224,6 +254,8 @@ struct pl08x_platform_data s3c64xx_dma1_plat_data = {
	.put_xfer_signal = pl08x_put_xfer_signal,
	.slave_channels = s3c64xx_dma1_info,
	.num_slave_channels = ARRAY_SIZE(s3c64xx_dma1_info),
+	.slave_map = s3c64xx_dma1_slave_map,
+	.slave_map_len = ARRAY_SIZE(s3c64xx_dma1_slave_map),
 };

 static AMBA_AHB_DEVICE(s3c64xx_dma1, "dma-pl080s.1", 0,

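With a slave map like the ones added above registered in the pl08x platform
data, a peripheral driver can request its channel by name instead of supplying
a DMA filter function; the map is matched on the requesting device's name and
the channel name. A minimal, hypothetical sketch of the consumer side (the
platform device and the "tx" name are assumptions):

    #include <linux/dmaengine.h>
    #include <linux/platform_device.h>
    #include <linux/err.h>

    static struct dma_chan *example_get_tx_chan(struct platform_device *pdev)
    {
            /* Resolved through the dma_slave_map on non-DT platforms */
            struct dma_chan *chan = dma_request_chan(&pdev->dev, "tx");

            if (IS_ERR(chan))
                    return NULL;    /* a real driver would propagate the error */
            return chan;
    }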
@@ -1124,15 +1124,6 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
	pd.num_cs = num_cs;
	pd.src_clk_nr = src_clk_nr;
	pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
-	pd.dma_tx = (void *)DMACH_SPI0_TX;
-	pd.dma_rx = (void *)DMACH_SPI0_RX;
-#if defined(CONFIG_PL330_DMA)
-	pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
-	pd.filter = pl08x_filter_id;
-#elif defined(CONFIG_S3C24XX_DMAC)
-	pd.filter = s3c24xx_dma_filter;
-#endif

	s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
 }
@@ -1169,14 +1160,6 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
	pd.num_cs = num_cs;
	pd.src_clk_nr = src_clk_nr;
	pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
-	pd.dma_tx = (void *)DMACH_SPI1_TX;
-	pd.dma_rx = (void *)DMACH_SPI1_RX;
-#if defined(CONFIG_PL330_DMA)
-	pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
-	pd.filter = pl08x_filter_id;
-#endif

	s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
 }
@@ -1213,13 +1196,6 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
	pd.num_cs = num_cs;
	pd.src_clk_nr = src_clk_nr;
	pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
-	pd.dma_tx = (void *)DMACH_SPI2_TX;
-	pd.dma_rx = (void *)DMACH_SPI2_RX;
-#if defined(CONFIG_PL330_DMA)
-	pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
-	pd.filter = pl08x_filter_id;
-#endif

	s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
 }

@@ -494,7 +494,7 @@ config TEGRA20_APB_DMA
	  or vice versa. It does not support memory to memory data transfer.

 config TEGRA210_ADMA
-	bool "NVIDIA Tegra210 ADMA support"
+	tristate "NVIDIA Tegra210 ADMA support"
	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS

@@ -1793,6 +1793,13 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 }
 EXPORT_SYMBOL_GPL(pl08x_filter_id);

+static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+	return plchan->cd == chan_id;
+}
+
 /*
  * Just check that the device is there and active
  * TODO: turn this bit on/off depending on the number of physical channels
@@ -2307,6 +2314,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
			ret = -EINVAL;
			goto out_no_platdata;
		}
+	} else {
+		pl08x->slave.filter.map = pl08x->pd->slave_map;
+		pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
+		pl08x->slave.filter.fn = pl08x_filter_fn;
	}

	/* By default, AHB1 only. If dualmaster, from platform */

@@ -111,9 +111,8 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

-	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
+	desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
-		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */

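Several drivers in this series make the same conversion: dma_pool_alloc()
followed by memset() is collapsed into dma_pool_zalloc(), which hands back
already-zeroed, DMA-able memory from the pool. A minimal, illustrative sketch
of the idiom (the helper name is hypothetical):

    #include <linux/dmapool.h>

    /* Returns a zeroed descriptor from the pool, or NULL; *phys receives
     * the corresponding DMA address for the hardware to use.
     */
    static void *example_get_desc(struct dma_pool *pool, dma_addr_t *phys)
    {
            return dma_pool_zalloc(pool, GFP_NOWAIT, phys);
    }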
@@ -221,7 +221,6 @@ struct at_xdmac {
	int			irq;
	struct clk		*clk;
	u32			save_gim;
-	u32			save_gs;
	struct dma_pool		*at_xdmac_desc_pool;
	struct at_xdmac_chan	chan[0];
 };
@@ -444,9 +443,8 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	dma_addr_t phys;

-	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
-		memset(desc, 0, sizeof(*desc));
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
@@ -1896,7 +1894,6 @@ static int atmel_xdmac_resume(struct device *dev)
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
-	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);

@@ -164,7 +164,9 @@ struct dmatest_thread {
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
+	u8			**usrcs;
	u8			**dsts;
+	u8			**udsts;
	enum dma_transaction_type type;
	bool			done;
 };
@@ -431,6 +433,7 @@ static int dmatest_func(void *data)
	ktime_t	comparetime = ktime_set(0, 0);
	s64	runtime = 0;
	unsigned long long total_len = 0;
+	u8 align = 0;

	set_freezable();

@@ -441,20 +444,24 @@ static int dmatest_func(void *data)
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
-	if (thread->type == DMA_MEMCPY)
+	if (thread->type == DMA_MEMCPY) {
+		align = dev->copy_align;
		src_cnt = dst_cnt = 1;
-	else if (thread->type == DMA_SG)
+	} else if (thread->type == DMA_SG) {
+		align = dev->copy_align;
		src_cnt = dst_cnt = sg_buffers;
-	else if (thread->type == DMA_XOR) {
+	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
+		align = dev->xor_align;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;
+		align = dev->pq_align;

-		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

@@ -463,23 +470,47 @@ static int dmatest_func(void *data)
	} else
		goto err_thread_type;

-	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
+
+	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->usrcs)
+		goto err_usrcs;
+
	for (i = 0; i < src_cnt; i++) {
-		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
-		if (!thread->srcs[i])
+		thread->usrcs[i] = kmalloc(params->buf_size + align,
+					   GFP_KERNEL);
+		if (!thread->usrcs[i])
			goto err_srcbuf;
+
+		/* align srcs to alignment restriction */
+		if (align)
+			thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
+		else
+			thread->srcs[i] = thread->usrcs[i];
	}
	thread->srcs[i] = NULL;

-	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
+
+	thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->udsts)
+		goto err_udsts;
+
	for (i = 0; i < dst_cnt; i++) {
-		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
-		if (!thread->dsts[i])
+		thread->udsts[i] = kmalloc(params->buf_size + align,
+					   GFP_KERNEL);
+		if (!thread->udsts[i])
			goto err_dstbuf;
+
+		/* align dsts to alignment restriction */
+		if (align)
+			thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
+		else
+			thread->dsts[i] = thread->udsts[i];
	}
	thread->dsts[i] = NULL;

@@ -498,20 +529,11 @@ static int dmatest_func(void *data)
		dma_addr_t srcs[src_cnt];
		dma_addr_t *dsts;
		unsigned int src_off, dst_off, len;
-		u8 align = 0;
		struct scatterlist tx_sg[src_cnt];
		struct scatterlist rx_sg[src_cnt];

		total_tests++;

-		/* honor alignment restrictions */
-		if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
-			align = dev->copy_align;
-		else if (thread->type == DMA_XOR)
-			align = dev->xor_align;
-		else if (thread->type == DMA_PQ)
-			align = dev->pq_align;
-
		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
@@ -549,7 +571,7 @@ static int dmatest_func(void *data)
			filltime = ktime_add(filltime, diff);
		}

-		um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+		um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
@@ -729,13 +751,17 @@ static int dmatest_func(void *data)

	ret = 0;
 err_dstbuf:
-	for (i = 0; thread->dsts[i]; i++)
-		kfree(thread->dsts[i]);
+	for (i = 0; thread->udsts[i]; i++)
+		kfree(thread->udsts[i]);
+	kfree(thread->udsts);
+err_udsts:
	kfree(thread->dsts);
 err_dsts:
 err_srcbuf:
-	for (i = 0; thread->srcs[i]; i++)
-		kfree(thread->srcs[i]);
+	for (i = 0; thread->usrcs[i]; i++)
+		kfree(thread->usrcs[i]);
+	kfree(thread->usrcs);
+err_usrcs:
	kfree(thread->srcs);
 err_srcs:
	kfree(pq_coefs);

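The pattern adopted in dmatest above is a common idiom: over-allocate by the
alignment, keep the raw pointer (usrcs/udsts) for kfree(), and hand out a
PTR_ALIGN()ed pointer for the actual transfer buffer. A small, self-contained
sketch of the idiom with a hypothetical helper (here align is a byte count and
must be a power of two):

    #include <linux/slab.h>
    #include <linux/kernel.h>       /* PTR_ALIGN() */

    /* Allocate a buffer whose usable start is aligned to 'align' bytes.
     * The caller must kfree() the pointer returned in *raw, not the
     * aligned pointer.
     */
    static void *alloc_aligned(size_t size, unsigned int align, void **raw)
    {
            void *buf = kmalloc(size + align, GFP_KERNEL);

            if (!buf)
                    return NULL;
            *raw = buf;
            return align ? PTR_ALIGN(buf, align) : buf;
    }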
@@ -1569,7 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;
-			dwc->nollp = pdata->is_nollp;
+			dwc->nollp = !pdata->multi_block[i];
		}
	}

@@ -102,7 +102,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 {
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
-	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

@@ -118,6 +118,8 @@ dw_dma_parse_dt(struct platform_device *pdev)

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
+	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
@@ -129,6 +131,12 @@ dw_dma_parse_dt(struct platform_device *pdev)
	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

+	/*
+	 * All known devices, which use DT for configuration, support
+	 * memory-to-memory transfers. So enable it by default.
+	 */
+	pdata->is_memcpy = true;
+
	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

@@ -146,6 +154,14 @@ dw_dma_parse_dt(struct platform_device *pdev)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

+	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+		for (tmp = 0; tmp < nr_channels; tmp++)
+			pdata->multi_block[tmp] = mb[tmp];
+	} else {
+		for (tmp = 0; tmp < nr_channels; tmp++)
+			pdata->multi_block[tmp] = 1;
+	}
+
	return pdata;
 }
 #else

@@ -12,7 +12,8 @@
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>

-#define DW_DMA_MAX_NR_CHANNELS	8
+#include "internal.h"
+
 #define DW_DMA_MAX_NR_REQUESTS	16

 /* flow controller */

@@ -2451,6 +2451,9 @@ static int edma_pm_resume(struct device *dev)
	int i;
	s8 (*queue_priority_mapping)[2];

+	/* re initialize dummy slot to dummy param set */
+	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
+
	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */

@@ -881,6 +881,7 @@ static struct of_device_id fsl_re_ids[] = {
	{ .compatible = "fsl,raideng-v1.0", },
	{}
 };
+MODULE_DEVICE_TABLE(of, fsl_re_ids);

 static struct platform_driver fsl_re_driver = {
	.driver = {

@@ -77,13 +77,15 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (!chip)
		return -ENOMEM;

+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	if (ret < 0)
+		return ret;
+
	chip->dev = &pdev->dev;
	chip->regs = pcim_iomap_table(pdev)[0];
	chip->length = pci_resource_len(pdev, 0);
	chip->offset = HSU_PCI_CHAN_OFFSET;
-	chip->irq = pdev->irq;
-
-	pci_enable_msi(pdev);
+	chip->irq = pci_irq_vector(pdev, 0);

	ret = hsu_dma_probe(chip);
	if (ret)

@@ -292,7 +292,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
-	dma_addr_t curr_phys, prev_phys;
+	dma_addr_t curr_phys;

	if (!len)
		return NULL;
@@ -324,7 +324,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
			       xfer_size);

		prev = curr;
-		prev_phys = curr_phys;

		mdesc->list_len++;
		src += xfer_size;
@@ -375,7 +374,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
-	dma_addr_t curr_phys, prev_phys;
+	dma_addr_t curr_phys;

	if (!buf_len && !period_len)
		return NULL;
@@ -430,7 +429,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
		}

		prev = curr;
-		prev_phys = curr_phys;

		mdesc->list_len++;
		buf_addr += xfer_size;
@@ -458,7 +456,7 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
-	dma_addr_t curr_phys, prev_phys;
+	dma_addr_t curr_phys;
	unsigned int i;

	if (!sgl)
@@ -509,7 +507,6 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
		}

		prev = curr;
-		prev_phys = curr_phys;

		mdesc->list_len++;
		mdesc->list_xfer_size += xfer_size;

@@ -298,6 +298,7 @@ struct sdma_engine;
 * @event_id1	for channels that use 2 events
 * @word_size	peripheral access size
 * @buf_tail	ID of the buffer that was processed
+ * @buf_ptail	ID of the previous buffer that was processed
 * @num_bd	max NUM_BD. number of descriptors currently handling
 */
 struct sdma_channel {
@@ -309,6 +310,7 @@ struct sdma_channel {
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			buf_tail;
+	unsigned int			buf_ptail;
	unsigned int			num_bd;
	unsigned int			period_len;
	struct sdma_buffer_descriptor	*bd;
@@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
		sdmac->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = sdmac->period_len;
+		sdmac->buf_ptail = sdmac->buf_tail;
+		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;

		/*
		 * The callback is called from the interrupt context in order
@@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)

		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);

-		sdmac->buf_tail++;
-		sdmac->buf_tail %= sdmac->num_bd;
-
		if (error)
			sdmac->status = old_status;
	}
@@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
	sdmac->flags = 0;

	sdmac->buf_tail = 0;
+	sdmac->buf_ptail = 0;
+	sdmac->chn_real_count = 0;

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
			sg_len, channel);
@@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
	sdmac->status = DMA_IN_PROGRESS;

	sdmac->buf_tail = 0;
+	sdmac->buf_ptail = 0;
+	sdmac->chn_real_count = 0;
	sdmac->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;
@@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
	u32 residue;

	if (sdmac->flags & IMX_DMA_SG_LOOP)
-		residue = (sdmac->num_bd - sdmac->buf_tail) *
+		residue = (sdmac->num_bd - sdmac->buf_ptail) *
			   sdmac->period_len - sdmac->chn_real_count;
	else
		residue = sdmac->chn_count - sdmac->chn_real_count;

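As a hypothetical worked example of the residue formula above: with num_bd = 4
buffer descriptors of period_len = 4096 bytes, if the handler has just
finished the descriptor recorded in buf_ptail = 1 with chn_real_count = 4096,
the reported residue is (4 - 1) * 4096 - 4096 = 8192 bytes still outstanding,
whereas computing it from the already-advanced buf_tail = 2 would give only
4096; keeping buf_ptail is what makes the cyclic-mode residue consistent with
the descriptor the callback actually completed.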
@@ -39,6 +39,7 @@
 #include "../dmaengine.h"

 static char *chanerr_str[] = {
+	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
-	NULL
 };

 static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
 {
	int i;

-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
-			if (chanerr_str[i]) {
-				dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
-					i, chanerr_str[i]);
-			} else
-				break;
+			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+				i, chanerr_str[i]);
		}
	}
 }
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
 {
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
-	struct ioatdma_device *ioat_dma;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

-	ioat_dma = to_ioatdma_device(chan->device);
-
	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)

		tx = &desc->txd;
		if (tx->cookie) {
-			struct dmaengine_result res;
-
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
-			res.result = DMA_TRANS_NOERROR;
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
+		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
+		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dest_dma))
+	if (dma_mapping_error(dev, dest_dma)) {
+		err = -ENOMEM;
		goto free_resources;
+	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
			goto dma_unmap;
+		}
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
			goto dma_unmap;
+		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
			goto dma_unmap;
+		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
-	bool is_raid_device = false;
	int err;
	u16 val16;

@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
-		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {
-		is_raid_device = true;

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;

@@ -458,13 +458,12 @@ static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
	if (!ds)
		return NULL;

-	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
-	memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
	ds->desc_num = num;
	return ds;
 }

@@ -554,9 +554,7 @@ static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
	int ret;

	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
-		unsigned long data;
		ch = &mic_dma_dev->mic_ch[i];
-		data = (unsigned long)ch;
		ch->ch_num = i;
		ch->owner = owner;
		spin_lock_init(&ch->cleanup_lock);

@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
     hw_desc->byte_count = byte_count;
 }
 
+/* Populate the descriptor */
+static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
+                                     dma_addr_t dma_src, dma_addr_t dma_dst,
+                                     u32 len, struct mv_xor_desc_slot *prev)
+{
+    struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+    hw_desc->status = XOR_DESC_DMA_OWNED;
+    hw_desc->phy_next_desc = 0;
+    /* Configure for XOR with only one src address -> MEMCPY */
+    hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
+    hw_desc->phy_dest_addr = dma_dst;
+    hw_desc->phy_src_addr[0] = dma_src;
+    hw_desc->byte_count = len;
+
+    if (prev) {
+        struct mv_xor_desc *hw_prev = prev->hw_desc;
+
+        hw_prev->phy_next_desc = desc->async_tx.phys;
+    }
+}
+
+static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
+{
+    struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+    /* Enable end-of-descriptor interrupt */
+    hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
+}
+
 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
 {
     struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
     list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                              node) {
 
-        if (async_tx_test_ack(&iter->async_tx))
+        if (async_tx_test_ack(&iter->async_tx)) {
             list_move_tail(&iter->node, &mv_chan->free_slots);
+            if (!list_empty(&iter->sg_tx_list)) {
+                list_splice_tail_init(&iter->sg_tx_list,
+                                      &mv_chan->free_slots);
+            }
+        }
     }
     return 0;
 }
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
     /* the client is allowed to attach dependent operations
      * until 'ack' is set
      */
-    if (!async_tx_test_ack(&desc->async_tx))
+    if (!async_tx_test_ack(&desc->async_tx)) {
         /* move this slot to the completed_slots */
         list_move_tail(&desc->node, &mv_chan->completed_slots);
-    else
+        if (!list_empty(&desc->sg_tx_list)) {
+            list_splice_tail_init(&desc->sg_tx_list,
+                                  &mv_chan->completed_slots);
+        }
+    } else {
         list_move_tail(&desc->node, &mv_chan->free_slots);
+        if (!list_empty(&desc->sg_tx_list)) {
+            list_splice_tail_init(&desc->sg_tx_list,
+                                  &mv_chan->free_slots);
+        }
+    }
 
     return 0;
 }
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
         dma_async_tx_descriptor_init(&slot->async_tx, chan);
         slot->async_tx.tx_submit = mv_xor_tx_submit;
         INIT_LIST_HEAD(&slot->node);
+        INIT_LIST_HEAD(&slot->sg_tx_list);
         dma_desc = mv_chan->dma_desc_pool;
         slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
         slot->idx = idx++;
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
     return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 }
 
+/**
+ * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
+ * @chan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+                   unsigned int dst_sg_len, struct scatterlist *src_sg,
+                   unsigned int src_sg_len, unsigned long flags)
+{
+    struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+    struct mv_xor_desc_slot *new;
+    struct mv_xor_desc_slot *first = NULL;
+    struct mv_xor_desc_slot *prev = NULL;
+    size_t len, dst_avail, src_avail;
+    dma_addr_t dma_dst, dma_src;
+    int desc_cnt = 0;
+    int ret;
+
+    dev_dbg(mv_chan_to_devp(mv_chan),
+            "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
+            __func__, dst_sg_len, src_sg_len, flags);
+
+    dst_avail = sg_dma_len(dst_sg);
+    src_avail = sg_dma_len(src_sg);
+
+    /* Run until we are out of scatterlist entries */
+    while (true) {
+        /* Allocate and populate the descriptor */
+        desc_cnt++;
+        new = mv_chan_alloc_slot(mv_chan);
+        if (!new) {
+            dev_err(mv_chan_to_devp(mv_chan),
+                    "Out of descriptors (desc_cnt=%d)!\n",
+                    desc_cnt);
+            goto err;
+        }
+
+        len = min_t(size_t, src_avail, dst_avail);
+        len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
+        if (len == 0)
+            goto fetch;
+
+        if (len < MV_XOR_MIN_BYTE_COUNT) {
+            dev_err(mv_chan_to_devp(mv_chan),
+                    "Transfer size of %zu too small!\n", len);
+            goto err;
+        }
+
+        dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+                  dst_avail;
+        dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+                  src_avail;
+
+        /* Check if a new window needs to get added for 'dst' */
+        ret = mv_xor_add_io_win(mv_chan, dma_dst);
+        if (ret)
+            goto err;
+
+        /* Check if a new window needs to get added for 'src' */
+        ret = mv_xor_add_io_win(mv_chan, dma_src);
+        if (ret)
+            goto err;
+
+        /* Populate the descriptor */
+        mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
+        prev = new;
+        dst_avail -= len;
+        src_avail -= len;
+
+        if (!first)
+            first = new;
+        else
+            list_move_tail(&new->node, &first->sg_tx_list);
+
+fetch:
+        /* Fetch the next dst scatterlist entry */
+        if (dst_avail == 0) {
+            if (dst_sg_len == 0)
+                break;
+
+            /* Fetch the next entry: if there are no more: done */
+            dst_sg = sg_next(dst_sg);
+            if (dst_sg == NULL)
+                break;
+
+            dst_sg_len--;
+            dst_avail = sg_dma_len(dst_sg);
+        }
+
+        /* Fetch the next src scatterlist entry */
+        if (src_avail == 0) {
+            if (src_sg_len == 0)
+                break;
+
+            /* Fetch the next entry: if there are no more: done */
+            src_sg = sg_next(src_sg);
+            if (src_sg == NULL)
+                break;
+
+            src_sg_len--;
+            src_avail = sg_dma_len(src_sg);
+        }
+    }
+
+    /* Set the EOD flag in the last descriptor */
+    mv_xor_desc_config_eod(new);
+    first->async_tx.flags = flags;
+
+    return &first->async_tx;
+
+err:
+    /* Cleanup: Move all descriptors back into the free list */
+    spin_lock_bh(&mv_chan->lock);
+    mv_desc_clean_slot(first, mv_chan);
+    spin_unlock_bh(&mv_chan->lock);
+
+    return NULL;
+}
+
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
     struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
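For context, the new callback above is reached through the generic dmaengine scatter-gather API of that era. A rough client-side sketch, assuming chan is a DMA_SG-capable channel and both scatterlists have already been dma_map_sg()'d (the function name submit_sg_copy is illustrative; dmaengine_prep_dma_sg() was the in-kernel wrapper for this capability at the time):

    static int submit_sg_copy(struct dma_chan *chan,
                              struct scatterlist *dst_sg, unsigned int dst_nents,
                              struct scatterlist *src_sg, unsigned int src_nents)
    {
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        /* Build one transaction covering both scatterlists. */
        tx = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
                                   src_sg, src_nents, DMA_PREP_INTERRUPT);
        if (!tx)
            return -ENOMEM;

        /* Queue it and kick the channel. */
        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return dma_submit_error(cookie);
    }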
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
     dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
     if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
         dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+    if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
+        dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
     if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
         dma_dev->max_xor = 8;
         dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
         goto err_free_irq;
     }
 
-    dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
+    dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
          mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
          dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
          dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+         dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
          dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
     dma_async_device_register(dma_dev);
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 
         dma_cap_zero(cap_mask);
         dma_cap_set(DMA_MEMCPY, cap_mask);
+        dma_cap_set(DMA_SG, cap_mask);
         dma_cap_set(DMA_XOR, cap_mask);
         dma_cap_set(DMA_INTERRUPT, cap_mask);
 
@@ -1455,12 +1630,7 @@ static struct platform_driver mv_xor_driver = {
     },
 };
 
-static int __init mv_xor_init(void)
-{
-    return platform_driver_register(&mv_xor_driver);
-}
-device_initcall(mv_xor_init);
+builtin_platform_driver(mv_xor_driver);
 
 /*
 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
@@ -148,6 +148,7 @@ struct mv_xor_chan {
  */
 struct mv_xor_desc_slot {
     struct list_head    node;
+    struct list_head    sg_tx_list;
     enum dma_transaction_type    type;
     void            *hw_desc;
     u16            idx;
@@ -225,6 +225,8 @@ struct nbpf_channel {
 struct nbpf_device {
     struct dma_device dma_dev;
     void __iomem *base;
+    u32 max_burst_mem_read;
+    u32 max_burst_mem_write;
     struct clk *clk;
     const struct nbpf_config *config;
     unsigned int eirq;
@@ -425,10 +427,33 @@ static void nbpf_chan_configure(struct nbpf_channel *chan)
     nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
 }
 
-static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
+                        enum dma_transfer_direction direction)
 {
+    int max_burst = nbpf->config->buffer_size * 8;
+
+    if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
+        switch (direction) {
+        case DMA_MEM_TO_MEM:
+            max_burst = min_not_zero(nbpf->max_burst_mem_read,
+                                     nbpf->max_burst_mem_write);
+            break;
+        case DMA_MEM_TO_DEV:
+            if (nbpf->max_burst_mem_read)
+                max_burst = nbpf->max_burst_mem_read;
+            break;
+        case DMA_DEV_TO_MEM:
+            if (nbpf->max_burst_mem_write)
+                max_burst = nbpf->max_burst_mem_write;
+            break;
+        case DMA_DEV_TO_DEV:
+        default:
+            break;
+        }
+    }
+
     /* Maximum supported bursts depend on the buffer size */
-    return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+    return min_t(int, __ffs(size), ilog2(max_burst));
 }
 
 static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
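The new direction argument only tightens the clamp on the burst exponent. A standalone illustration of the arithmetic, with a simplified helper whose name and the way the two limits are combined are assumptions for the example (the driver derives its hardware ceiling from buffer_size * 8):

    /* The burst is a power-of-two exponent: the lowest set bit of the
     * transfer size, capped by the controller buffer or the DT limit. */
    static unsigned int burst_exponent(size_t size, u32 hw_max_bytes,
                                       u32 dt_limit_bytes)
    {
        u32 max_burst = hw_max_bytes;

        if (dt_limit_bytes)
            max_burst = min(max_burst, dt_limit_bytes);

        return min_t(unsigned int, __ffs(size), ilog2(max_burst));
    }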
@@ -458,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
         size = burst;
     }
 
-    return nbpf_xfer_ds(nbpf, size);
+    return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
 }
 
 /*
@@ -507,7 +532,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
      * transfers we enable the SBE bit and terminate the transfer in our
      * .device_pause handler.
      */
-    mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+    mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
 
     switch (direction) {
     case DMA_DEV_TO_MEM:
@@ -1313,6 +1338,11 @@ static int nbpf_probe(struct platform_device *pdev)
     if (IS_ERR(nbpf->clk))
         return PTR_ERR(nbpf->clk);
 
+    of_property_read_u32(np, "max-burst-mem-read",
+                         &nbpf->max_burst_mem_read);
+    of_property_read_u32(np, "max-burst-mem-write",
+                         &nbpf->max_burst_mem_write);
+
     nbpf->config = cfg;
 
     for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
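Note why the probe code can read the two optional properties without checking the return value: of_property_read_u32() leaves the output variable untouched when the property is absent, so a zero-initialized field doubles as "no limit configured". A small sketch of the idiom (the property name matches the binding; the surrounding code is hypothetical):

    u32 limit = 0;

    /* No error handling needed: 'limit' stays 0 if the property is missing. */
    of_property_read_u32(np, "max-burst-mem-read", &limit);
    if (limit)
        pr_info("memory-read bursts limited to %u bytes\n", limit);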
@@ -166,6 +166,9 @@ enum {
     CSDP_DST_BURST_16    = 1 << 14,
     CSDP_DST_BURST_32    = 2 << 14,
     CSDP_DST_BURST_64    = 3 << 14,
+    CSDP_WRITE_NON_POSTED        = 0 << 16,
+    CSDP_WRITE_POSTED        = 1 << 16,
+    CSDP_WRITE_LAST_NON_POSTED    = 2 << 16,
 
     CICR_TOUT_IE        = BIT(0),    /* OMAP1 only */
     CICR_DROP_IE        = BIT(1),
@@ -422,7 +425,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
     c->running = true;
 }
 
-static void omap_dma_stop(struct omap_chan *c)
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+    int i;
+    u32 val;
+
+    /* Wait for sDMA FIFO to drain */
+    for (i = 0; ; i++) {
+        val = omap_dma_chan_read(c, CCR);
+        if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+            break;
+
+        if (i > 100)
+            break;
+
+        udelay(5);
+    }
+
+    if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+        dev_err(c->vc.chan.device->dev,
+            "DMA drain did not complete on lch %d\n",
+            c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
 {
     struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
     uint32_t val;
@@ -435,7 +461,6 @@ static void omap_dma_stop(struct omap_chan *c)
     val = omap_dma_chan_read(c, CCR);
     if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
         uint32_t sysconfig;
-        unsigned i;
 
         sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
         val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -446,27 +471,19 @@ static void omap_dma_stop(struct omap_chan *c)
         val &= ~CCR_ENABLE;
         omap_dma_chan_write(c, CCR, val);
 
-        /* Wait for sDMA FIFO to drain */
-        for (i = 0; ; i++) {
-            val = omap_dma_chan_read(c, CCR);
-            if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
-                break;
-
-            if (i > 100)
-                break;
-
-            udelay(5);
-        }
-
-        if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
-            dev_err(c->vc.chan.device->dev,
-                "DMA drain did not complete on lch %d\n",
-                c->dma_ch);
+        if (!(c->ccr & CCR_BUFFERING_DISABLE))
+            omap_dma_drain_chan(c);
 
         omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
     } else {
+        if (!(val & CCR_ENABLE))
+            return -EINVAL;
+
         val &= ~CCR_ENABLE;
         omap_dma_chan_write(c, CCR, val);
+
+        if (!(c->ccr & CCR_BUFFERING_DISABLE))
+            omap_dma_drain_chan(c);
     }
 
     mb();
@@ -481,8 +498,8 @@ static void omap_dma_stop(struct omap_chan *c)
 
         omap_dma_chan_write(c, CLNK_CTRL, val);
     }
 
     c->running = false;
+    return 0;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
@@ -836,6 +853,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
     } else {
         txstate->residue = 0;
     }
+    if (ret == DMA_IN_PROGRESS && c->paused)
+        ret = DMA_PAUSED;
     spin_unlock_irqrestore(&c->vc.lock, flags);
 
     return ret;
@@ -865,15 +884,18 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
     unsigned i, es, en, frame_bytes;
     bool ll_failed = false;
     u32 burst;
+    u32 port_window, port_window_bytes;
 
     if (dir == DMA_DEV_TO_MEM) {
         dev_addr = c->cfg.src_addr;
         dev_width = c->cfg.src_addr_width;
         burst = c->cfg.src_maxburst;
+        port_window = c->cfg.src_port_window_size;
     } else if (dir == DMA_MEM_TO_DEV) {
         dev_addr = c->cfg.dst_addr;
         dev_width = c->cfg.dst_addr_width;
         burst = c->cfg.dst_maxburst;
+        port_window = c->cfg.dst_port_window_size;
     } else {
         dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
         return NULL;
@@ -894,6 +916,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
         return NULL;
     }
 
+    /* When the port_window is used, one frame must cover the window */
+    if (port_window) {
+        burst = port_window;
+        port_window_bytes = port_window * es_bytes[es];
+    }
+
     /* Now allocate and setup the descriptor. */
     d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
     if (!d)
@@ -905,11 +933,45 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 
     d->ccr = c->ccr | CCR_SYNC_FRAME;
     if (dir == DMA_DEV_TO_MEM) {
-        d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
         d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+
+        d->ccr |= CCR_DST_AMODE_POSTINC;
+        if (port_window) {
+            d->ccr |= CCR_SRC_AMODE_DBLIDX;
+            d->ei = 1;
+            /*
+             * One frame covers the port_window and by configure
+             * the source frame index to be -1 * (port_window - 1)
+             * we instruct the sDMA that after a frame is processed
+             * it should move back to the start of the window.
+             */
+            d->fi = -(port_window_bytes - 1);
+
+            if (port_window_bytes >= 64)
+                d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+            else if (port_window_bytes >= 32)
+                d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+            else if (port_window_bytes >= 16)
+                d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+        } else {
+            d->ccr |= CCR_SRC_AMODE_CONSTANT;
+        }
     } else {
-        d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
         d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+
+        d->ccr |= CCR_SRC_AMODE_POSTINC;
+        if (port_window) {
+            d->ccr |= CCR_DST_AMODE_DBLIDX;
+
+            if (port_window_bytes >= 64)
+                d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+            else if (port_window_bytes >= 32)
+                d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+            else if (port_window_bytes >= 16)
+                d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+        } else {
+            d->ccr |= CCR_DST_AMODE_CONSTANT;
+        }
     }
 
     d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
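The double-indexed (DBLIDX) addressing used above relies on the element and frame indices: with an element index of 1 the address steps one element at a time within a frame, and a frame index of -(port_window_bytes - 1) rewinds the pointer to the start of the window once a frame completes. A small sketch of that index computation, mirroring the values the patch programs (the helper itself is hypothetical):

    static void compute_dblidx(unsigned int win_elems, unsigned int es_bytes,
                               s32 *ei, s32 *fi)
    {
        u32 port_window_bytes = win_elems * es_bytes;

        *ei = 1;                          /* step one element at a time    */
        *fi = -(port_window_bytes - 1);   /* rewind to window start after a frame */
    }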
@@ -927,6 +989,9 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
             d->ccr |= CCR_TRIGGER_SRC;
 
         d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+        if (port_window)
+            d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
     }
     if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
         d->clnk_ctrl = c->dma_ch;
@@ -952,6 +1017,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
         osg->addr = sg_dma_address(sgent);
         osg->en = en;
         osg->fn = sg_dma_len(sgent) / frame_bytes;
+        if (port_window && dir == DMA_MEM_TO_DEV) {
+            osg->ei = 1;
+            /*
+             * One frame covers the port_window and by configure
+             * the source frame index to be -1 * (port_window - 1)
+             * we instruct the sDMA that after a frame is processed
+             * it should move back to the start of the window.
+             */
+            osg->fi = -(port_window_bytes - 1);
+        }
 
         if (d->using_ll) {
             osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
@@ -1247,10 +1322,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
         omap_dma_stop(c);
     }
 
-    if (c->cyclic) {
-        c->cyclic = false;
-        c->paused = false;
-    }
+    c->cyclic = false;
+    c->paused = false;
 
     vchan_get_all_descriptors(&c->vc, &head);
     spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1269,28 +1342,66 @@ static void omap_dma_synchronize(struct dma_chan *chan)
 static int omap_dma_pause(struct dma_chan *chan)
 {
     struct omap_chan *c = to_omap_dma_chan(chan);
+    struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+    unsigned long flags;
+    int ret = -EINVAL;
+    bool can_pause = false;
 
-    /* Pause/Resume only allowed with cyclic mode */
-    if (!c->cyclic)
-        return -EINVAL;
+    spin_lock_irqsave(&od->irq_lock, flags);
 
-    if (!c->paused) {
-        omap_dma_stop(c);
-        c->paused = true;
+    if (!c->desc)
+        goto out;
+
+    if (c->cyclic)
+        can_pause = true;
+
+    /*
+     * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+     * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+     * "When a channel is disabled during a transfer, the channel undergoes
+     * an abort, unless it is hardware-source-synchronized …".
+     * A source-synchronised channel is one where the fetching of data is
+     * under control of the device. In other words, a device-to-memory
+     * transfer. So, a destination-synchronised channel (which would be a
+     * memory-to-device transfer) undergoes an abort if the the CCR_ENABLE
+     * bit is cleared.
+     * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+     * aborts immediately after completion of current read/write
+     * transactions and then the FIFO is cleaned up." The term "cleaned up"
+     * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
+     * are both clear _before_ disabling the channel, otherwise data loss
+     * will occur.
+     * The problem is that if the channel is active, then device activity
+     * can result in DMA activity starting between reading those as both
+     * clear and the write to DMA_CCR to clear the enable bit hitting the
+     * hardware. If the DMA hardware can't drain the data in its FIFO to the
+     * destination, then data loss "might" occur (say if we write to an UART
+     * and the UART is not accepting any further data).
+     */
+    else if (c->desc->dir == DMA_DEV_TO_MEM)
+        can_pause = true;
+
+    if (can_pause && !c->paused) {
+        ret = omap_dma_stop(c);
+        if (!ret)
+            c->paused = true;
     }
+out:
+    spin_unlock_irqrestore(&od->irq_lock, flags);
 
-    return 0;
+    return ret;
 }
 
 static int omap_dma_resume(struct dma_chan *chan)
 {
     struct omap_chan *c = to_omap_dma_chan(chan);
+    struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+    unsigned long flags;
+    int ret = -EINVAL;
 
-    /* Pause/Resume only allowed with cyclic mode */
-    if (!c->cyclic)
-        return -EINVAL;
+    spin_lock_irqsave(&od->irq_lock, flags);
 
-    if (c->paused) {
+    if (c->paused && c->desc) {
         mb();
 
         /* Restore channel link register */
@@ -1298,9 +1409,11 @@ static int omap_dma_resume(struct dma_chan *chan)
 
         omap_dma_start(c, c->desc);
         c->paused = false;
+        ret = 0;
     }
+    spin_unlock_irqrestore(&od->irq_lock, flags);
 
-    return 0;
+    return ret;
 }
 
 static int omap_dma_chan_init(struct omap_dmadev *od)
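The long comment above boils the pause policy down to a single eligibility check before the channel is stopped. Distilled into a sketch (field and helper names here are illustrative, not part of the patch):

    /* A channel may be paused only if it is cyclic or a device-to-memory
     * (source-synchronised) transfer; pausing MEM_TO_DEV would abort the
     * channel and risk losing data still sitting in the sDMA FIFO. */
    static bool omap_chan_can_pause(bool cyclic, enum dma_transfer_direction dir)
    {
        return cyclic || dir == DMA_DEV_TO_MEM;
    }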
@@ -417,10 +417,8 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 {
     struct pch_dma_desc *desc = to_pd_desc(txd);
     struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
-    dma_cookie_t cookie;
 
     spin_lock(&pd_chan->lock);
-    cookie = dma_cookie_assign(txd);
 
     if (list_empty(&pd_chan->active_list)) {
         list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -439,9 +437,8 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
     struct pch_dma *pd = to_pd(chan->device);
     dma_addr_t addr;
 
-    desc = pci_pool_alloc(pd->pool, flags, &addr);
+    desc = pci_pool_zalloc(pd->pool, flags, &addr);
     if (desc) {
-        memset(desc, 0, sizeof(struct pch_dma_desc));
         INIT_LIST_HEAD(&desc->tx_list);
         dma_async_tx_descriptor_init(&desc->txd, chan);
         desc->txd.tx_submit = pd_tx_submit;
@@ -570,7 +570,8 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
 
     buf[0] = CMD_DMAADDH;
     buf[0] |= (da << 1);
-    *((__le16 *)&buf[1]) = cpu_to_le16(val);
+    buf[1] = val;
+    buf[2] = val >> 8;
 
     PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
         da == 1 ? "DA" : "SA", val);
@@ -724,7 +725,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
 
     buf[0] = CMD_DMAMOV;
     buf[1] = dst;
-    *((__le32 *)&buf[2]) = cpu_to_le32(val);
+    buf[2] = val;
+    buf[3] = val >> 8;
+    buf[4] = val >> 16;
+    buf[5] = val >> 24;
 
     PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
         dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -899,10 +903,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
 
     buf[0] = CMD_DMAGO;
     buf[0] |= (ns << 1);
 
     buf[1] = chan & 0x7;
-    *((__le32 *)&buf[2]) = cpu_to_le32(addr);
+    buf[2] = addr;
+    buf[3] = addr >> 8;
+    buf[4] = addr >> 16;
+    buf[5] = addr >> 24;
 
     return SZ_DMAGO;
 }
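The _emit_*() changes above replace casts through __le16/__le32 pointers with explicit byte stores, so the microcode buffer never requires aligned accesses. The same idea in isolation (the helper name is illustrative):

    /* Store a 32-bit value little-endian, one byte at a time, so the
     * destination pointer may be arbitrarily aligned. */
    static void store_le32_unaligned(u8 *buf, u32 val)
    {
        buf[0] = val;
        buf[1] = val >> 8;
        buf[2] = val >> 16;
        buf[3] = val >> 24;
    }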
@@ -1883,11 +1888,8 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
 
 static int pl330_add(struct pl330_dmac *pl330)
 {
-    void __iomem *regs;
     int i, ret;
 
-    regs = pl330->base;
-
     /* Check if we can handle this DMAC */
     if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
         dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
@@ -2263,6 +2265,11 @@ static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
     }
     pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
     pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+    /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
+    if (!val)
+        return 0;
+
     return val - addr;
 }
 
@@ -413,15 +413,6 @@ static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
 static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
 #endif
 
-/*
- * In the transition phase where legacy pxa handling is done at the same time as
- * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
- * through legacy_reserved. Legacy code reserves DMA channels by settings
- * corresponding bits in legacy_reserved.
- */
-static u32 legacy_reserved;
-static u32 legacy_unavailable;
-
 static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
 {
     int prio, i;
@@ -442,14 +433,10 @@ static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
         for (i = 0; i < pdev->nr_chans; i++) {
             if (prio != (i & 0xf) >> 2)
                 continue;
-            if ((i < 32) && (legacy_reserved & BIT(i)))
-                continue;
             phy = &pdev->phys[i];
             if (!phy->vchan) {
                 phy->vchan = pchan;
                 found = phy;
-                if (i < 32)
-                    legacy_unavailable |= BIT(i);
                 goto out_unlock;
             }
         }
@@ -469,7 +456,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
     struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
     unsigned long flags;
     u32 reg;
-    int i;
 
     dev_dbg(&chan->vc.chan.dev->device,
         "%s(): freeing\n", __func__);
@@ -483,9 +469,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
     }
 
     spin_lock_irqsave(&pdev->phy_lock, flags);
-    for (i = 0; i < 32; i++)
-        if (chan->phy == &pdev->phys[i])
-            legacy_unavailable &= ~BIT(i);
     chan->phy->vchan = NULL;
     chan->phy = NULL;
     spin_unlock_irqrestore(&pdev->phy_lock, flags);
@@ -739,8 +722,6 @@ static irqreturn_t pxad_int_handler(int irq, void *dev_id)
         i = __ffs(dint);
         dint &= (dint - 1);
         phy = &pdev->phys[i];
-        if ((i < 32) && (legacy_reserved & BIT(i)))
-            continue;
         if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
             ret = IRQ_HANDLED;
     }
@@ -1522,15 +1503,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param)
 }
 EXPORT_SYMBOL_GPL(pxad_filter_fn);
 
-int pxad_toggle_reserved_channel(int legacy_channel)
-{
-    if (legacy_unavailable & (BIT(legacy_channel)))
-        return -EBUSY;
-    legacy_reserved ^= BIT(legacy_channel);
-    return 0;
-}
-EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);
-
 module_platform_driver(pxad_driver);
 
 MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
@@ -56,6 +56,7 @@
 #include <linux/irq.h>
 #include <linux/atomic.h>
 #include <linux/pm_runtime.h>
+#include <linux/msi.h>
 
 #include "../dmaengine.h"
 #include "hidma.h"
@@ -70,6 +71,7 @@
 #define HIDMA_ERR_INFO_SW            0xFF
 #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE    0x0
 #define HIDMA_NR_DEFAULT_DESC            10
+#define HIDMA_MSI_INTS                11
 
 static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
 {
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
     return hidma_ll_inthandler(chirq, lldev);
 }
 
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+    struct hidma_lldev **lldevp = arg;
+    struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+    return hidma_ll_inthandler_msi(chirq, *lldevp,
+                                   1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
 static ssize_t hidma_show_values(struct device *dev,
                  struct device_attribute *attr, char *buf)
 {
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev,
     return strlen(buf);
 }
 
-static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
-                    int mode)
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+    device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
 {
     struct device_attribute *attrs;
     char *name_copy;
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
     attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
                  GFP_KERNEL);
     if (!attrs)
-        return -ENOMEM;
+        return NULL;
 
     name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
     if (!name_copy)
-        return -ENOMEM;
+        return NULL;
 
     attrs->attr.name = name_copy;
     attrs->attr.mode = mode;
     attrs->show = hidma_show_values;
     sysfs_attr_init(&attrs->attr);
 
-    return device_create_file(dev->ddev.dev, attrs);
+    return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+    dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+    if (!dev->chid_attrs)
+        return -ENOMEM;
+
+    return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+    struct device *dev = msi_desc_to_dev(desc);
+    struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+    if (!desc->platform.msi_index) {
+        writel(msg->address_lo, dmadev->dev_evca + 0x118);
+        writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+        writel(msg->data, dmadev->dev_evca + 0x120);
+    }
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+    struct device *dev = dmadev->ddev.dev;
+    struct msi_desc *desc;
+
+    /* free allocated MSI interrupts above */
+    for_each_msi_entry(desc, dev)
+        devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+    platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+                 struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+    int rc;
+    struct msi_desc *desc;
+    struct msi_desc *failed_desc = NULL;
+
+    rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+                                        hidma_write_msi_msg);
+    if (rc)
+        return rc;
+
+    for_each_msi_entry(desc, &pdev->dev) {
+        if (!desc->platform.msi_index)
+            dmadev->msi_virqbase = desc->irq;
+
+        rc = devm_request_irq(&pdev->dev, desc->irq,
+                              hidma_chirq_handler_msi,
+                              0, "qcom-hidma-msi",
+                              &dmadev->lldev);
+        if (rc) {
+            failed_desc = desc;
+            break;
+        }
+    }
+
+    if (rc) {
+        /* free allocated MSI interrupts above */
+        for_each_msi_entry(desc, &pdev->dev) {
+            if (desc == failed_desc)
+                break;
+            devm_free_irq(&pdev->dev, desc->irq,
+                          &dmadev->lldev);
+        }
+    } else {
+        /* Add callback to free MSIs on teardown */
+        hidma_ll_setup_irq(dmadev->lldev, true);
+
+    }
+    if (rc)
+        dev_warn(&pdev->dev,
+             "failed to request MSI irq, falling back to wired IRQ\n");
+    return rc;
+#else
+    return -EINVAL;
+#endif
+}
+
+static bool hidma_msi_capable(struct device *dev)
+{
+    struct acpi_device *adev = ACPI_COMPANION(dev);
+    const char *of_compat;
+    int ret = -EINVAL;
+
+    if (!adev || acpi_disabled) {
+        ret = device_property_read_string(dev, "compatible",
+                                          &of_compat);
+        if (ret)
+            return false;
+
+        ret = strcmp(of_compat, "qcom,hidma-1.1");
+    } else {
+#ifdef CONFIG_ACPI
+        ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+    }
+    return ret == 0;
 }
 
 static int hidma_probe(struct platform_device *pdev)
@@ -599,6 +724,7 @@ static int hidma_probe(struct platform_device *pdev)
     void __iomem *evca;
     void __iomem *trca;
     int rc;
+    bool msi;
 
     pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
     pm_runtime_use_autosuspend(&pdev->dev);
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev)
     dmadev->ddev.device_terminate_all = hidma_terminate_all;
     dmadev->ddev.copy_align = 8;
 
+    /*
+     * Determine the MSI capability of the platform. Old HW doesn't
+     * support MSI.
+     */
+    msi = hidma_msi_capable(&pdev->dev);
+
     device_property_read_u32(&pdev->dev, "desc-count",
                  &dmadev->nr_descriptors);
 
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev)
         goto dmafree;
     }
 
-    rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
-                  "qcom-hidma", dmadev->lldev);
-    if (rc)
-        goto uninit;
+    platform_set_drvdata(pdev, dmadev);
+    if (msi)
+        rc = hidma_request_msi(dmadev, pdev);
+
+    if (!msi || rc) {
+        hidma_ll_setup_irq(dmadev->lldev, false);
+        rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+                              0, "qcom-hidma", dmadev->lldev);
+        if (rc)
+            goto uninit;
+    }
 
     INIT_LIST_HEAD(&dmadev->ddev.channels);
     rc = hidma_chan_init(dmadev, 0);
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev)
     dmadev->irq = chirq;
     tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
     hidma_debug_init(dmadev);
-    hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
+    hidma_sysfs_init(dmadev);
     dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
-    platform_set_drvdata(pdev, dmadev);
     pm_runtime_mark_last_busy(dmadev->ddev.dev);
     pm_runtime_put_autosuspend(dmadev->ddev.dev);
     return 0;
 
 uninit:
+    if (msi)
+        hidma_free_msis(dmadev);
+
     hidma_debug_uninit(dmadev);
     hidma_ll_uninit(dmadev->lldev);
 dmafree:
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev)
 
     pm_runtime_get_sync(dmadev->ddev.dev);
     dma_async_device_unregister(&dmadev->ddev);
-    devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+    if (!dmadev->lldev->msi_support)
+        devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+    else
+        hidma_free_msis(dmadev);
+
     tasklet_kill(&dmadev->task);
+    hidma_sysfs_uninit(dmadev);
     hidma_debug_uninit(dmadev);
     hidma_ll_uninit(dmadev->lldev);
     hidma_free(dmadev);
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev)
 #if IS_ENABLED(CONFIG_ACPI)
 static const struct acpi_device_id hidma_acpi_ids[] = {
     {"QCOM8061"},
+    {"QCOM8062"},
     {},
 };
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
 #endif
 
 static const struct of_device_id hidma_match[] = {
     {.compatible = "qcom,hidma-1.0",},
+    {.compatible = "qcom,hidma-1.1",},
     {},
 };
 MODULE_DEVICE_TABLE(of, hidma_match);
@@ -46,6 +46,7 @@ struct hidma_tre {
 };
 
 struct hidma_lldev {
+    bool msi_support;        /* flag indicating MSI support */
     bool initialized;        /* initialized flag */
     u8 trch_state;            /* trch_state of the device */
     u8 evch_state;            /* evch_state of the device */
@@ -58,7 +59,7 @@ struct hidma_lldev {
     void __iomem *evca;        /* Event Channel address */
     struct hidma_tre
         **pending_tre_list;    /* Pointers to pending TREs */
-    s32 pending_tre_count;        /* Number of TREs pending */
+    atomic_t pending_tre_count;    /* Number of TREs pending */
 
     void *tre_ring;            /* TRE ring */
     dma_addr_t tre_dma;        /* TRE ring to be shared with HW */
@@ -114,6 +115,7 @@ struct hidma_dev {
     int irq;
     int chidx;
     u32 nr_descriptors;
+    int msi_virqbase;
 
     struct hidma_lldev *lldev;
     void __iomem *dev_trca;
@@ -128,6 +130,9 @@ struct hidma_dev {
     struct dentry *debugfs;
     struct dentry *stats;
 
+    /* sysfs entry for the channel id */
+    struct device_attribute *chid_attrs;
+
     /* Task delivering issue_pending */
     struct tasklet_struct task;
 };
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev);
 int hidma_ll_enable(struct hidma_lldev *llhndl);
 void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
     dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
 int hidma_ll_setup(struct hidma_lldev *lldev);
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
             void __iomem *trca, void __iomem *evca,
             u8 chidx);
 int hidma_ll_uninit(struct hidma_lldev *llhndl);
 irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
                 u8 err_code);
 int hidma_debug_init(struct hidma_dev *dmadev);
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
     seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
     seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
     seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
-    seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+    seq_printf(s, "pending_tre_count=%d\n",
+               atomic_read(&lldev->pending_tre_count));
     seq_printf(s, "evca=%p\n", lldev->evca);
     seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
     seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = {
 void hidma_debug_uninit(struct hidma_dev *dmadev)
 {
     debugfs_remove_recursive(dmadev->debugfs);
-    debugfs_remove_recursive(dmadev->stats);
 }
 
 int hidma_debug_init(struct hidma_dev *dmadev)
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg)
         }
     }
 }
 
-static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
-                u8 err_info, u8 err_code)
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+                u8 err_code)
 {
     struct hidma_tre *tre;
     unsigned long flags;
+    u32 tre_iterator;
 
     spin_lock_irqsave(&lldev->lock, flags);
+
+    tre_iterator = lldev->tre_processed_off;
     tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
     if (!tre) {
         spin_unlock_irqrestore(&lldev->lock, flags);
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
      * Keep track of pending TREs that SW is expecting to receive
      * from HW. We got one now. Decrement our counter.
      */
-    lldev->pending_tre_count--;
-    if (lldev->pending_tre_count < 0) {
+    if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
         dev_warn(lldev->dev, "tre count mismatch on completion");
-        lldev->pending_tre_count = 0;
+        atomic_set(&lldev->pending_tre_count, 0);
     }
 
+    HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+                             lldev->tre_ring_size);
+    lldev->tre_processed_off = tre_iterator;
     spin_unlock_irqrestore(&lldev->lock, flags);
 
     tre->err_info = err_info;
|
||||||
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
|
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
|
||||||
{
|
{
|
||||||
u32 evre_ring_size = lldev->evre_ring_size;
|
u32 evre_ring_size = lldev->evre_ring_size;
|
||||||
u32 tre_ring_size = lldev->tre_ring_size;
|
|
||||||
u32 err_info, err_code, evre_write_off;
|
u32 err_info, err_code, evre_write_off;
|
||||||
u32 tre_iterator, evre_iterator;
|
u32 evre_iterator;
|
||||||
u32 num_completed = 0;
|
u32 num_completed = 0;
|
||||||
|
|
||||||
evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
|
evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
|
||||||
tre_iterator = lldev->tre_processed_off;
|
|
||||||
evre_iterator = lldev->evre_processed_off;
|
evre_iterator = lldev->evre_processed_off;
|
||||||
|
|
||||||
if ((evre_write_off > evre_ring_size) ||
|
if ((evre_write_off > evre_ring_size) ||
|
||||||
|
@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
|
||||||
err_code =
|
err_code =
|
||||||
(cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
|
(cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
|
||||||
|
|
||||||
if (hidma_post_completed(lldev, tre_iterator, err_info,
|
if (hidma_post_completed(lldev, err_info, err_code))
|
||||||
err_code))
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
|
|
||||||
tre_ring_size);
|
|
||||||
HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
|
HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
|
||||||
evre_ring_size);
|
evre_ring_size);
|
||||||
|
|
||||||
|
@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
|
||||||
evre_write_off =
|
evre_write_off =
|
||||||
readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
|
readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
|
||||||
num_completed++;
|
num_completed++;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* An error interrupt might have arrived while we are processing
|
||||||
|
* the completed interrupt.
|
||||||
|
*/
|
||||||
|
if (!hidma_ll_isenabled(lldev))
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (num_completed) {
|
if (num_completed) {
|
||||||
u32 evre_read_off = (lldev->evre_processed_off +
|
u32 evre_read_off = (lldev->evre_processed_off +
|
||||||
HIDMA_EVRE_SIZE * num_completed);
|
HIDMA_EVRE_SIZE * num_completed);
|
||||||
u32 tre_read_off = (lldev->tre_processed_off +
|
|
||||||
HIDMA_TRE_SIZE * num_completed);
|
|
||||||
|
|
||||||
evre_read_off = evre_read_off % evre_ring_size;
|
evre_read_off = evre_read_off % evre_ring_size;
|
||||||
tre_read_off = tre_read_off % tre_ring_size;
|
|
||||||
|
|
||||||
writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
|
writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
|
||||||
|
|
||||||
/* record the last processed tre offset */
|
/* record the last processed tre offset */
|
||||||
lldev->tre_processed_off = tre_read_off;
|
|
||||||
lldev->evre_processed_off = evre_read_off;
|
lldev->evre_processed_off = evre_read_off;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
|
||||||
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
|
void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
|
||||||
u8 err_code)
|
u8 err_code)
|
||||||
{
|
{
|
||||||
u32 tre_iterator;
|
while (atomic_read(&lldev->pending_tre_count)) {
|
||||||
u32 tre_ring_size = lldev->tre_ring_size;
|
if (hidma_post_completed(lldev, err_info, err_code))
|
||||||
int num_completed = 0;
|
|
||||||
u32 tre_read_off;
|
|
||||||
|
|
||||||
tre_iterator = lldev->tre_processed_off;
|
|
||||||
while (lldev->pending_tre_count) {
|
|
||||||
if (hidma_post_completed(lldev, tre_iterator, err_info,
|
|
||||||
err_code))
|
|
||||||
break;
|
break;
|
||||||
HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
|
|
||||||
tre_ring_size);
|
|
||||||
num_completed++;
|
|
||||||
}
|
}
|
||||||
tre_read_off = (lldev->tre_processed_off +
|
|
||||||
HIDMA_TRE_SIZE * num_completed);
|
|
||||||
|
|
||||||
tre_read_off = tre_read_off % tre_ring_size;
|
|
||||||
|
|
||||||
/* record the last processed tre offset */
|
|
||||||
lldev->tre_processed_off = tre_read_off;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int hidma_ll_reset(struct hidma_lldev *lldev)
|
static int hidma_ll_reset(struct hidma_lldev *lldev)
|
||||||
|
@@ -412,12 +396,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
  * requests traditionally to the destination, this concept does not apply
  * here for this HW.
  */
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
 {
-	struct hidma_lldev *lldev = arg;
-	u32 status;
-	u32 enable;
-	u32 cause;
+	if (cause & HIDMA_ERR_INT_MASK) {
+		dev_err(lldev->dev, "error 0x%x, disabling...\n",
+			cause);
+
+		/* Clear out pending interrupts */
+		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+		/* No further submissions. */
+		hidma_ll_disable(lldev);
+
+		/* Driver completes the txn and intimates the client.*/
+		hidma_cleanup_pending_tre(lldev, 0xFF,
+					  HIDMA_EVRE_STATUS_ERROR);
+
+		return;
+	}
 
 	/*
 	 * Fine tuned for this HW...
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
 	 * read and write accessors are used for performance reasons due to
 	 * interrupt delivery guarantees. Do not copy this code blindly and
 	 * expect that to work.
+	 *
+	 * Try to consume as many EVREs as possible.
 	 */
+	hidma_handle_tre_completion(lldev);
+
+	/* We consumed TREs or there are pending TREs or EVREs. */
+	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+	struct hidma_lldev *lldev = arg;
+	u32 status;
+	u32 enable;
+	u32 cause;
+
 	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
 	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
 	cause = status & enable;
 
 	while (cause) {
-		if (cause & HIDMA_ERR_INT_MASK) {
-			dev_err(lldev->dev, "error 0x%x, disabling...\n",
-				cause);
-
-			/* Clear out pending interrupts */
-			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
-			/* No further submissions. */
-			hidma_ll_disable(lldev);
-
-			/* Driver completes the txn and intimates the client.*/
-			hidma_cleanup_pending_tre(lldev, 0xFF,
-						  HIDMA_EVRE_STATUS_ERROR);
-			goto out;
-		}
-
-		/*
-		 * Try to consume as many EVREs as possible.
-		 */
-		hidma_handle_tre_completion(lldev);
-
-		/* We consumed TREs or there are pending TREs or EVREs. */
-		writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+		hidma_ll_int_handler_internal(lldev, cause);
 
 		/*
 		 * Another interrupt might have arrived while we are
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
 		cause = status & enable;
 	}
 
-out:
+	return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+	struct hidma_lldev *lldev = arg;
+
+	hidma_ll_int_handler_internal(lldev, cause);
 	return IRQ_HANDLED;
 }
 
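The refactor above moves the handler body into hidma_ll_int_handler_internal() so the wired-IRQ entry point and the new hidma_ll_inthandler_msi() share one code path. Not part of the patch: a minimal sketch of that shape with hypothetical names, assuming the MSI dispatch already knows the cause.

#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_dev {
	void __iomem *regs;	/* hypothetical status register window */
};

/* Shared core: both entry points funnel into this one function. */
static void foo_handle_events(struct foo_dev *fd, u32 cause)
{
	/* error handling and completion processing would live here */
}

/* Wired IRQ: the cause must be read back from the hardware. */
static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct foo_dev *fd = arg;
	u32 cause = readl_relaxed(fd->regs);	/* placeholder status read */

	foo_handle_events(fd, cause);
	return IRQ_HANDLED;
}

/* MSI: the vector already identifies the cause, so it is passed in. */
static irqreturn_t foo_msi_handler(int irq, void *arg, int cause)
{
	struct foo_dev *fd = arg;

	foo_handle_events(fd, cause);
	return IRQ_HANDLED;
}
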
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
 	tre->err_code = 0;
 	tre->err_info = 0;
 	tre->queued = 1;
-	lldev->pending_tre_count++;
+	atomic_inc(&lldev->pending_tre_count);
 	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
 				    % lldev->tre_ring_size;
 	spin_unlock_irqrestore(&lldev->lock, flags);
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
 	u32 val;
 	int ret;
 
-	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
-	lldev->evch_state = HIDMA_CH_STATE(val);
-	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
-	lldev->trch_state = HIDMA_CH_STATE(val);
-
-	/* already suspended by this OS */
-	if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
-	    (lldev->evch_state == HIDMA_CH_SUSPENDED))
-		return 0;
-
-	/* already stopped by the manager */
-	if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
-	    (lldev->evch_state == HIDMA_CH_STOPPED))
+	/* The channel needs to be in working state */
+	if (!hidma_ll_isenabled(lldev))
 		return 0;
 
 	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	u32 val;
 	u32 nr_tres = lldev->nr_tres;
 
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_processed_off = 0;
 	lldev->evre_processed_off = 0;
 	lldev->tre_write_offset = 0;
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	writel(HIDMA_EVRE_SIZE * nr_tres,
 			lldev->evca + HIDMA_EVCA_RING_LEN_REG);
 
-	/* support IRQ only for now */
+	/* configure interrupts */
+	hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+	rc = hidma_ll_enable(lldev);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+	u32 val;
+
+	lldev->msi_support = msi;
+
+	/* disable interrupts again after reset */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	/* support IRQ by default */
 	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
 	val &= ~0xF;
-	val |= 0x1;
+	if (!lldev->msi_support)
+		val = val | 0x1;
 	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
 
 	/* clear all pending interrupts and enable them */
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-
-	return hidma_ll_enable(lldev);
 }
 
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,

@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 	tasklet_kill(&lldev->task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_write_offset = 0;
 
 	rc = hidma_ll_reset(lldev);

@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
 	{"QCOM8060"},
 	{},
 };
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
 #endif
 
 static const struct of_device_id hidma_mgmt_match[] = {
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
 			ret = PTR_ERR(new_pdev);
 			goto out;
 		}
+		of_node_get(child);
+		new_pdev->dev.of_node = child;
 		of_dma_configure(&new_pdev->dev, child);
-
+		/*
+		 * It is assumed that calling of_msi_configure is safe on
+		 * platforms with or without MSI support.
+		 */
+		of_msi_configure(&new_pdev->dev, child);
+		of_node_put(child);
 		kfree(res);
 		res = NULL;
 	}
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void)
 	for_each_matching_node(child, hidma_mgmt_match) {
 		/* device tree based firmware here */
 		hidma_mgmt_of_populate_channels(child);
-		of_node_put(child);
 	}
 #endif
 	platform_driver_register(&hidma_mgmt_driver);

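The hunk above attaches each DT child node to its freshly created child platform device and then calls of_msi_configure() on it, which this series exports for module use. Not part of the patch: a minimal sketch of the same binding sequence with hypothetical names, mirroring the calls shown above.

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

/*
 * Bind a DT child node to a child platform device so its DMA and MSI
 * domains can be resolved from that node's properties.
 */
static void foo_bind_child_node(struct platform_device *pdev,
				struct device_node *child)
{
	of_node_get(child);
	pdev->dev.of_node = child;
	of_dma_configure(&pdev->dev, child);
	/* safe to call whether or not the platform has MSI support */
	of_msi_configure(&pdev->dev, child);
	of_node_put(child);
}
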
@@ -289,16 +289,11 @@ static
 struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
 {
 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
-	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
-	struct s3c24xx_dma_channel *cdata;
 	struct s3c24xx_dma_phy *phy = NULL;
 	unsigned long flags;
 	int i;
 	int ret;
 
-	if (s3cchan->slave)
-		cdata = &pdata->channels[s3cchan->id];
-
 	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
 		phy = &s3cdma->phy_chans[i];
 

@@ -652,7 +652,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
 static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
 					  struct of_dma *ofdma)
 {
-	struct usb_dmac_chan *uchan;
 	struct dma_chan *chan;
 	dma_cap_mask_t mask;
 
@@ -667,8 +666,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
 	if (!chan)
 		return NULL;
 
-	uchan = to_usb_dmac_chan(chan);
-
 	return chan;
 }
 

@@ -1011,7 +1011,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
 {
 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 	struct sirfsoc_dma_regs *save = &sdma->regs_save;
-	struct sirfsoc_dma_desc *sdesc;
 	struct sirfsoc_dma_chan *schan;
 	int ch;
 	int ret;
@@ -1044,9 +1043,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
 		schan = &sdma->channels[ch];
 		if (list_empty(&schan->active))
 			continue;
-		sdesc = list_first_entry(&schan->active,
-			struct sirfsoc_dma_desc,
-			node);
 		save->ctrl[ch] = readl_relaxed(sdma->base +
 			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
 	}

@@ -527,13 +527,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
 {
 	struct stm32_dma_chan *chan = devid;
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	u32 status, scr, sfcr;
+	u32 status, scr;
 
 	spin_lock(&chan->vchan.lock);
 
 	status = stm32_dma_irq_status(chan);
 	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
 
 	if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
 		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -574,15 +573,12 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 	int src_bus_width, dst_bus_width;
 	int src_burst_size, dst_burst_size;
 	u32 src_maxburst, dst_maxburst;
-	dma_addr_t src_addr, dst_addr;
 	u32 dma_scr = 0;
 
 	src_addr_width = chan->dma_sconfig.src_addr_width;
 	dst_addr_width = chan->dma_sconfig.dst_addr_width;
 	src_maxburst = chan->dma_sconfig.src_maxburst;
 	dst_maxburst = chan->dma_sconfig.dst_maxburst;
-	src_addr = chan->dma_sconfig.src_addr;
-	dst_addr = chan->dma_sconfig.dst_addr;
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:

@@ -435,13 +435,12 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
 	if (!ds)
 		return NULL;
 
-	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
 	if (!ds->desc_hw) {
 		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
 		kfree(ds);
 		return NULL;
 	}
-	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
 	ds->desc_num = num;
 	return ds;
 }

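The hunk above is one of the series' dma_pool_zalloc conversions: the allocation and the follow-up memset() collapse into a single call that returns zeroed descriptor memory. Not part of the patch: a minimal sketch of the same conversion with a hypothetical descriptor type.

#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct foo_hw_desc {
	u32 cfg;
	u32 next;
};

/* Allocate one hardware descriptor that is already zero-filled. */
static struct foo_hw_desc *foo_alloc_desc(struct dma_pool *pool,
					  dma_addr_t *phys)
{
	/* dma_pool_zalloc() folds the old dma_pool_alloc() + memset(0) pair */
	return dma_pool_zalloc(pool, GFP_NOWAIT, phys);
}
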
@@ -697,3 +697,4 @@ void of_msi_configure(struct device *dev, struct device_node *np)
 	dev_set_msi_domain(dev,
 			   of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
 }
+EXPORT_SYMBOL_GPL(of_msi_configure);

@@ -341,27 +341,20 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 {
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-	dma_filter_fn filter = sdd->cntrlr_info->filter;
 	struct device *dev = &sdd->pdev->dev;
-	dma_cap_mask_t mask;
 
 	if (is_polling(sdd))
 		return 0;
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
 	/* Acquire DMA channels */
-	sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-				sdd->cntrlr_info->dma_rx, dev, "rx");
+	sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx");
 	if (!sdd->rx_dma.ch) {
 		dev_err(dev, "Failed to get RX DMA channel\n");
 		return -EBUSY;
 	}
 	spi->dma_rx = sdd->rx_dma.ch;
 
-	sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-				sdd->cntrlr_info->dma_tx, dev, "tx");
+	sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx");
 	if (!sdd->tx_dma.ch) {
 		dev_err(dev, "Failed to get TX DMA channel\n");
 		dma_release_channel(sdd->rx_dma.ch);
@@ -1091,11 +1084,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 
 	sdd->cur_bpw = 8;
 
-	if (!sdd->pdev->dev.of_node && (!sci->dma_tx || !sci->dma_rx)) {
-		dev_warn(&pdev->dev, "Unable to get SPI tx/rx DMA data. Switching to poll mode\n");
-		sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
-	}
-
 	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
 	sdd->rx_dma.direction = DMA_DEV_TO_MEM;
 
@@ -1205,9 +1193,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 
 	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
 					sdd->port_id, master->num_chipselect);
-	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%p, Tx-%p]\n",
-			mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
-			sci->dma_rx, sci->dma_tx);
+	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
+			mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
 
 	pm_runtime_mark_last_busy(&pdev->dev);
 	pm_runtime_put_autosuspend(&pdev->dev);

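The spi-s3c64xx hunks above drop the filter-function/compat lookup in favour of dma_request_slave_channel(), which resolves the channel purely from the "rx"/"tx" names attached to the device. Not part of the patch: a minimal sketch of requesting and releasing named slave channels this way, with hypothetical names.

#include <linux/dmaengine.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Look up the "rx" and "tx" channels named in the device's DT/ACPI data. */
static int foo_acquire_dma(struct device *dev,
			   struct dma_chan **rx, struct dma_chan **tx)
{
	*rx = dma_request_slave_channel(dev, "rx");
	if (!*rx)
		return -EBUSY;

	*tx = dma_request_slave_channel(dev, "tx");
	if (!*tx) {
		dma_release_channel(*rx);
		return -EBUSY;
	}

	return 0;
}
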
|
@ -157,12 +157,12 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
|
||||||
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
|
static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
|
||||||
.nr_channels = 2,
|
.nr_channels = 2,
|
||||||
.is_private = true,
|
.is_private = true,
|
||||||
.is_nollp = true,
|
|
||||||
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
|
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
|
||||||
.chan_priority = CHAN_PRIORITY_ASCENDING,
|
.chan_priority = CHAN_PRIORITY_ASCENDING,
|
||||||
.block_size = 4095,
|
.block_size = 4095,
|
||||||
.nr_masters = 1,
|
.nr_masters = 1,
|
||||||
.data_width = {4},
|
.data_width = {4},
|
||||||
|
.multi_block = {0},
|
||||||
};
|
};
|
||||||
|
|
||||||
static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
|
static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
|
||||||
|
|
|
@ -84,6 +84,8 @@ struct pl08x_channel_data {
|
||||||
* running any DMA transfer and multiplexing can be recycled
|
* running any DMA transfer and multiplexing can be recycled
|
||||||
* @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
|
* @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
|
||||||
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
|
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
|
||||||
|
* @slave_map: DMA slave matching table
|
||||||
|
* @slave_map_len: number of elements in @slave_map
|
||||||
*/
|
*/
|
||||||
struct pl08x_platform_data {
|
struct pl08x_platform_data {
|
||||||
struct pl08x_channel_data *slave_channels;
|
struct pl08x_channel_data *slave_channels;
|
||||||
|
@ -93,6 +95,8 @@ struct pl08x_platform_data {
|
||||||
void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
|
void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
|
||||||
u8 lli_buses;
|
u8 lli_buses;
|
||||||
u8 mem_buses;
|
u8 mem_buses;
|
||||||
|
const struct dma_slave_map *slave_map;
|
||||||
|
int slave_map_len;
|
||||||
};
|
};
|
||||||
|
|
||||||
#ifdef CONFIG_AMBA_PL08X
|
#ifdef CONFIG_AMBA_PL08X
|
||||||
|
|
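These pl08x platform-data additions carry the "support for slave maps on pl08x devices" item from the summary: board code can now hand the driver a dma_slave_map table so non-DT platforms get name-based channel lookups too. Not part of the patch: a minimal sketch of what such a table could look like, with hypothetical device and signal names.

#include <linux/amba/pl08x.h>
#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Channel data a (hypothetical) board already defines for its UART TX. */
static struct pl08x_channel_data foo_uart0_tx = {
	.bus_id = "uart0_tx",
	.min_signal = 0,
	.max_signal = 0,
	.periph_buses = PL08X_AHB2,
};

/* Match ("device name", "channel name") requests to that channel data. */
static const struct dma_slave_map foo_slave_map[] = {
	{ "foo-uart.0", "tx", &foo_uart0_tx },
};

static struct pl08x_platform_data foo_pl08x_pdata = {
	/* other required fields omitted for brevity */
	.slave_map	= foo_slave_map,
	.slave_map_len	= ARRAY_SIZE(foo_slave_map),
};
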
|
@ -336,6 +336,12 @@ enum dma_slave_buswidth {
|
||||||
* may or may not be applicable on memory sources.
|
* may or may not be applicable on memory sources.
|
||||||
* @dst_maxburst: same as src_maxburst but for destination target
|
* @dst_maxburst: same as src_maxburst but for destination target
|
||||||
* mutatis mutandis.
|
* mutatis mutandis.
|
||||||
|
* @src_port_window_size: The length of the register area in words the data need
|
||||||
|
* to be accessed on the device side. It is only used for devices which is using
|
||||||
|
* an area instead of a single register to receive the data. Typically the DMA
|
||||||
|
* loops in this area in order to transfer the data.
|
||||||
|
* @dst_port_window_size: same as src_port_window_size but for the destination
|
||||||
|
* port.
|
||||||
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
|
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
|
||||||
* with 'true' if peripheral should be flow controller. Direction will be
|
* with 'true' if peripheral should be flow controller. Direction will be
|
||||||
* selected at Runtime.
|
* selected at Runtime.
|
||||||
|
@ -363,6 +369,8 @@ struct dma_slave_config {
|
||||||
enum dma_slave_buswidth dst_addr_width;
|
enum dma_slave_buswidth dst_addr_width;
|
||||||
u32 src_maxburst;
|
u32 src_maxburst;
|
||||||
u32 dst_maxburst;
|
u32 dst_maxburst;
|
||||||
|
u32 src_port_window_size;
|
||||||
|
u32 dst_port_window_size;
|
||||||
bool device_fc;
|
bool device_fc;
|
||||||
unsigned int slave_id;
|
unsigned int slave_id;
|
||||||
};
|
};
|
||||||
|
|
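The dmaengine.h hunks above add src/dst_port_window_size to dma_slave_config for peripherals that expose a register window rather than a single FIFO register. Not part of the patch: a minimal sketch of a client filling the new field, with hypothetical values.

#include <linux/dmaengine.h>

/* Configure a TX slave channel whose device exposes a 16-word data window. */
static int foo_config_window(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction		= DMA_MEM_TO_DEV,
		.dst_addr		= fifo_addr,
		.dst_addr_width		= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst		= 16,
		.dst_port_window_size	= 16,	/* window length, in words */
	};

	return dmaengine_slave_config(chan, &cfg);
}
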
|
@ -14,6 +14,7 @@
|
||||||
#include <linux/device.h>
|
#include <linux/device.h>
|
||||||
|
|
||||||
#define DW_DMA_MAX_NR_MASTERS 4
|
#define DW_DMA_MAX_NR_MASTERS 4
|
||||||
|
#define DW_DMA_MAX_NR_CHANNELS 8
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct dw_dma_slave - Controller-specific information about a slave
|
* struct dw_dma_slave - Controller-specific information about a slave
|
||||||
|
@ -40,19 +41,18 @@ struct dw_dma_slave {
|
||||||
* @is_private: The device channels should be marked as private and not for
|
* @is_private: The device channels should be marked as private and not for
|
||||||
* by the general purpose DMA channel allocator.
|
* by the general purpose DMA channel allocator.
|
||||||
* @is_memcpy: The device channels do support memory-to-memory transfers.
|
* @is_memcpy: The device channels do support memory-to-memory transfers.
|
||||||
* @is_nollp: The device channels does not support multi block transfers.
|
|
||||||
* @chan_allocation_order: Allocate channels starting from 0 or 7
|
* @chan_allocation_order: Allocate channels starting from 0 or 7
|
||||||
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
|
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
|
||||||
* @block_size: Maximum block size supported by the controller
|
* @block_size: Maximum block size supported by the controller
|
||||||
* @nr_masters: Number of AHB masters supported by the controller
|
* @nr_masters: Number of AHB masters supported by the controller
|
||||||
* @data_width: Maximum data width supported by hardware per AHB master
|
* @data_width: Maximum data width supported by hardware per AHB master
|
||||||
* (in bytes, power of 2)
|
* (in bytes, power of 2)
|
||||||
|
* @multi_block: Multi block transfers supported by hardware per channel.
|
||||||
*/
|
*/
|
||||||
struct dw_dma_platform_data {
|
struct dw_dma_platform_data {
|
||||||
unsigned int nr_channels;
|
unsigned int nr_channels;
|
||||||
bool is_private;
|
bool is_private;
|
||||||
bool is_memcpy;
|
bool is_memcpy;
|
||||||
bool is_nollp;
|
|
||||||
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
|
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
|
||||||
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
|
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
|
||||||
unsigned char chan_allocation_order;
|
unsigned char chan_allocation_order;
|
||||||
|
@ -62,6 +62,7 @@ struct dw_dma_platform_data {
|
||||||
unsigned int block_size;
|
unsigned int block_size;
|
||||||
unsigned char nr_masters;
|
unsigned char nr_masters;
|
||||||
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
|
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
|
||||||
|
unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _PLATFORM_DATA_DMA_DW_H */
|
#endif /* _PLATFORM_DATA_DMA_DW_H */
|
||||||
|
|
|
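With is_nollp replaced by a per-channel multi_block[] array, dw dmac platform data now states multi-block (LLP chaining) capability channel by channel; the 8250_lpss hunk earlier does exactly that with .multi_block = {0}. Not part of the patch: a minimal sketch for a hypothetical controller where only the first two channels can chain blocks.

#include <linux/platform_data/dma-dw.h>

/* Hypothetical controller: four channels, only 0 and 1 support multi-block. */
static struct dw_dma_platform_data foo_dw_pdata = {
	.nr_channels		= 4,
	.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
	.chan_priority		= CHAN_PRIORITY_ASCENDING,
	.block_size		= 4095,
	.nr_masters		= 1,
	.data_width		= {4},
	.multi_block		= {1, 1, 0, 0},
};
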
@@ -40,9 +40,6 @@ struct s3c64xx_spi_info {
 	int num_cs;
 	bool no_cs;
 	int (*cfg_gpio)(void);
-	dma_filter_fn filter;
-	void *dma_tx;
-	void *dma_rx;
 };
 
 /**