From eebeac03db93e5556f37de6ed940f89327cc221b Mon Sep 17 00:00:00 2001 From: Srikanth Thokala Date: Wed, 23 Apr 2014 20:23:25 +0530 Subject: [PATCH 01/45] dma: Add Xilinx Video DMA DT Binding Documentation Device-tree binding documentation of Xilinx Video DMA Engine Signed-off-by: Srikanth Thokala Acked-by: Rob Herring Signed-off-by: Vinod Koul --- .../bindings/dma/xilinx/xilinx_vdma.txt | 75 +++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt new file mode 100644 index 000000000000..1405ed071bb4 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt @@ -0,0 +1,75 @@ +Xilinx AXI VDMA engine, it does transfers between memory and video devices. +It can be configured to have one channel or two channels. If configured +as two channels, one is to transmit to the video device and another is +to receive from the video device. + +Required properties: +- compatible: Should be "xlnx,axi-vdma-1.00.a" +- #dma-cells: Should be <1>, see "dmas" property below +- reg: Should contain VDMA registers location and length. +- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. +- dma-channel child node: Should have at least one channel and can have up to + two channels per device. This node specifies the properties of each + DMA channel (see child node properties below). + +Optional properties: +- xlnx,include-sg: Tells configured for Scatter-mode in + the hardware. +- xlnx,flush-fsync: Tells which channel to Flush on Frame sync. + It takes following values: + {1}, flush both channels + {2}, flush mm2s channel + {3}, flush s2mm channel + +Required child node properties: +- compatible: It should be either "xlnx,axi-vdma-mm2s-channel" or + "xlnx,axi-vdma-s2mm-channel". +- interrupts: Should contain per channel VDMA interrupts. +- xlnx,data-width: Should contain the stream data width, take values + {32,64...1024}. + +Optional child node properties: +- xlnx,include-dre: Tells hardware is configured for Data + Realignment Engine. +- xlnx,genlock-mode: Tells Genlock synchronization is + enabled/disabled in hardware. + +Example: +++++++++ + +axi_vdma_0: axivdma@40030000 { + compatible = "xlnx,axi-vdma-1.00.a"; + #dma_cells = <1>; + reg = < 0x40030000 0x10000 >; + xlnx,num-fstores = <0x8>; + xlnx,flush-fsync = <0x1>; + dma-channel@40030000 { + compatible = "xlnx,axi-vdma-mm2s-channel"; + interrupts = < 0 54 4 >; + xlnx,datawidth = <0x40>; + } ; + dma-channel@40030030 { + compatible = "xlnx,axi-vdma-s2mm-channel"; + interrupts = < 0 53 4 >; + xlnx,datawidth = <0x40>; + } ; +} ; + + +* DMA client + +Required properties: +- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs, + where Channel ID is '0' for write/tx and '1' for read/rx + channel. 
+- dma-names: a list of DMA channel names, one per "dmas" entry + +Example: +++++++++ + +vdmatest_0: vdmatest@0 { + compatible ="xlnx,axi-vdma-test-1.00.a"; + dmas = <&axi_vdma_0 0 + &axi_vdma_0 1>; + dma-names = "vdma0", "vdma1"; +} ; From 9cd4360de6090a6daf7fbe024e34953f2ae60ef2 Mon Sep 17 00:00:00 2001 From: Srikanth Thokala Date: Wed, 23 Apr 2014 20:23:26 +0530 Subject: [PATCH 02/45] dma: Add Xilinx AXI Video Direct Memory Access Engine driver support This is the driver for the AXI Video Direct Memory Access (AXI VDMA) core, which is a soft Xilinx IP core that provides high- bandwidth direct memory access between memory and AXI4-Stream type video target peripherals. The core provides efficient two dimensional DMA operations with independent asynchronous read and write channel operation. This module works on Zynq (ARM Based SoC) and Microblaze platforms. Signed-off-by: Srikanth Thokala Acked-by: Jassi Brar Reviewed-by: Levente Kurusa Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 14 + drivers/dma/Makefile | 1 + drivers/dma/xilinx/Makefile | 1 + drivers/dma/xilinx/xilinx_vdma.c | 1379 ++++++++++++++++++++++++++++++ include/linux/amba/xilinx_dma.h | 47 + 5 files changed, 1442 insertions(+) create mode 100644 drivers/dma/xilinx/Makefile create mode 100644 drivers/dma/xilinx/xilinx_vdma.c create mode 100644 include/linux/amba/xilinx_dma.h diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 5c5863842de9..b30b7ed89fb2 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -361,6 +361,20 @@ config FSL_EDMA multiplexing capability for DMA request sources(slot). This module can be found on Freescale Vybrid and LS-1 SoCs. +config XILINX_VDMA + tristate "Xilinx AXI VDMA Engine" + depends on (ARCH_ZYNQ || MICROBLAZE) + select DMA_ENGINE + help + Enable support for Xilinx AXI VDMA Soft IP. + + This engine provides high-bandwidth direct memory access + between memory and AXI4-Stream video type target + peripherals including peripherals which support AXI4- + Stream Video Protocol. It has two stream interfaces/ + channels, Memory Mapped to Stream (MM2S) and Stream to + Memory Mapped (S2MM) for the data transfers. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 5150c82c9caf..c779e1eb2db2 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -46,3 +46,4 @@ obj-$(CONFIG_K3_DMA) += k3dma.o obj-$(CONFIG_MOXART_DMA) += moxart-dma.o obj-$(CONFIG_FSL_EDMA) += fsl-edma.o obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o +obj-y += xilinx/ diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile new file mode 100644 index 000000000000..3c4e9f2fea28 --- /dev/null +++ b/drivers/dma/xilinx/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c new file mode 100644 index 000000000000..42a13e8d4607 --- /dev/null +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -0,0 +1,1379 @@ +/* + * DMA driver for Xilinx Video DMA Engine + * + * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. + * + * Based on the Freescale DMA driver. + * + * Description: + * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP + * core that provides high-bandwidth direct memory access between memory + * and AXI4-Stream type video target peripherals. The core provides efficient + * two dimensional DMA operations with independent asynchronous read (S2MM) + * and write (MM2S) channel operation. It can be configured to have either + * one channel or two channels. 
If configured as two channels, one is to + * transmit to the video device (MM2S) and another is to receive from the + * video device (S2MM). Initialization, status, interrupt and management + * registers are accessed through an AXI4-Lite slave interface. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" + +/* Register/Descriptor Offsets */ +#define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000 +#define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030 +#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 +#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 + +/* Control Registers */ +#define XILINX_VDMA_REG_DMACR 0x0000 +#define XILINX_VDMA_DMACR_DELAY_MAX 0xff +#define XILINX_VDMA_DMACR_DELAY_SHIFT 24 +#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff +#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16 +#define XILINX_VDMA_DMACR_ERR_IRQ BIT(14) +#define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13) +#define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12) +#define XILINX_VDMA_DMACR_MASTER_SHIFT 8 +#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5 +#define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4) +#define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3) +#define XILINX_VDMA_DMACR_RESET BIT(2) +#define XILINX_VDMA_DMACR_CIRC_EN BIT(1) +#define XILINX_VDMA_DMACR_RUNSTOP BIT(0) +#define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) + +#define XILINX_VDMA_REG_DMASR 0x0004 +#define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15) +#define XILINX_VDMA_DMASR_ERR_IRQ BIT(14) +#define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13) +#define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12) +#define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11) +#define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10) +#define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9) +#define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8) +#define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7) +#define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6) +#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5) +#define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4) +#define XILINX_VDMA_DMASR_IDLE BIT(1) +#define XILINX_VDMA_DMASR_HALTED BIT(0) +#define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24) +#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) + +#define XILINX_VDMA_REG_CURDESC 0x0008 +#define XILINX_VDMA_REG_TAILDESC 0x0010 +#define XILINX_VDMA_REG_REG_INDEX 0x0014 +#define XILINX_VDMA_REG_FRMSTORE 0x0018 +#define XILINX_VDMA_REG_THRESHOLD 0x001c +#define XILINX_VDMA_REG_FRMPTR_STS 0x0024 +#define XILINX_VDMA_REG_PARK_PTR 0x0028 +#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8 +#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0 +#define XILINX_VDMA_REG_VDMA_VERSION 0x002c + +/* Register Direct Mode Registers */ +#define XILINX_VDMA_REG_VSIZE 0x0000 +#define XILINX_VDMA_REG_HSIZE 0x0004 + +#define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008 +#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 +#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 + +#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) + +/* HW specific definitions */ +#define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2 + +#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \ + (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \ + XILINX_VDMA_DMASR_DLY_CNT_IRQ | \ + XILINX_VDMA_DMASR_ERR_IRQ) + +#define XILINX_VDMA_DMASR_ALL_ERR_MASK \ + (XILINX_VDMA_DMASR_EOL_LATE_ERR | \ + XILINX_VDMA_DMASR_SOF_LATE_ERR | \ + XILINX_VDMA_DMASR_SG_DEC_ERR | \ 
+ XILINX_VDMA_DMASR_SG_SLV_ERR | \ + XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ + XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ + XILINX_VDMA_DMASR_DMA_DEC_ERR | \ + XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \ + XILINX_VDMA_DMASR_DMA_INT_ERR) + +/* + * Recoverable errors are DMA Internal error, SOF Early, EOF Early + * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC + * is enabled in the h/w system. + */ +#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \ + (XILINX_VDMA_DMASR_SOF_LATE_ERR | \ + XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ + XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ + XILINX_VDMA_DMASR_DMA_INT_ERR) + +/* Axi VDMA Flush on Fsync bits */ +#define XILINX_VDMA_FLUSH_S2MM 3 +#define XILINX_VDMA_FLUSH_MM2S 2 +#define XILINX_VDMA_FLUSH_BOTH 1 + +/* Delay loop counter to prevent hardware failure */ +#define XILINX_VDMA_LOOP_COUNT 1000000 + +/** + * struct xilinx_vdma_desc_hw - Hardware Descriptor + * @next_desc: Next Descriptor Pointer @0x00 + * @pad1: Reserved @0x04 + * @buf_addr: Buffer address @0x08 + * @pad2: Reserved @0x0C + * @vsize: Vertical Size @0x10 + * @hsize: Horizontal Size @0x14 + * @stride: Number of bytes between the first + * pixels of each horizontal line @0x18 + */ +struct xilinx_vdma_desc_hw { + u32 next_desc; + u32 pad1; + u32 buf_addr; + u32 pad2; + u32 vsize; + u32 hsize; + u32 stride; +} __aligned(64); + +/** + * struct xilinx_vdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_vdma_tx_segment { + struct xilinx_vdma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** + * struct xilinx_vdma_tx_descriptor - Per Transaction structure + * @async_tx: Async transaction descriptor + * @segments: TX segments list + * @node: Node in the channel descriptors list + */ +struct xilinx_vdma_tx_descriptor { + struct dma_async_tx_descriptor async_tx; + struct list_head segments; + struct list_head node; +}; + +/** + * struct xilinx_vdma_chan - Driver specific VDMA channel structure + * @xdev: Driver specific device structure + * @ctrl_offset: Control registers offset + * @desc_offset: TX descriptor registers offset + * @lock: Descriptor operation lock + * @pending_list: Descriptors waiting + * @active_desc: Active descriptor + * @allocated_desc: Allocated descriptor + * @done_list: Complete descriptors + * @common: DMA common channel + * @desc_pool: Descriptors pool + * @dev: The dma device + * @irq: Channel IRQ + * @id: Channel ID + * @direction: Transfer direction + * @num_frms: Number of frames + * @has_sg: Support scatter transfers + * @genlock: Support genlock mode + * @err: Channel has errors + * @tasklet: Cleanup work after irq + * @config: Device configuration info + * @flush_on_fsync: Flush on Frame sync + */ +struct xilinx_vdma_chan { + struct xilinx_vdma_device *xdev; + u32 ctrl_offset; + u32 desc_offset; + spinlock_t lock; + struct list_head pending_list; + struct xilinx_vdma_tx_descriptor *active_desc; + struct xilinx_vdma_tx_descriptor *allocated_desc; + struct list_head done_list; + struct dma_chan common; + struct dma_pool *desc_pool; + struct device *dev; + int irq; + int id; + enum dma_transfer_direction direction; + int num_frms; + bool has_sg; + bool genlock; + bool err; + struct tasklet_struct tasklet; + struct xilinx_vdma_config config; + bool flush_on_fsync; +}; + +/** + * struct xilinx_vdma_device - VDMA device structure + * @regs: I/O mapped base address + * @dev: Device Structure + * @common: DMA device structure + * @chan: Driver 
specific VDMA channel + * @has_sg: Specifies whether Scatter-Gather is present or not + * @flush_on_fsync: Flush on frame sync + */ +struct xilinx_vdma_device { + void __iomem *regs; + struct device *dev; + struct dma_device common; + struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE]; + bool has_sg; + u32 flush_on_fsync; +}; + +/* Macros */ +#define to_xilinx_chan(chan) \ + container_of(chan, struct xilinx_vdma_chan, common) +#define to_vdma_tx_descriptor(tx) \ + container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) + +/* IO accessors */ +static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) +{ + return ioread32(chan->xdev->regs + reg); +} + +static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value) +{ + iowrite32(value, chan->xdev->regs + reg); +} + +static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg, + u32 value) +{ + vdma_write(chan, chan->desc_offset + reg, value); +} + +static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg) +{ + return vdma_read(chan, chan->ctrl_offset + reg); +} + +static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg, + u32 value) +{ + vdma_write(chan, chan->ctrl_offset + reg, value); +} + +static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg, + u32 clr) +{ + vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr); +} + +static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg, + u32 set) +{ + vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set); +} + +/* ----------------------------------------------------------------------------- + * Descriptors and segments alloc and free + */ + +/** + * xilinx_vdma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific VDMA channel + * + * Return: The allocated segment on success and NULL on failure. + */ +static struct xilinx_vdma_tx_segment * +xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan) +{ + struct xilinx_vdma_tx_segment *segment; + dma_addr_t phys; + + segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); + if (!segment) + return NULL; + + memset(segment, 0, sizeof(*segment)); + segment->phys = phys; + + return segment; +} + +/** + * xilinx_vdma_free_tx_segment - Free transaction segment + * @chan: Driver specific VDMA channel + * @segment: VDMA transaction segment + */ +static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan, + struct xilinx_vdma_tx_segment *segment) +{ + dma_pool_free(chan->desc_pool, segment, segment->phys); +} + +/** + * xilinx_vdma_tx_descriptor - Allocate transaction descriptor + * @chan: Driver specific VDMA channel + * + * Return: The allocated descriptor on success and NULL on failure. 
+ */ +static struct xilinx_vdma_tx_descriptor * +xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) +{ + struct xilinx_vdma_tx_descriptor *desc; + unsigned long flags; + + if (chan->allocated_desc) + return chan->allocated_desc; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return NULL; + + spin_lock_irqsave(&chan->lock, flags); + chan->allocated_desc = desc; + spin_unlock_irqrestore(&chan->lock, flags); + + INIT_LIST_HEAD(&desc->segments); + + return desc; +} + +/** + * xilinx_vdma_free_tx_descriptor - Free transaction descriptor + * @chan: Driver specific VDMA channel + * @desc: VDMA transaction descriptor + */ +static void +xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, + struct xilinx_vdma_tx_descriptor *desc) +{ + struct xilinx_vdma_tx_segment *segment, *next; + + if (!desc) + return; + + list_for_each_entry_safe(segment, next, &desc->segments, node) { + list_del(&segment->node); + xilinx_vdma_free_tx_segment(chan, segment); + } + + kfree(desc); +} + +/* Required functions */ + +/** + * xilinx_vdma_free_desc_list - Free descriptors list + * @chan: Driver specific VDMA channel + * @list: List to parse and delete the descriptor + */ +static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan, + struct list_head *list) +{ + struct xilinx_vdma_tx_descriptor *desc, *next; + + list_for_each_entry_safe(desc, next, list, node) { + list_del(&desc->node); + xilinx_vdma_free_tx_descriptor(chan, desc); + } +} + +/** + * xilinx_vdma_free_descriptors - Free channel descriptors + * @chan: Driver specific VDMA channel + */ +static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan) +{ + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + xilinx_vdma_free_desc_list(chan, &chan->pending_list); + xilinx_vdma_free_desc_list(chan, &chan->done_list); + + xilinx_vdma_free_tx_descriptor(chan, chan->active_desc); + chan->active_desc = NULL; + + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_vdma_free_chan_resources - Free channel resources + * @dchan: DMA channel + */ +static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan) +{ + struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); + + dev_dbg(chan->dev, "Free all channel resources.\n"); + + xilinx_vdma_free_descriptors(chan); + dma_pool_destroy(chan->desc_pool); + chan->desc_pool = NULL; +} + +/** + * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors + * @chan: Driver specific VDMA channel + */ +static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan) +{ + struct xilinx_vdma_tx_descriptor *desc, *next; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { + dma_async_tx_callback callback; + void *callback_param; + + /* Remove from the list of running transactions */ + list_del(&desc->node); + + /* Run the link descriptor callback function */ + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, flags); + } + + /* Run any dependencies, then free the descriptor */ + dma_run_dependencies(&desc->async_tx); + xilinx_vdma_free_tx_descriptor(chan, desc); + } + + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_vdma_do_tasklet - Schedule completion tasklet + * @data: Pointer to the Xilinx VDMA channel structure + */ +static void xilinx_vdma_do_tasklet(unsigned long data) +{ + struct 
xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data; + + xilinx_vdma_chan_desc_cleanup(chan); +} + +/** + * xilinx_vdma_alloc_chan_resources - Allocate channel resources + * @dchan: DMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); + + /* Has this channel already been allocated? */ + if (chan->desc_pool) + return 0; + + /* + * We need the descriptor to be aligned to 64bytes + * for meeting Xilinx VDMA specification requirement. + */ + chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", + chan->dev, + sizeof(struct xilinx_vdma_tx_segment), + __alignof__(struct xilinx_vdma_tx_segment), 0); + if (!chan->desc_pool) { + dev_err(chan->dev, + "unable to allocate channel %d descriptor pool\n", + chan->id); + return -ENOMEM; + } + + dma_cookie_init(dchan); + return 0; +} + +/** + * xilinx_vdma_tx_status - Get VDMA transaction status + * @dchan: DMA channel + * @cookie: Transaction identifier + * @txstate: Transaction state + * + * Return: DMA transaction status + */ +static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(dchan, cookie, txstate); +} + +/** + * xilinx_vdma_is_running - Check if VDMA channel is running + * @chan: Driver specific VDMA channel + * + * Return: '1' if running, '0' if not. + */ +static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan) +{ + return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & + XILINX_VDMA_DMASR_HALTED) && + (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & + XILINX_VDMA_DMACR_RUNSTOP); +} + +/** + * xilinx_vdma_is_idle - Check if VDMA channel is idle + * @chan: Driver specific VDMA channel + * + * Return: '1' if idle, '0' if not. 
+ */ +static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) +{ + return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & + XILINX_VDMA_DMASR_IDLE; +} + +/** + * xilinx_vdma_halt - Halt VDMA channel + * @chan: Driver specific VDMA channel + */ +static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) +{ + int loop = XILINX_VDMA_LOOP_COUNT; + + vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); + + /* Wait for the hardware to halt */ + do { + if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & + XILINX_VDMA_DMASR_HALTED) + break; + } while (loop--); + + if (!loop) { + dev_err(chan->dev, "Cannot stop channel %p: %x\n", + chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); + chan->err = true; + } + + return; +} + +/** + * xilinx_vdma_start - Start VDMA channel + * @chan: Driver specific VDMA channel + */ +static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) +{ + int loop = XILINX_VDMA_LOOP_COUNT; + + vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); + + /* Wait for the hardware to start */ + do { + if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & + XILINX_VDMA_DMASR_HALTED)) + break; + } while (loop--); + + if (!loop) { + dev_err(chan->dev, "Cannot start channel %p: %x\n", + chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); + + chan->err = true; + } + + return; +} + +/** + * xilinx_vdma_start_transfer - Starts VDMA transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) +{ + struct xilinx_vdma_config *config = &chan->config; + struct xilinx_vdma_tx_descriptor *desc; + unsigned long flags; + u32 reg; + struct xilinx_vdma_tx_segment *head, *tail = NULL; + + if (chan->err) + return; + + spin_lock_irqsave(&chan->lock, flags); + + /* There's already an active descriptor, bail out. */ + if (chan->active_desc) + goto out_unlock; + + if (list_empty(&chan->pending_list)) + goto out_unlock; + + desc = list_first_entry(&chan->pending_list, + struct xilinx_vdma_tx_descriptor, node); + + /* If it is SG mode and hardware is busy, cannot submit */ + if (chan->has_sg && xilinx_vdma_is_running(chan) && + !xilinx_vdma_is_idle(chan)) { + dev_dbg(chan->dev, "DMA controller still busy\n"); + goto out_unlock; + } + + /* + * If hardware is idle, then all descriptors on the running lists are + * done, start new transfers + */ + if (chan->has_sg) { + head = list_first_entry(&desc->segments, + struct xilinx_vdma_tx_segment, node); + tail = list_entry(desc->segments.prev, + struct xilinx_vdma_tx_segment, node); + + vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys); + } + + /* Configure the hardware using info in the config structure */ + reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); + + if (config->frm_cnt_en) + reg |= XILINX_VDMA_DMACR_FRAMECNT_EN; + else + reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; + + /* + * With SG, start with circular mode, so that BDs can be fetched. 
+ * In direct register mode, if not parking, enable circular mode + */ + if (chan->has_sg || !config->park) + reg |= XILINX_VDMA_DMACR_CIRC_EN; + + if (config->park) + reg &= ~XILINX_VDMA_DMACR_CIRC_EN; + + vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg); + + if (config->park && (config->park_frm >= 0) && + (config->park_frm < chan->num_frms)) { + if (chan->direction == DMA_MEM_TO_DEV) + vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, + config->park_frm << + XILINX_VDMA_PARK_PTR_RD_REF_SHIFT); + else + vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, + config->park_frm << + XILINX_VDMA_PARK_PTR_WR_REF_SHIFT); + } + + /* Start the hardware */ + xilinx_vdma_start(chan); + + if (chan->err) + goto out_unlock; + + /* Start the transfer */ + if (chan->has_sg) { + vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys); + } else { + struct xilinx_vdma_tx_segment *segment, *last = NULL; + int i = 0; + + list_for_each_entry(segment, &desc->segments, node) { + vdma_desc_write(chan, + XILINX_VDMA_REG_START_ADDRESS(i++), + segment->hw.buf_addr); + last = segment; + } + + if (!last) + goto out_unlock; + + /* HW expects these parameters to be same for one transaction */ + vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); + vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE, + last->hw.stride); + vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); + } + + list_del(&desc->node); + chan->active_desc = desc; + +out_unlock: + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_vdma_issue_pending - Issue pending transactions + * @dchan: DMA channel + */ +static void xilinx_vdma_issue_pending(struct dma_chan *dchan) +{ + struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); + + xilinx_vdma_start_transfer(chan); +} + +/** + * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete + * @chan : xilinx DMA channel + * + * CONTEXT: hardirq + */ +static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) +{ + struct xilinx_vdma_tx_descriptor *desc; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + desc = chan->active_desc; + if (!desc) { + dev_dbg(chan->dev, "no running descriptors\n"); + goto out_unlock; + } + + dma_cookie_complete(&desc->async_tx); + list_add_tail(&desc->node, &chan->done_list); + + chan->active_desc = NULL; + +out_unlock: + spin_unlock_irqrestore(&chan->lock, flags); +} + +/** + * xilinx_vdma_reset - Reset VDMA channel + * @chan: Driver specific VDMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) +{ + int loop = XILINX_VDMA_LOOP_COUNT; + u32 tmp; + + vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); + + tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & + XILINX_VDMA_DMACR_RESET; + + /* Wait for the hardware to finish reset */ + do { + tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & + XILINX_VDMA_DMACR_RESET; + } while (loop-- && tmp); + + if (!loop) { + dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", + vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), + vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); + return -ETIMEDOUT; + } + + chan->err = false; + + return 0; +} + +/** + * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts + * @chan: Driver specific VDMA channel + * + * Return: '0' on success and failure value on error + */ +static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan) +{ + int err; + + /* Reset VDMA */ + err = xilinx_vdma_reset(chan); + if (err) + return err; + + /* 
Enable interrupts */ + vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, + XILINX_VDMA_DMAXR_ALL_IRQ_MASK); + + return 0; +} + +/** + * xilinx_vdma_irq_handler - VDMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the Xilinx VDMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) +{ + struct xilinx_vdma_chan *chan = data; + u32 status; + + /* Read the status and ack the interrupts. */ + status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR); + if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK)) + return IRQ_NONE; + + vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, + status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK); + + if (status & XILINX_VDMA_DMASR_ERR_IRQ) { + /* + * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the + * error is recoverable, ignore it. Otherwise flag the error. + * + * Only recoverable errors can be cleared in the DMASR register, + * make sure not to write to other error bits to 1. + */ + u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK; + vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, + errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK); + + if (!chan->flush_on_fsync || + (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) { + dev_err(chan->dev, + "Channel %p has errors %x, cdr %x tdr %x\n", + chan, errors, + vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC), + vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC)); + chan->err = true; + } + } + + if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) { + /* + * Device takes too long to do the transfer when user requires + * responsiveness. + */ + dev_dbg(chan->dev, "Inter-packet latency too long\n"); + } + + if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { + xilinx_vdma_complete_descriptor(chan); + xilinx_vdma_start_transfer(chan); + } + + tasklet_schedule(&chan->tasklet); + return IRQ_HANDLED; +} + +/** + * xilinx_vdma_tx_submit - Submit DMA transaction + * @tx: Async transaction descriptor + * + * Return: cookie value on success and failure value on error + */ +static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx); + struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan); + dma_cookie_t cookie; + unsigned long flags; + int err; + + if (chan->err) { + /* + * If reset fails, need to hard reset the system. + * Channel is no longer functional + */ + err = xilinx_vdma_chan_reset(chan); + if (err < 0) + return err; + } + + spin_lock_irqsave(&chan->lock, flags); + + cookie = dma_cookie_assign(tx); + + /* Append the transaction to the pending transactions queue. 
*/ + list_add_tail(&desc->node, &chan->pending_list); + + /* Free the allocated desc */ + chan->allocated_desc = NULL; + + spin_unlock_irqrestore(&chan->lock, flags); + + return cookie; +} + +/** + * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_vdma_tx_descriptor *desc; + struct xilinx_vdma_tx_segment *segment, *prev = NULL; + struct xilinx_vdma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_vdma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_vdma_tx_submit; + async_tx_ack(&desc->async_tx); + + /* Allocate the link descriptor from DMA pool */ + segment = xilinx_vdma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* Fill in the hardware descriptor */ + hw = &segment->hw; + hw->vsize = xt->numf; + hw->hsize = xt->sgl[0].size; + hw->stride = xt->sgl[0].icg << + XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; + hw->stride |= chan->config.frm_dly << + XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; + + if (xt->dir != DMA_MEM_TO_DEV) + hw->buf_addr = xt->dst_start; + else + hw->buf_addr = xt->src_start; + + /* Link the previous next descriptor to current */ + prev = list_last_entry(&desc->segments, + struct xilinx_vdma_tx_segment, node); + prev->hw.next_desc = segment->phys; + + /* Insert the segment into the descriptor segments list. */ + list_add_tail(&segment->node, &desc->segments); + + prev = segment; + + /* Link the last hardware descriptor with the first. */ + segment = list_first_entry(&desc->segments, + struct xilinx_vdma_tx_segment, node); + prev->hw.next_desc = segment->phys; + + return &desc->async_tx; + +error: + xilinx_vdma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_vdma_terminate_all - Halt the channel and free descriptors + * @chan: Driver specific VDMA Channel pointer + */ +static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan) +{ + /* Halt the DMA engine */ + xilinx_vdma_halt(chan); + + /* Remove and free all of the descriptors in the lists */ + xilinx_vdma_free_descriptors(chan); +} + +/** + * xilinx_vdma_channel_set_config - Configure VDMA channel + * Run-time configuration for Axi VDMA, supports: + * . halt the channel + * . configure interrupt coalescing and inter-packet delay threshold + * . start/stop parking + * . 
enable genlock + * + * @dchan: DMA channel + * @cfg: VDMA device configuration pointer + * + * Return: '0' on success and failure value on error + */ +int xilinx_vdma_channel_set_config(struct dma_chan *dchan, + struct xilinx_vdma_config *cfg) +{ + struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); + u32 dmacr; + + if (cfg->reset) + return xilinx_vdma_chan_reset(chan); + + dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); + + chan->config.frm_dly = cfg->frm_dly; + chan->config.park = cfg->park; + + /* genlock settings */ + chan->config.gen_lock = cfg->gen_lock; + chan->config.master = cfg->master; + + if (cfg->gen_lock && chan->genlock) { + dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN; + dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT; + } + + chan->config.frm_cnt_en = cfg->frm_cnt_en; + if (cfg->park) + chan->config.park_frm = cfg->park_frm; + else + chan->config.park_frm = -1; + + chan->config.coalesc = cfg->coalesc; + chan->config.delay = cfg->delay; + + if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) { + dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT; + chan->config.coalesc = cfg->coalesc; + } + + if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) { + dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT; + chan->config.delay = cfg->delay; + } + + /* FSync Source selection */ + dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK; + dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT; + + vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr); + + return 0; +} +EXPORT_SYMBOL(xilinx_vdma_channel_set_config); + +/** + * xilinx_vdma_device_control - Configure DMA channel of the device + * @dchan: DMA Channel pointer + * @cmd: DMA control command + * @arg: Channel configuration + * + * Return: '0' on success and failure value on error + */ +static int xilinx_vdma_device_control(struct dma_chan *dchan, + enum dma_ctrl_cmd cmd, unsigned long arg) +{ + struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); + + if (cmd != DMA_TERMINATE_ALL) + return -ENXIO; + + xilinx_vdma_terminate_all(chan); + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Probe and remove + */ + +/** + * xilinx_vdma_chan_remove - Per Channel remove function + * @chan: Driver specific VDMA channel + */ +static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) +{ + /* Disable all interrupts */ + vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, + XILINX_VDMA_DMAXR_ALL_IRQ_MASK); + + if (chan->irq > 0) + free_irq(chan->irq, chan); + + tasklet_kill(&chan->tasklet); + + list_del(&chan->common.device_node); +} + +/** + * xilinx_vdma_chan_probe - Per Channel Probing + * It get channel features from the device tree entry and + * initialize special channel handling routines + * + * @xdev: Driver specific device structure + * @node: Device node + * + * Return: '0' on success and failure value on error + */ +static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, + struct device_node *node) +{ + struct xilinx_vdma_chan *chan; + bool has_dre = false; + u32 value, width; + int err; + + /* Allocate and initialize the channel structure */ + chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + + chan->dev = xdev->dev; + chan->xdev = xdev; + chan->has_sg = xdev->has_sg; + + spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->pending_list); + INIT_LIST_HEAD(&chan->done_list); + + /* Retrieve the channel properties from the device tree */ + has_dre = of_property_read_bool(node, "xlnx,include-dre"); + + chan->genlock = 
of_property_read_bool(node, "xlnx,genlock-mode"); + + err = of_property_read_u32(node, "xlnx,datawidth", &value); + if (err) { + dev_err(xdev->dev, "missing xlnx,datawidth property\n"); + return err; + } + width = value >> 3; /* Convert bits to bytes */ + + /* If data width is greater than 8 bytes, DRE is not in hw */ + if (width > 8) + has_dre = false; + + if (!has_dre) + xdev->common.copy_align = fls(width - 1); + + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { + chan->direction = DMA_MEM_TO_DEV; + chan->id = 0; + + chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET; + chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; + + if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || + xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S) + chan->flush_on_fsync = true; + } else if (of_device_is_compatible(node, + "xlnx,axi-vdma-s2mm-channel")) { + chan->direction = DMA_DEV_TO_MEM; + chan->id = 1; + + chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET; + chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; + + if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || + xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM) + chan->flush_on_fsync = true; + } else { + dev_err(xdev->dev, "Invalid channel compatible node\n"); + return -EINVAL; + } + + /* Request the interrupt */ + chan->irq = irq_of_parse_and_map(node, 0); + err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED, + "xilinx-vdma-controller", chan); + if (err) { + dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); + return err; + } + + /* Initialize the tasklet */ + tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet, + (unsigned long)chan); + + /* + * Initialize the DMA channel and add it to the DMA engine channels + * list. + */ + chan->common.device = &xdev->common; + + list_add_tail(&chan->common.device_node, &xdev->common.channels); + xdev->chan[chan->id] = chan; + + /* Reset the channel */ + err = xilinx_vdma_chan_reset(chan); + if (err < 0) { + dev_err(xdev->dev, "Reset channel failed\n"); + return err; + } + + return 0; +} + +/** + * of_dma_xilinx_xlate - Translation function + * @dma_spec: Pointer to DMA specifier as found in the device tree + * @ofdma: Pointer to DMA controller data + * + * Return: DMA channel pointer on success and NULL on error + */ +static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct xilinx_vdma_device *xdev = ofdma->of_dma_data; + int chan_id = dma_spec->args[0]; + + if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE) + return NULL; + + return dma_get_slave_channel(&xdev->chan[chan_id]->common); +} + +/** + * xilinx_vdma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int xilinx_vdma_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct xilinx_vdma_device *xdev; + struct device_node *child; + struct resource *io; + u32 num_frames; + int i, err; + + /* Allocate and initialize the DMA engine structure */ + xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); + if (!xdev) + return -ENOMEM; + + xdev->dev = &pdev->dev; + + /* Request and map I/O memory */ + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xdev->regs = devm_ioremap_resource(&pdev->dev, io); + if (IS_ERR(xdev->regs)) + return PTR_ERR(xdev->regs); + + /* Retrieve the DMA engine properties from the device tree */ + xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); + + err = of_property_read_u32(node, 
"xlnx,num-fstores", &num_frames); + if (err < 0) { + dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); + return err; + } + + err = of_property_read_u32(node, "xlnx,flush-fsync", + &xdev->flush_on_fsync); + if (err < 0) + dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n"); + + /* Initialize the DMA engine */ + xdev->common.dev = &pdev->dev; + + INIT_LIST_HEAD(&xdev->common.channels); + dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); + dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); + + xdev->common.device_alloc_chan_resources = + xilinx_vdma_alloc_chan_resources; + xdev->common.device_free_chan_resources = + xilinx_vdma_free_chan_resources; + xdev->common.device_prep_interleaved_dma = + xilinx_vdma_dma_prep_interleaved; + xdev->common.device_control = xilinx_vdma_device_control; + xdev->common.device_tx_status = xilinx_vdma_tx_status; + xdev->common.device_issue_pending = xilinx_vdma_issue_pending; + + platform_set_drvdata(pdev, xdev); + + /* Initialize the channels */ + for_each_child_of_node(node, child) { + err = xilinx_vdma_chan_probe(xdev, child); + if (err < 0) + goto error; + } + + for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) + if (xdev->chan[i]) + xdev->chan[i]->num_frms = num_frames; + + /* Register the DMA engine with the core */ + dma_async_device_register(&xdev->common); + + err = of_dma_controller_register(node, of_dma_xilinx_xlate, + xdev); + if (err < 0) { + dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dma_async_device_unregister(&xdev->common); + goto error; + } + + dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); + + return 0; + +error: + for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) + if (xdev->chan[i]) + xilinx_vdma_chan_remove(xdev->chan[i]); + + return err; +} + +/** + * xilinx_vdma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int xilinx_vdma_remove(struct platform_device *pdev) +{ + struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev); + int i; + + of_dma_controller_free(pdev->dev.of_node); + + dma_async_device_unregister(&xdev->common); + + for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) + if (xdev->chan[i]) + xilinx_vdma_chan_remove(xdev->chan[i]); + + return 0; +} + +static const struct of_device_id xilinx_vdma_of_ids[] = { + { .compatible = "xlnx,axi-vdma-1.00.a",}, + {} +}; + +static struct platform_driver xilinx_vdma_driver = { + .driver = { + .name = "xilinx-vdma", + .owner = THIS_MODULE, + .of_match_table = xilinx_vdma_of_ids, + }, + .probe = xilinx_vdma_probe, + .remove = xilinx_vdma_remove, +}; + +module_platform_driver(xilinx_vdma_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx VDMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/amba/xilinx_dma.h new file mode 100644 index 000000000000..34b98f276ed0 --- /dev/null +++ b/include/linux/amba/xilinx_dma.h @@ -0,0 +1,47 @@ +/* + * Xilinx DMA Engine drivers support header file + * + * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DMA_XILINX_DMA_H +#define __DMA_XILINX_DMA_H + +#include +#include + +/** + * struct xilinx_vdma_config - VDMA Configuration structure + * @frm_dly: Frame delay + * @gen_lock: Whether in gen-lock mode + * @master: Master that it syncs to + * @frm_cnt_en: Enable frame count enable + * @park: Whether wants to park + * @park_frm: Frame to park on + * @coalesc: Interrupt coalescing threshold + * @delay: Delay counter + * @reset: Reset Channel + * @ext_fsync: External Frame Sync source + */ +struct xilinx_vdma_config { + int frm_dly; + int gen_lock; + int master; + int frm_cnt_en; + int park; + int park_frm; + int coalesc; + int delay; + int reset; + int ext_fsync; +}; + +int xilinx_vdma_channel_set_config(struct dma_chan *dchan, + struct xilinx_vdma_config *cfg); + +#endif From af98715f28174fabea8f1bea3d297b8f0ad1f84b Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 15 Apr 2014 17:13:33 +0200 Subject: [PATCH 03/45] dma: mmp_pdma: Fix the #dma-channels DT property documentation The property is optional and defaults to 32. Document it as such. Cc: devicetree@vger.kernel.org Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/mmp-dma.txt | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/mmp-dma.txt b/Documentation/devicetree/bindings/dma/mmp-dma.txt index a4fa4efa1d83..7a802f64e5bd 100644 --- a/Documentation/devicetree/bindings/dma/mmp-dma.txt +++ b/Documentation/devicetree/bindings/dma/mmp-dma.txt @@ -1,17 +1,20 @@ * MARVELL MMP DMA controller Marvell Peripheral DMA Controller -Used platfroms: pxa688, pxa910, pxa3xx, etc +Used platforms: pxa688, pxa910, pxa3xx, etc Required properties: - compatible: Should be "marvell,pdma-1.0" - reg: Should contain DMA registers location and length. - interrupts: Either contain all of the per-channel DMA interrupts or one irq for pdma device -- #dma-channels: Number of DMA channels supported by the controller. + +Optional properties: +- #dma-channels: Number of DMA channels supported by the controller (defaults + to 32 when not specified) "marvell,pdma-1.0" -Used platfroms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688. +Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688. Examples: @@ -45,7 +48,7 @@ pdma: dma-controller@d4000000 { Marvell Two Channel DMA Controller used specifically for audio -Used platfroms: pxa688, pxa910 +Used platforms: pxa688, pxa910 Required properties: - compatible: Should be "marvell,adma-1.0" or "marvell,pxa910-squ" From a2a7c176cc4ae48ec1589429ef12b89975963b48 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 15 Apr 2014 17:13:34 +0200 Subject: [PATCH 04/45] dma: mmp_pdma: Simplify access to channel drcmr value As the physical channel and virtual channel point to each other, pchan->phy->vchan is always equal to pchan. Simplify the code accordingly. 
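The simplification relies on the mutual back-pointers between the virtual and
physical channel structures: once a physical channel is claimed, its vchan
field points back at the virtual channel that owns it, so walking
pchan->phy->vchan only leads back to pchan. A minimal sketch of that
invariant, using hypothetical struct names rather than the driver's real
definitions:

struct phy_chan;

struct virt_chan {
	struct phy_chan *phy;		/* physical channel currently bound to this vchan */
	u32 drcmr;			/* DMA request mapping owned by the vchan */
};

struct phy_chan {
	struct virt_chan *vchan;	/* back-pointer, set when the phy is claimed */
};

/*
 * With phy->vchan == pchan by construction, the long walk
 * pchan->phy->vchan->drcmr always yields pchan->drcmr, so the shorter
 * access is equivalent and avoids two needless dereferences.
 */
static u32 vchan_drcmr(struct virt_chan *pchan)
{
	return pchan->drcmr;
}
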
Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index bf02e7beb51a..f3fe92da295f 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -277,7 +277,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan) return; /* clear the channel mapping in DRCMR */ - reg = DRCMR(pchan->phy->vchan->drcmr); + reg = DRCMR(pchan->drcmr); writel(0, pchan->phy->base + reg); spin_lock_irqsave(&pdev->phy_lock, flags); From 593d9c2e10bc66f1c5cf1adb5eeb709432eb616d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 15 Apr 2014 17:13:35 +0200 Subject: [PATCH 05/45] dma: mmp_pdma: Fix physical channel memory allocation size Use sizeof(*var) instead of sizeof(type) when calling devm_k*alloc(). This avoids using the wrong type as was done to allocate the physical channels array. Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index f3fe92da295f..7affa533d2a8 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -858,8 +858,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) struct mmp_pdma_chan *chan; int ret; - chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan), - GFP_KERNEL); + chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL); if (chan == NULL) return -ENOMEM; @@ -946,8 +945,7 @@ static int mmp_pdma_probe(struct platform_device *op) irq_num++; } - pdev->phy = devm_kcalloc(pdev->dev, - dma_channels, sizeof(struct mmp_pdma_chan), + pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy), GFP_KERNEL); if (pdev->phy == NULL) return -ENOMEM; From 91ea74e9ec5c584eef1dcd69554b8315c1ebb0d9 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 2 Apr 2014 20:16:51 -0700 Subject: [PATCH 06/45] DMA: shdma: tidyup callback chunk finder Current shdma is using "last" which indicates last desc which needs to have callback function. 
But that desc's chunks is always 1, we can use it as finder Signed-off-by: Kuninori Morimoto [reflown changelog for readablity] Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 52396771acbe..6786ecbd5ed4 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -73,8 +73,7 @@ static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) { struct shdma_desc *chunk, *c, *desc = - container_of(tx, struct shdma_desc, async_tx), - *last = desc; + container_of(tx, struct shdma_desc, async_tx); struct shdma_chan *schan = to_shdma_chan(tx->chan); dma_async_tx_callback callback = tx->callback; dma_cookie_t cookie; @@ -98,19 +97,20 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) &chunk->node == &schan->ld_free)) break; chunk->mark = DESC_SUBMITTED; - /* Callback goes to the last chunk */ - chunk->async_tx.callback = NULL; + if (chunk->chunks == 1) { + chunk->async_tx.callback = callback; + chunk->async_tx.callback_param = tx->callback_param; + } else { + /* Callback goes to the last chunk */ + chunk->async_tx.callback = NULL; + } chunk->cookie = cookie; list_move_tail(&chunk->node, &schan->ld_queue); - last = chunk; dev_dbg(schan->dev, "submit #%d@%p on %d\n", - tx->cookie, &last->async_tx, schan->id); + tx->cookie, &chunk->async_tx, schan->id); } - last->async_tx.callback = callback; - last->async_tx.callback_param = tx->callback_param; - if (power_up) { int ret; schan->pm_state = SHDMA_PM_BUSY; From dfbb85cab5f0819d0424a3637b03e7892704fa42 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 2 Apr 2014 20:17:00 -0700 Subject: [PATCH 07/45] DMA: shdma: add cyclic transfer support This patch add cyclic transfer support and enables dmaengine_prep_dma_cyclic() Signed-off-by: Kuninori Morimoto [reflown changelog for readablity] Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 72 +++++++++++++++++++++++++++++++++---- include/linux/shdma-base.h | 1 + 2 files changed, 66 insertions(+), 7 deletions(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 6786ecbd5ed4..974794cdb6ed 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -304,6 +304,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) dma_async_tx_callback callback = NULL; void *param = NULL; unsigned long flags; + LIST_HEAD(cyclic_list); spin_lock_irqsave(&schan->chan_lock, flags); list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { @@ -369,10 +370,16 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) if (((desc->mark == DESC_COMPLETED || desc->mark == DESC_WAITING) && async_tx_test_ack(&desc->async_tx)) || all) { - /* Remove from ld_queue list */ - desc->mark = DESC_IDLE; - list_move(&desc->node, &schan->ld_free); + if (all || !desc->cyclic) { + /* Remove from ld_queue list */ + desc->mark = DESC_IDLE; + list_move(&desc->node, &schan->ld_free); + } else { + /* reuse as cyclic */ + desc->mark = DESC_SUBMITTED; + list_move_tail(&desc->node, &cyclic_list); + } if (list_empty(&schan->ld_queue)) { dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); @@ -389,6 +396,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) */ schan->dma_chan.completed_cookie = schan->dma_chan.cookie; + 
list_splice_tail(&cyclic_list, &schan->ld_queue); + spin_unlock_irqrestore(&schan->chan_lock, flags); if (callback) @@ -521,7 +530,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, */ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, - enum dma_transfer_direction direction, unsigned long flags) + enum dma_transfer_direction direction, unsigned long flags, bool cyclic) { struct scatterlist *sg; struct shdma_desc *first = NULL, *new = NULL /* compiler... */; @@ -569,7 +578,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, if (!new) goto err_get_desc; - new->chunks = chunks--; + new->cyclic = cyclic; + if (cyclic) + new->chunks = 1; + else + new->chunks = chunks--; list_add_tail(&new->node, &tx_list); } while (len); } @@ -612,7 +625,8 @@ static struct dma_async_tx_descriptor *shdma_prep_memcpy( sg_dma_address(&sg) = dma_src; sg_dma_len(&sg) = len; - return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); + return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, + flags, false); } static struct dma_async_tx_descriptor *shdma_prep_slave_sg( @@ -640,7 +654,50 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg( slave_addr = ops->slave_addr(schan); return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, - direction, flags); + direction, flags, false); +} + +struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct shdma_chan *schan = to_shdma_chan(chan); + struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); + const struct shdma_ops *ops = sdev->ops; + unsigned int sg_len = buf_len / period_len; + int slave_id = schan->slave_id; + dma_addr_t slave_addr; + struct scatterlist sgl[sg_len]; + int i; + + if (!chan) + return NULL; + + BUG_ON(!schan->desc_num); + + /* Someone calling slave DMA on a generic channel? 
*/ + if (slave_id < 0 || (buf_len < period_len)) { + dev_warn(schan->dev, + "%s: bad parameter: buf_len=%d, period_len=%d, id=%d\n", + __func__, buf_len, period_len, slave_id); + return NULL; + } + + slave_addr = ops->slave_addr(schan); + + sg_init_table(sgl, sg_len); + for (i = 0; i < sg_len; i++) { + dma_addr_t src = buf_addr + (period_len * i); + + sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, + offset_in_page(src)); + sg_dma_address(&sgl[i]) = src; + sg_dma_len(&sgl[i]) = period_len; + } + + return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, + direction, flags, true); } static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -915,6 +972,7 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev, /* Compulsory for DMA_SLAVE fields */ dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; + dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; dma_dev->device_control = shdma_control; dma_dev->dev = dev; diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index f92c0a43c54c..abdf1f229dc3 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -54,6 +54,7 @@ struct shdma_desc { dma_cookie_t cookie; int chunks; int mark; + bool cyclic; /* used as cyclic transfer */ }; struct shdma_chan { From cf7f7a2b4b1ec2305d2ba518c69109cc110e70f8 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 18 Apr 2014 16:17:44 +0800 Subject: [PATCH 08/45] DMA: Freescale: remove the unnecessary FSL_DMA_LD_DEBUG Some codes are calling chan_dbg with FSL_DMA_LD_DEBUG surrounded, it is really unnecessary to use such a macro because chan_dbg is a wrapper of dev_dbg, we do have corresponding DEBUG macro to switch on/off dev_dbg, and most of the other codes are also calling chan_dbg directly without using FSL_DMA_LD_DEBUG. 
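For reference, a wrapper of this shape forwards straight to dev_dbg(), which
is already a no-op unless DEBUG (or dynamic debug) is enabled for the file,
so an extra #ifdef FSL_DMA_LD_DEBUG around every call site only duplicates a
switch that already exists. This is a sketch; the driver's actual macro may
differ in detail:

#define chan_dbg(chan, fmt, arg...)				\
	dev_dbg((chan)->dev, "%s: " fmt, (chan)->name, ##arg)

/*
 * A call such as
 *	chan_dbg(chan, "LD %p allocated\n", desc);
 * therefore needs no surrounding #ifdef: when DEBUG is off, the
 * dev_dbg() underneath compiles away anyway.
 */
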
Signed-off-by: Hongbo Zhang Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index f157c6f76b32..ec5042044d67 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -426,9 +426,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) desc->async_tx.tx_submit = fsl_dma_tx_submit; desc->async_tx.phys = pdesc; -#ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p allocated\n", desc); -#endif return desc; } @@ -479,9 +477,7 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan, list_for_each_entry_safe(desc, _desc, list, node) { list_del(&desc->node); -#ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); -#endif dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } } @@ -493,9 +489,7 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, list_for_each_entry_safe_reverse(desc, _desc, list, node) { list_del(&desc->node); -#ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); -#endif dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } } @@ -832,9 +826,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, /* Run the link descriptor callback function */ if (txd->callback) { -#ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p callback\n", desc); -#endif txd->callback(txd->callback_param); } @@ -842,9 +834,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, dma_run_dependencies(txd); dma_descriptor_unmap(txd); -#ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); -#endif dma_pool_free(chan->desc_pool, desc, txd->phys); } From ccdce9a041185b2ed3009bfe426130eecdf4f551 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 18 Apr 2014 16:17:45 +0800 Subject: [PATCH 09/45] DMA: Freescale: unify register access methods Methods of accessing DMA controller registers are inconsistent, some registers are accessed by DMA_IN/OUT directly, while others are accessed by functions get/set_* which are wrappers of DMA_IN/OUT, and even for the BCR register, it is read by get_bcr but written by DMA_OUT. This patch unifies the inconsistent methods, all registers are accessed by get/set_* now. 
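As a usage note, the benefit shows up in read-modify-write sequences: once
every register has a get_*/set_* pair, the access width and endianness
handling stay inside DMA_IN()/DMA_OUT() and callers read uniformly. A
hypothetical helper (not part of the patch) illustrating the idiom:

static void fsldma_example_pause(struct fsldma_chan *chan)
{
	u32 mode = get_mr(chan);	/* one accessor pair per register... */

	mode &= ~FSL_DMA_MR_CS;		/* ...so callers only do plain RMW */
	set_mr(chan, mode);
}
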
Signed-off-by: Hongbo Zhang Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 52 ++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index ec5042044d67..5f32cb8a5767 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -61,6 +61,16 @@ static u32 get_sr(struct fsldma_chan *chan) return DMA_IN(chan, &chan->regs->sr, 32); } +static void set_mr(struct fsldma_chan *chan, u32 val) +{ + DMA_OUT(chan, &chan->regs->mr, val, 32); +} + +static u32 get_mr(struct fsldma_chan *chan) +{ + return DMA_IN(chan, &chan->regs->mr, 32); +} + static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) { DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); @@ -71,6 +81,11 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan) return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; } +static void set_bcr(struct fsldma_chan *chan, u32 val) +{ + DMA_OUT(chan, &chan->regs->bcr, val, 32); +} + static u32 get_bcr(struct fsldma_chan *chan) { return DMA_IN(chan, &chan->regs->bcr, 32); @@ -135,7 +150,7 @@ static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) static void dma_init(struct fsldma_chan *chan) { /* Reset the channel */ - DMA_OUT(chan, &chan->regs->mr, 0, 32); + set_mr(chan, 0); switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: @@ -144,16 +159,15 @@ static void dma_init(struct fsldma_chan *chan) * EOLNIE - End of links interrupt enable * BWC - Bandwidth sharing among channels */ - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC - | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); + set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE + | FSL_DMA_MR_EOLNIE); break; case FSL_DMA_IP_83XX: /* Set the channel to below modes: * EOTIE - End-of-transfer interrupt enable * PRC_RM - PCI read multiple */ - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE - | FSL_DMA_MR_PRC_RM, 32); + set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM); break; } } @@ -175,10 +189,10 @@ static void dma_start(struct fsldma_chan *chan) { u32 mode; - mode = DMA_IN(chan, &chan->regs->mr, 32); + mode = get_mr(chan); if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { - DMA_OUT(chan, &chan->regs->bcr, 0, 32); + set_bcr(chan, 0); mode |= FSL_DMA_MR_EMP_EN; } else { mode &= ~FSL_DMA_MR_EMP_EN; @@ -191,7 +205,7 @@ static void dma_start(struct fsldma_chan *chan) mode |= FSL_DMA_MR_CS; } - DMA_OUT(chan, &chan->regs->mr, mode, 32); + set_mr(chan, mode); } static void dma_halt(struct fsldma_chan *chan) @@ -200,7 +214,7 @@ static void dma_halt(struct fsldma_chan *chan) int i; /* read the mode register */ - mode = DMA_IN(chan, &chan->regs->mr, 32); + mode = get_mr(chan); /* * The 85xx controller supports channel abort, which will stop @@ -209,14 +223,14 @@ static void dma_halt(struct fsldma_chan *chan) */ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { mode |= FSL_DMA_MR_CA; - DMA_OUT(chan, &chan->regs->mr, mode, 32); + set_mr(chan, mode); mode &= ~FSL_DMA_MR_CA; } /* stop the DMA controller */ mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); - DMA_OUT(chan, &chan->regs->mr, mode, 32); + set_mr(chan, mode); /* wait for the DMA controller to become idle */ for (i = 0; i < 100; i++) { @@ -245,7 +259,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) { u32 mode; - mode = DMA_IN(chan, &chan->regs->mr, 32); + mode = get_mr(chan); switch (size) { case 0: @@ -259,7 +273,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) break; } - DMA_OUT(chan, &chan->regs->mr, 
mode, 32); + set_mr(chan, mode); } /** @@ -277,7 +291,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) { u32 mode; - mode = DMA_IN(chan, &chan->regs->mr, 32); + mode = get_mr(chan); switch (size) { case 0: @@ -291,7 +305,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) break; } - DMA_OUT(chan, &chan->regs->mr, mode, 32); + set_mr(chan, mode); } /** @@ -312,10 +326,10 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) BUG_ON(size > 1024); - mode = DMA_IN(chan, &chan->regs->mr, 32); + mode = get_mr(chan); mode |= (__ilog2(size) << 24) & 0x0f000000; - DMA_OUT(chan, &chan->regs->mr, mode, 32); + set_mr(chan, mode); } /** @@ -889,9 +903,9 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { u32 mode; - mode = DMA_IN(chan, &chan->regs->mr, 32); + mode = get_mr(chan); mode &= ~FSL_DMA_MR_CS; - DMA_OUT(chan, &chan->regs->mr, mode, 32); + set_mr(chan, mode); } /* From 867dfa5dfcf4545b73e39ce95bf6abdafacf95bf Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 18 Apr 2014 16:17:46 +0800 Subject: [PATCH 10/45] DMA: Freescale: remove attribute DMA_INTERRUPT of dmaengine Delete attribute DMA_INTERRUPT because fsldma doesn't support this function, exception will be thrown if talitos is used to offload xor at the same time. Signed-off-by: Hongbo Zhang Signed-off-by: Qiang Liu Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 5f32cb8a5767..b71cc04e66ca 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -527,35 +527,6 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) chan->desc_pool = NULL; } -static struct dma_async_tx_descriptor * -fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) -{ - struct fsldma_chan *chan; - struct fsl_desc_sw *new; - - if (!dchan) - return NULL; - - chan = to_fsl_chan(dchan); - - new = fsl_dma_alloc_descriptor(chan); - if (!new) { - chan_err(chan, "%s\n", msg_ld_oom); - return NULL; - } - - new->async_tx.cookie = -EBUSY; - new->async_tx.flags = flags; - - /* Insert the link descriptor to the LD ring */ - list_add_tail(&new->node, &new->tx_list); - - /* Set End-of-link to the last link descriptor of new list */ - set_ld_eol(chan, new); - - return &new->async_tx; -} - static struct dma_async_tx_descriptor * fsl_dma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, @@ -1308,12 +1279,10 @@ static int fsldma_of_probe(struct platform_device *op) fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); - dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); dma_cap_set(DMA_SG, fdev->common.cap_mask); dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; - fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; fdev->common.device_tx_status = fsl_tx_status; From 86d19a5491d08fdc5a5cb4be5a09d5f88a6b96ba Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 18 Apr 2014 16:17:47 +0800 Subject: [PATCH 11/45] DMA: Freescale: add fsl_dma_free_descriptor() to reduce code duplication There are several places where descriptors are freed using 
identical code. This patch puts this code into a function to reduce code duplication. Signed-off-by: Hongbo Zhang Signed-off-by: Qiang Liu Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index b71cc04e66ca..adc266e6d5c9 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -417,6 +417,19 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) return cookie; } +/** + * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool. + * @chan : Freescale DMA channel + * @desc: descriptor to be freed + */ +static void fsl_dma_free_descriptor(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + list_del(&desc->node); + chan_dbg(chan, "LD %p free\n", desc); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); +} + /** * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. * @chan : Freescale DMA channel @@ -489,11 +502,8 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan, { struct fsl_desc_sw *desc, *_desc; - list_for_each_entry_safe(desc, _desc, list, node) { - list_del(&desc->node); - chan_dbg(chan, "LD %p free\n", desc); - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); - } + list_for_each_entry_safe(desc, _desc, list, node) + fsl_dma_free_descriptor(chan, desc); } static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, @@ -501,11 +511,8 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, { struct fsl_desc_sw *desc, *_desc; - list_for_each_entry_safe_reverse(desc, _desc, list, node) { - list_del(&desc->node); - chan_dbg(chan, "LD %p free\n", desc); - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); - } + list_for_each_entry_safe_reverse(desc, _desc, list, node) + fsl_dma_free_descriptor(chan, desc); } /** From 2a5ecb7918d72183f0292266026d077cd2c8d3ed Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 18 Apr 2014 16:17:48 +0800 Subject: [PATCH 12/45] DMA: Freescale: move functions to avoid forward declarations These functions will be modified in the next patch in the series. By moving the function in a patch separate from the changes, it will make review easier. 
Signed-off-by: Hongbo Zhang Signed-off-by: Qiang Liu Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 190 +++++++++++++++++++++---------------------- 1 file changed, 95 insertions(+), 95 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index adc266e6d5c9..e0fec68aed25 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -458,6 +458,101 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) return desc; } +/** + * fsl_chan_xfer_ld_queue - transfer any pending transactions + * @chan : Freescale DMA channel + * + * HARDWARE STATE: idle + * LOCKING: must hold chan->desc_lock + */ +static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) +{ + struct fsl_desc_sw *desc; + + /* + * If the list of pending descriptors is empty, then we + * don't need to do any work at all + */ + if (list_empty(&chan->ld_pending)) { + chan_dbg(chan, "no pending LDs\n"); + return; + } + + /* + * The DMA controller is not idle, which means that the interrupt + * handler will start any queued transactions when it runs after + * this transaction finishes + */ + if (!chan->idle) { + chan_dbg(chan, "DMA controller still busy\n"); + return; + } + + /* + * If there are some link descriptors which have not been + * transferred, we need to start the controller + */ + + /* + * Move all elements from the queue of pending transactions + * onto the list of running transactions + */ + chan_dbg(chan, "idle, starting controller\n"); + desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); + list_splice_tail_init(&chan->ld_pending, &chan->ld_running); + + /* + * The 85xx DMA controller doesn't clear the channel start bit + * automatically at the end of a transfer. Therefore we must clear + * it in software before starting the transfer. + */ + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { + u32 mode; + + mode = get_mr(chan); + mode &= ~FSL_DMA_MR_CS; + set_mr(chan, mode); + } + + /* + * Program the descriptor's address into the DMA controller, + * then start the DMA transaction + */ + set_cdar(chan, desc->async_tx.phys); + get_cdar(chan); + + dma_start(chan); + chan->idle = false; +} + +/** + * fsldma_cleanup_descriptor - cleanup and free a single link descriptor + * @chan: Freescale DMA channel + * @desc: descriptor to cleanup and free + * + * This function is used on a descriptor which has been executed by the DMA + * controller. It will run any callbacks, submit any dependencies, and then + * free the descriptor. + */ +static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + struct dma_async_tx_descriptor *txd = &desc->async_tx; + + /* Run the link descriptor callback function */ + if (txd->callback) { + chan_dbg(chan, "LD %p callback\n", desc); + txd->callback(txd->callback_param); + } + + /* Run any dependencies */ + dma_run_dependencies(txd); + + dma_descriptor_unmap(txd); + chan_dbg(chan, "LD %p free\n", desc); + dma_pool_free(chan->desc_pool, desc, txd->phys); +} + /** * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. * @chan : Freescale DMA channel @@ -802,101 +897,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan, return 0; } -/** - * fsldma_cleanup_descriptor - cleanup and free a single link descriptor - * @chan: Freescale DMA channel - * @desc: descriptor to cleanup and free - * - * This function is used on a descriptor which has been executed by the DMA - * controller. It will run any callbacks, submit any dependencies, and then - * free the descriptor. 
- */ -static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - struct dma_async_tx_descriptor *txd = &desc->async_tx; - - /* Run the link descriptor callback function */ - if (txd->callback) { - chan_dbg(chan, "LD %p callback\n", desc); - txd->callback(txd->callback_param); - } - - /* Run any dependencies */ - dma_run_dependencies(txd); - - dma_descriptor_unmap(txd); - chan_dbg(chan, "LD %p free\n", desc); - dma_pool_free(chan->desc_pool, desc, txd->phys); -} - -/** - * fsl_chan_xfer_ld_queue - transfer any pending transactions - * @chan : Freescale DMA channel - * - * HARDWARE STATE: idle - * LOCKING: must hold chan->desc_lock - */ -static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) -{ - struct fsl_desc_sw *desc; - - /* - * If the list of pending descriptors is empty, then we - * don't need to do any work at all - */ - if (list_empty(&chan->ld_pending)) { - chan_dbg(chan, "no pending LDs\n"); - return; - } - - /* - * The DMA controller is not idle, which means that the interrupt - * handler will start any queued transactions when it runs after - * this transaction finishes - */ - if (!chan->idle) { - chan_dbg(chan, "DMA controller still busy\n"); - return; - } - - /* - * If there are some link descriptors which have not been - * transferred, we need to start the controller - */ - - /* - * Move all elements from the queue of pending transactions - * onto the list of running transactions - */ - chan_dbg(chan, "idle, starting controller\n"); - desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); - list_splice_tail_init(&chan->ld_pending, &chan->ld_running); - - /* - * The 85xx DMA controller doesn't clear the channel start bit - * automatically at the end of a transfer. Therefore we must clear - * it in software before starting the transfer. - */ - if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { - u32 mode; - - mode = get_mr(chan); - mode &= ~FSL_DMA_MR_CS; - set_mr(chan, mode); - } - - /* - * Program the descriptor's address into the DMA controller, - * then start the DMA transaction - */ - set_cdar(chan, desc->async_tx.phys); - get_cdar(chan); - - dma_start(chan); - chan->idle = false; -} - /** * fsl_dma_memcpy_issue_pending - Issue the DMA start command * @chan : Freescale DMA channel From 78a4f0367a1de89c625237fc5524f4ef4535f031 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 23 Apr 2014 17:53:23 +0400 Subject: [PATCH 13/45] dma: mpc512x: reorder mpc8308 specific instructions Concentrate the specific code for MPC8308 in the 'if' branch and handle MPC512x in the 'else' branch. This modification only reorders instructions but doesn't change behaviour. 
Signed-off-by: Alexander Popov Acked-by: Anatolij Gustschin Signed-off-by: Vinod Koul --- drivers/dma/mpc512x_dma.c | 42 +++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 448750da4402..2ce248b552dc 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -52,9 +52,17 @@ #define MPC_DMA_DESCRIPTORS 64 /* Macro definitions */ -#define MPC_DMA_CHANNELS 64 #define MPC_DMA_TCD_OFFSET 0x1000 +/* + * Maximum channel counts for individual hardware variants + * and the maximum channel count over all supported controllers, + * used for data structure size + */ +#define MPC8308_DMACHAN_MAX 16 +#define MPC512x_DMACHAN_MAX 64 +#define MPC_DMA_CHANNELS 64 + /* Arbitration mode of group and channel */ #define MPC_DMA_DMACR_EDCG (1 << 31) #define MPC_DMA_DMACR_ERGA (1 << 3) @@ -710,10 +718,10 @@ static int mpc_dma_probe(struct platform_device *op) dma = &mdma->dma; dma->dev = dev; - if (!mdma->is_mpc8308) - dma->chancnt = MPC_DMA_CHANNELS; + if (mdma->is_mpc8308) + dma->chancnt = MPC8308_DMACHAN_MAX; else - dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */ + dma->chancnt = MPC512x_DMACHAN_MAX; dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; dma->device_free_chan_resources = mpc_dma_free_chan_resources; dma->device_issue_pending = mpc_dma_issue_pending; @@ -747,7 +755,19 @@ static int mpc_dma_probe(struct platform_device *op) * - Round-robin group arbitration, * - Round-robin channel arbitration. */ - if (!mdma->is_mpc8308) { + if (mdma->is_mpc8308) { + /* MPC8308 has 16 channels and lacks some registers */ + out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA); + + /* enable snooping */ + out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE); + /* Disable error interrupts */ + out_be32(&mdma->regs->dmaeeil, 0); + + /* Clear interrupts status */ + out_be32(&mdma->regs->dmaintl, 0xFFFF); + out_be32(&mdma->regs->dmaerrl, 0xFFFF); + } else { out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); @@ -768,18 +788,6 @@ static int mpc_dma_probe(struct platform_device *op) /* Route interrupts to IPIC */ out_be32(&mdma->regs->dmaihsa, 0); out_be32(&mdma->regs->dmailsa, 0); - } else { - /* MPC8308 has 16 channels and lacks some registers */ - out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA); - - /* enable snooping */ - out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE); - /* Disable error interrupts */ - out_be32(&mdma->regs->dmaeeil, 0); - - /* Clear interrupts status */ - out_be32(&mdma->regs->dmaintl, 0xFFFF); - out_be32(&mdma->regs->dmaerrl, 0xFFFF); } /* Register DMA engine */ From 62057d3375a5237c24f1dee82cb6c860081cc4ff Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 23 Apr 2014 17:53:24 +0400 Subject: [PATCH 14/45] dma: mpc512x: separate 'compatible' values for MPC512x and MPC8308 MPC512x and MPC8308 have similar DMA controllers, but are independent SoCs. DMA controller driver should have separate 'compatible' values for these SoCs. 
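With a dedicated compatible string, SoC-specific behaviour can be keyed off the device tree node alone. A hedged sketch (the match-table entries mirror the diff below; the small helper around of_device_is_compatible() is only illustrative and needs <linux/of.h> and <linux/mod_devicetable.h>):

        static const struct of_device_id mpc_dma_match[] = {
                { .compatible = "fsl,mpc5121-dma", },
                { .compatible = "fsl,mpc8308-dma", },
                {},
        };

        /* Illustrative helper: probe can now detect the MPC8308 variant directly */
        static bool mpc_dma_is_mpc8308(struct device_node *dn)
        {
                return of_device_is_compatible(dn, "fsl,mpc8308-dma");
        }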
Signed-off-by: Alexander Popov Signed-off-by: Vinod Koul --- arch/powerpc/boot/dts/mpc8308_p1m.dts | 2 +- arch/powerpc/boot/dts/mpc8308rdb.dts | 2 +- drivers/dma/mpc512x_dma.c | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts index 651e4f55acdb..57f86cdf9f36 100644 --- a/arch/powerpc/boot/dts/mpc8308_p1m.dts +++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts @@ -296,7 +296,7 @@ }; dma@2c000 { - compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma"; + compatible = "fsl,mpc8308-dma"; reg = <0x2c000 0x1800>; interrupts = <3 0x8 94 0x8>; diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts index 9ce45f2efd34..d0211f0413c6 100644 --- a/arch/powerpc/boot/dts/mpc8308rdb.dts +++ b/arch/powerpc/boot/dts/mpc8308rdb.dts @@ -265,7 +265,7 @@ }; dma@2c000 { - compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma"; + compatible = "fsl,mpc8308-dma"; reg = <0x2c000 0x1800>; interrupts = <3 0x8 94 0x8>; diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 2ce248b552dc..0b17f4db1908 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -815,6 +815,7 @@ static int mpc_dma_remove(struct platform_device *op) static struct of_device_id mpc_dma_match[] = { { .compatible = "fsl,mpc5121-dma", }, + { .compatible = "fsl,mpc8308-dma", }, {}, }; From baca66f7960f60a7ed5884acf4eb1a76d868c43c Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 23 Apr 2014 17:53:26 +0400 Subject: [PATCH 15/45] dma: mpc512x: fix freeing resources in mpc_dma_probe() and mpc_dma_remove() Fix mpc_dma_probe() error path and mpc_dma_remove(): manually free IRQs and dispose IRQ mappings before devm_* takes care of other resources. Moreover replace devm_request_irq() with request_irq() since there is no need to use it because the original code always frees IRQ manually with devm_free_irq(). Replace devm_free_irq() with free_irq() accordingly. 
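The essential shape of the resulting error handling, condensed into a hedged sketch (function, label and handler names are placeholders; the real code also checks against NO_IRQ). Only resources that are not devm-managed need explicit labels, and they are released in reverse order of acquisition:

        static irqreturn_t sketch_irq(int irq, void *data)
        {
                return IRQ_HANDLED;
        }

        static int probe_irq_sketch(struct platform_device *op)
        {
                struct device_node *dn = op->dev.of_node;
                unsigned int irq, irq2;
                int ret;

                irq = irq_of_parse_and_map(dn, 0);
                if (!irq)
                        return -EINVAL;

                irq2 = irq_of_parse_and_map(dn, 1);
                if (!irq2) {
                        ret = -EINVAL;
                        goto err_dispose1;
                }

                ret = request_irq(irq, sketch_irq, 0, "sketch", NULL);
                if (ret)
                        goto err_dispose2;

                ret = request_irq(irq2, sketch_irq, 0, "sketch", NULL);
                if (ret)
                        goto err_free1;

                return 0;       /* remove() must mirror this in reverse */

        err_free1:
                free_irq(irq, NULL);
        err_dispose2:
                irq_dispose_mapping(irq2);
        err_dispose1:
                irq_dispose_mapping(irq);
                return ret;
        }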
Signed-off-by: Alexander Popov Signed-off-by: Vinod Koul --- drivers/dma/mpc512x_dma.c | 55 +++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 0b17f4db1908..96104f4eebe8 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -657,13 +657,15 @@ static int mpc_dma_probe(struct platform_device *op) mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); if (!mdma) { dev_err(dev, "Memory exhausted!\n"); - return -ENOMEM; + retval = -ENOMEM; + goto err; } mdma->irq = irq_of_parse_and_map(dn, 0); if (mdma->irq == NO_IRQ) { dev_err(dev, "Error mapping IRQ!\n"); - return -EINVAL; + retval = -EINVAL; + goto err; } if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { @@ -671,14 +673,15 @@ static int mpc_dma_probe(struct platform_device *op) mdma->irq2 = irq_of_parse_and_map(dn, 1); if (mdma->irq2 == NO_IRQ) { dev_err(dev, "Error mapping IRQ!\n"); - return -EINVAL; + retval = -EINVAL; + goto err_dispose1; } } retval = of_address_to_resource(dn, 0, &res); if (retval) { dev_err(dev, "Error parsing memory region!\n"); - return retval; + goto err_dispose2; } regs_start = res.start; @@ -686,31 +689,34 @@ static int mpc_dma_probe(struct platform_device *op) if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { dev_err(dev, "Error requesting memory region!\n"); - return -EBUSY; + retval = -EBUSY; + goto err_dispose2; } mdma->regs = devm_ioremap(dev, regs_start, regs_size); if (!mdma->regs) { dev_err(dev, "Error mapping memory region!\n"); - return -ENOMEM; + retval = -ENOMEM; + goto err_dispose2; } mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) + MPC_DMA_TCD_OFFSET); - retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, - mdma); + retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma); if (retval) { dev_err(dev, "Error requesting IRQ!\n"); - return -EINVAL; + retval = -EINVAL; + goto err_dispose2; } if (mdma->is_mpc8308) { - retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0, - DRV_NAME, mdma); + retval = request_irq(mdma->irq2, &mpc_dma_irq, 0, + DRV_NAME, mdma); if (retval) { dev_err(dev, "Error requesting IRQ2!\n"); - return -EINVAL; + retval = -EINVAL; + goto err_free1; } } @@ -793,12 +799,23 @@ static int mpc_dma_probe(struct platform_device *op) /* Register DMA engine */ dev_set_drvdata(dev, mdma); retval = dma_async_device_register(dma); - if (retval) { - devm_free_irq(dev, mdma->irq, mdma); - irq_dispose_mapping(mdma->irq); - } + if (retval) + goto err_free2; return retval; + +err_free2: + if (mdma->is_mpc8308) + free_irq(mdma->irq2, mdma); +err_free1: + free_irq(mdma->irq, mdma); +err_dispose2: + if (mdma->is_mpc8308) + irq_dispose_mapping(mdma->irq2); +err_dispose1: + irq_dispose_mapping(mdma->irq); +err: + return retval; } static int mpc_dma_remove(struct platform_device *op) @@ -807,7 +824,11 @@ static int mpc_dma_remove(struct platform_device *op) struct mpc_dma *mdma = dev_get_drvdata(dev); dma_async_device_unregister(&mdma->dma); - devm_free_irq(dev, mdma->irq, mdma); + if (mdma->is_mpc8308) { + free_irq(mdma->irq2, mdma); + irq_dispose_mapping(mdma->irq2); + } + free_irq(mdma->irq, mdma); irq_dispose_mapping(mdma->irq); return 0; From 067bd4fdfa17c3bbdac09b401667e967c81eb2b7 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 15 Apr 2014 16:18:41 +0300 Subject: [PATCH 16/45] dmaengine: dw: move PM to suspend_late / resume_early stages There is no need to use *_noirq version of suspend and 
resume PM callbacks. The suspend_late / resume_early suit better (it was discussed in [1]) and in future could be used for runtime PM support. [1] http://www.spinics.net/lists/kernel/msg1650974.html Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/platform.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 453822cc4f9d..c5b339af6be5 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -256,7 +256,7 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); #ifdef CONFIG_PM_SLEEP -static int dw_suspend_noirq(struct device *dev) +static int dw_suspend_late(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_dma_chip *chip = platform_get_drvdata(pdev); @@ -264,7 +264,7 @@ static int dw_suspend_noirq(struct device *dev) return dw_dma_suspend(chip); } -static int dw_resume_noirq(struct device *dev) +static int dw_resume_early(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_dma_chip *chip = platform_get_drvdata(pdev); @@ -272,20 +272,10 @@ static int dw_resume_noirq(struct device *dev) return dw_dma_resume(chip); } -#else /* !CONFIG_PM_SLEEP */ - -#define dw_suspend_noirq NULL -#define dw_resume_noirq NULL - -#endif /* !CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dw_dev_pm_ops = { - .suspend_noirq = dw_suspend_noirq, - .resume_noirq = dw_resume_noirq, - .freeze_noirq = dw_suspend_noirq, - .thaw_noirq = dw_resume_noirq, - .restore_noirq = dw_resume_noirq, - .poweroff_noirq = dw_suspend_noirq, + SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early) }; static struct platform_driver dw_driver = { From c31b6ae1b4a75680765ac66d80e2e107e8bf05a0 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 15 Apr 2014 16:18:42 +0300 Subject: [PATCH 17/45] dmaengine: dw: convert to use SET_LATE_SYSTEM_SLEEP_PM_OPS The commit 4501fe61 "dma: dw: Add suspend and resume handling for PCI mode DW_DMAC." introduces system power management callbacks. Regarding to commit f78c4cff "PM / Sleep: Add macro to define common late/early system PM callbacks" we have nice macro to setup dev_pm_ops structure. This patch converts a driver to use the macro. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/pci.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index fec59f1a77bb..be81db780627 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -93,16 +93,10 @@ static int dw_pci_resume_early(struct device *dev) return dw_dma_resume(chip); }; -#else /* !CONFIG_PM_SLEEP */ - -#define dw_pci_suspend_late NULL -#define dw_pci_resume_early NULL - -#endif /* !CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dw_pci_dev_pm_ops = { - .suspend_late = dw_pci_suspend_late, - .resume_early = dw_pci_resume_early, + SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early) }; static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = { From 80245216ccbdb4b1dce4db714e0fdc692c81af6d Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 23 Apr 2014 21:52:01 +0200 Subject: [PATCH 18/45] dma: ste_dma40: Maintain spinlock order while handling pause The runtime PM resume callback needs to be executed while holding the spinlock, make sure to maintain this for the pause operation as well. 
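A condensed, hedged sketch of the resulting ordering in the pause path (the command handling details are elided). Calling pm_runtime_get_sync() under the spinlock is only legal because the driver declares the device IRQ-safe with pm_runtime_irq_safe():

        static int d40_pause_sketch(struct d40_chan *d40c)
        {
                unsigned long flags;
                int res;

                if (!d40c->busy)
                        return 0;

                spin_lock_irqsave(&d40c->lock, flags);
                /* runtime-resume now runs with the channel lock held, as it expects */
                pm_runtime_get_sync(d40c->base->dev);

                res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

                pm_runtime_mark_last_busy(d40c->base->dev);
                pm_runtime_put_autosuspend(d40c->base->dev);
                spin_unlock_irqrestore(&d40c->lock, flags);

                return res;
        }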
Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index bf18c786ed40..6e97cf6931f1 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1495,8 +1495,8 @@ static int d40_pause(struct d40_chan *d40c) if (!d40c->busy) return 0; - pm_runtime_get_sync(d40c->base->dev); spin_lock_irqsave(&d40c->lock, flags); + pm_runtime_get_sync(d40c->base->dev); res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); From 2dafca17c8807bc622daf111ec359a4baad550f6 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 23 Apr 2014 21:52:02 +0200 Subject: [PATCH 19/45] dma: ste_dma40: Don't require CONFIG_PM_RUNTIME While probing, don't rely on CONFIG_PM_RUNTIME to be configured. Instead, let's power up the device and make it fully operational. Update the runtime PM status to reflect the active state. Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6e97cf6931f1..45e809f4acda 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -556,7 +556,6 @@ struct d40_gen_dmac { * later * @reg_val_backup_chan: Backup data for standard channel parameter registers. * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. - * @initialized: true if the dma has been initialized * @gen_dmac: the struct for generic registers values to represent u8500/8540 * DMA controller */ @@ -594,7 +593,6 @@ struct d40_base { u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; u32 *reg_val_backup_chan; u16 gcc_pwr_off_mask; - bool initialized; struct d40_gen_dmac gen_dmac; }; @@ -3030,8 +3028,7 @@ static int dma40_runtime_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); - if (base->initialized) - d40_save_restore_registers(base, false); + d40_save_restore_registers(base, false); writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); @@ -3645,12 +3642,6 @@ static int __init d40_probe(struct platform_device *pdev) goto failure; } - pm_runtime_irq_safe(base->dev); - pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(base->dev); - pm_runtime_enable(base->dev); - pm_runtime_resume(base->dev); - if (base->plat_data->use_esram_lcla) { base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); @@ -3671,7 +3662,15 @@ static int __init d40_probe(struct platform_device *pdev) } } - base->initialized = true; + writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); + + pm_runtime_irq_safe(base->dev); + pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(base->dev); + pm_runtime_mark_last_busy(base->dev); + pm_runtime_set_active(base->dev); + pm_runtime_enable(base->dev); + ret = d40_dmaengine_init(base, num_reserved_chans); if (ret) goto failure; From 123e4ca172707267eef5fbe9f0c58bd4c412685c Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 23 Apr 2014 21:52:03 +0200 Subject: [PATCH 20/45] dma: ste_dma40: Convert to PM macros while providing the PM callbacks Converting to the PM macros makes us simplify and remove some code. 
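What the conversion amounts to, as a hedged sketch using the driver's callback names: the SET_*_PM_OPS macros expand to nothing when the matching CONFIG option is disabled, so the callbacks only need to sit under the corresponding #ifdef (which is why the patch reshuffles them), and the old DMA40_PM_OPS/NULL indirection can go because &dma40_pm_ops is always a valid, possibly empty, ops table.

        #ifdef CONFIG_PM_SLEEP
        static int dma40_suspend(struct device *dev);
        static int dma40_resume(struct device *dev);
        #endif

        #ifdef CONFIG_PM
        static int dma40_runtime_suspend(struct device *dev);
        static int dma40_runtime_resume(struct device *dev);
        #endif

        static const struct dev_pm_ops dma40_pm_ops = {
                SET_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
                SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend,
                                      dma40_runtime_resume,
                                      NULL)
        };

        /* .pm = &dma40_pm_ops can now be assigned unconditionally */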
Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 150 +++++++++++++++++++--------------------- 1 file changed, 71 insertions(+), 79 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 45e809f4acda..0b29af321873 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1054,62 +1054,6 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, return len; } - -#ifdef CONFIG_PM -static void dma40_backup(void __iomem *baseaddr, u32 *backup, - u32 *regaddr, int num, bool save) -{ - int i; - - for (i = 0; i < num; i++) { - void __iomem *addr = baseaddr + regaddr[i]; - - if (save) - backup[i] = readl_relaxed(addr); - else - writel_relaxed(backup[i], addr); - } -} - -static void d40_save_restore_registers(struct d40_base *base, bool save) -{ - int i; - - /* Save/Restore channel specific registers */ - for (i = 0; i < base->num_phy_chans; i++) { - void __iomem *addr; - int idx; - - if (base->phy_res[i].reserved) - continue; - - addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; - idx = i * ARRAY_SIZE(d40_backup_regs_chan); - - dma40_backup(addr, &base->reg_val_backup_chan[idx], - d40_backup_regs_chan, - ARRAY_SIZE(d40_backup_regs_chan), - save); - } - - /* Save/Restore global registers */ - dma40_backup(base->virtbase, base->reg_val_backup, - d40_backup_regs, ARRAY_SIZE(d40_backup_regs), - save); - - /* Save/Restore registers only existing on dma40 v3 and later */ - if (base->gen_dmac.backup) - dma40_backup(base->virtbase, base->reg_val_backup_v4, - base->gen_dmac.backup, - base->gen_dmac.backup_size, - save); -} -#else -static void d40_save_restore_registers(struct d40_base *base, bool save) -{ -} -#endif - static int __d40_execute_command_phy(struct d40_chan *d40c, enum d40_command command) { @@ -2996,8 +2940,8 @@ failure1: } /* Suspend resume functionality */ -#ifdef CONFIG_PM -static int dma40_pm_suspend(struct device *dev) +#ifdef CONFIG_PM_SLEEP +static int dma40_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); @@ -3008,6 +2952,69 @@ static int dma40_pm_suspend(struct device *dev) return ret; } +static int dma40_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct d40_base *base = platform_get_drvdata(pdev); + int ret = 0; + + if (base->lcpa_regulator) + ret = regulator_enable(base->lcpa_regulator); + + return ret; +} +#endif + +#ifdef CONFIG_PM +static void dma40_backup(void __iomem *baseaddr, u32 *backup, + u32 *regaddr, int num, bool save) +{ + int i; + + for (i = 0; i < num; i++) { + void __iomem *addr = baseaddr + regaddr[i]; + + if (save) + backup[i] = readl_relaxed(addr); + else + writel_relaxed(backup[i], addr); + } +} + +static void d40_save_restore_registers(struct d40_base *base, bool save) +{ + int i; + + /* Save/Restore channel specific registers */ + for (i = 0; i < base->num_phy_chans; i++) { + void __iomem *addr; + int idx; + + if (base->phy_res[i].reserved) + continue; + + addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; + idx = i * ARRAY_SIZE(d40_backup_regs_chan); + + dma40_backup(addr, &base->reg_val_backup_chan[idx], + d40_backup_regs_chan, + ARRAY_SIZE(d40_backup_regs_chan), + save); + } + + /* Save/Restore global registers */ + dma40_backup(base->virtbase, base->reg_val_backup, + d40_backup_regs, ARRAY_SIZE(d40_backup_regs), + save); + + /* Save/Restore registers only existing on dma40 v3 and later */ + if 
(base->gen_dmac.backup) + dma40_backup(base->virtbase, base->reg_val_backup_v4, + base->gen_dmac.backup, + base->gen_dmac.backup_size, + save); +} + static int dma40_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -3034,29 +3041,14 @@ static int dma40_runtime_resume(struct device *dev) base->virtbase + D40_DREG_GCC); return 0; } - -static int dma40_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct d40_base *base = platform_get_drvdata(pdev); - int ret = 0; - - if (base->lcpa_regulator) - ret = regulator_enable(base->lcpa_regulator); - - return ret; -} +#endif static const struct dev_pm_ops dma40_pm_ops = { - .suspend = dma40_pm_suspend, - .runtime_suspend = dma40_runtime_suspend, - .runtime_resume = dma40_runtime_resume, - .resume = dma40_resume, + SET_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) + SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend, + dma40_runtime_resume, + NULL) }; -#define DMA40_PM_OPS (&dma40_pm_ops) -#else -#define DMA40_PM_OPS NULL -#endif /* Initialization functions. */ @@ -3753,7 +3745,7 @@ static struct platform_driver d40_driver = { .driver = { .owner = THIS_MODULE, .name = D40_NAME, - .pm = DMA40_PM_OPS, + .pm = &dma40_pm_ops, .of_match_table = d40_match, }, }; From c906a3ec458742c95850c0c1cde9e8b68df25c01 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 23 Apr 2014 21:52:04 +0200 Subject: [PATCH 21/45] dma: ste_dma40: Fixup system suspend/resume Make sure to handle register context save/restore when needed from system PM callbacks. Previously we solely trusted the device to reside in in-active state while the system suspend callback were invoked, which is just too optimistic. Signed-off-by: Ulf Hansson Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0b29af321873..b539fc9b469a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2945,7 +2945,11 @@ static int dma40_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); - int ret = 0; + int ret; + + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; if (base->lcpa_regulator) ret = regulator_disable(base->lcpa_regulator); @@ -2958,10 +2962,13 @@ static int dma40_resume(struct device *dev) struct d40_base *base = platform_get_drvdata(pdev); int ret = 0; - if (base->lcpa_regulator) + if (base->lcpa_regulator) { ret = regulator_enable(base->lcpa_regulator); + if (ret) + return ret; + } - return ret; + return pm_runtime_force_resume(dev); } #endif From 1b38da264674d6a0fe26a63996b8f88b88c3da48 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 17 Feb 2014 12:29:06 +0100 Subject: [PATCH 22/45] dma: mmp_pdma: add support for residue reporting A channel can accommodate more than one transaction, each consisting of multiple descriptors, the last of which has the DCMD_ENDIRQEN bit set. In order to report the channel's residue, we hence have to walk the list of running descriptors, look for those which match the cookie, and then try to find the descriptor which defines upper and lower boundaries that embrace the current transport pointer. Once it is found, walk forward until we find the descriptor that tells us about the end of a transaction via a set DCMD_ENDIRQEN bit. 
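From the consumer's point of view the residue computed below is reported through the usual dmaengine status query. A hedged client-side sketch (the helper name is illustrative):

        #include <linux/dmaengine.h>

        /* Hypothetical client-side check: how many bytes of the transaction
         * identified by 'cookie' are still outstanding on this channel.
         */
        static size_t bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
        {
                struct dma_tx_state state;
                enum dma_status status;

                status = dmaengine_tx_status(chan, cookie, &state);
                if (status == DMA_ERROR)
                        return 0;       /* residue is not meaningful on error */

                return state.residue;   /* set via dma_set_residue() in the driver */
        }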
Signed-off-by: Daniel Mack Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 87 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 7affa533d2a8..a7b186d536b3 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -29,8 +29,8 @@ #define DALGN 0x00a0 #define DINT 0x00f0 #define DDADR 0x0200 -#define DSADR 0x0204 -#define DTADR 0x0208 +#define DSADR(n) (0x0204 + ((n) << 4)) +#define DTADR(n) (0x0208 + ((n) << 4)) #define DCMD 0x020c #define DCSR_RUN BIT(31) /* Run Bit (read / write) */ @@ -748,11 +748,92 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, return 0; } +static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan, + dma_cookie_t cookie) +{ + struct mmp_pdma_desc_sw *sw; + u32 curr, residue = 0; + bool passed = false; + bool cyclic = chan->cyclic_first != NULL; + + /* + * If the channel does not have a phy pointer anymore, it has already + * been completed. Therefore, its residue is 0. + */ + if (!chan->phy) + return 0; + + if (chan->dir == DMA_DEV_TO_MEM) + curr = readl(chan->phy->base + DTADR(chan->phy->idx)); + else + curr = readl(chan->phy->base + DSADR(chan->phy->idx)); + + list_for_each_entry(sw, &chan->chain_running, node) { + u32 start, end, len; + + if (chan->dir == DMA_DEV_TO_MEM) + start = sw->desc.dtadr; + else + start = sw->desc.dsadr; + + len = sw->desc.dcmd & DCMD_LENGTH; + end = start + len; + + /* + * 'passed' will be latched once we found the descriptor which + * lies inside the boundaries of the curr pointer. All + * descriptors that occur in the list _after_ we found that + * partially handled descriptor are still to be processed and + * are hence added to the residual bytes counter. + */ + + if (passed) { + residue += len; + } else if (curr >= start && curr <= end) { + residue += end - curr; + passed = true; + } + + /* + * Descriptors that have the ENDIRQEN bit set mark the end of a + * transaction chain, and the cookie assigned with it has been + * returned previously from mmp_pdma_tx_submit(). + * + * In case we have multiple transactions in the running chain, + * and the cookie does not match the one the user asked us + * about, reset the state variables and start over. + * + * This logic does not apply to cyclic transactions, where all + * descriptors have the ENDIRQEN bit set, and for which we + * can't have multiple transactions on one channel anyway. + */ + if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN)) + continue; + + if (sw->async_tx.cookie == cookie) { + return residue; + } else { + residue = 0; + passed = false; + } + } + + /* We should only get here in case of cyclic transactions */ + return residue; +} + static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - return dma_cookie_status(dchan, cookie, txstate); + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); + enum dma_status ret; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (likely(ret != DMA_ERROR)) + dma_set_residue(txstate, mmp_pdma_residue(chan, cookie)); + + return ret; } /** From 673d377345d69464929a1a70c778be830fa58143 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 7 May 2014 11:03:57 +0200 Subject: [PATCH 23/45] dma: ste_dma40: Convert to the late system PM callbacks Clients may still be active in the early phase of system PM, thus we need to move the suspend operations to the late system PM phase. 
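Concretely, the change swaps the sleep macro for its late/early variant, so the controller is suspended only after its client drivers have run their ordinary suspend callbacks, and resumed before they resume. A minimal sketch using the driver's callback names:

        static const struct dev_pm_ops dma40_pm_ops = {
                /* fills .suspend_late/.resume_early (and the late/early
                 * freeze/thaw/poweroff/restore slots) instead of .suspend/.resume */
                SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
                SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend,
                                      dma40_runtime_resume,
                                      NULL)
        };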
Signed-off-by: Ulf Hansson Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index b539fc9b469a..c7984459ede7 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3051,7 +3051,7 @@ static int dma40_runtime_resume(struct device *dev) #endif static const struct dev_pm_ops dma40_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) SET_PM_RUNTIME_PM_OPS(dma40_runtime_suspend, dma40_runtime_resume, NULL) From 7587821d3d0812fde4799657aa6d32a57d655bcd Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 9 May 2014 14:59:36 +0900 Subject: [PATCH 24/45] dma: remove DEFINE_PCI_DEVICE_TABLE macro Don't use DEFINE_PCI_DEVICE_TABLE macro, because this macro is deprecated. Signed-off-by: Jingoo Han Acked-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/pci.c | 2 +- drivers/dma/pch_dma.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index be81db780627..39e30c3c7a9d 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -99,7 +99,7 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early) }; -static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = { +static const struct pci_device_id dw_pci_id_table[] = { /* Medfield */ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata }, diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 05fa548bd659..b4b8cc69c2bf 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -996,7 +996,7 @@ static void pch_dma_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 #define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 -DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { +const struct pci_device_id pch_dma_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ From ba730340f96c01160b5f26f81e8fb38f8cb1821c Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Thu, 15 May 2014 18:15:31 +0400 Subject: [PATCH 25/45] dmaengine: fix comment typo Fix comment typo. Signed-off-by: Alexander Popov Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8300fb87b84a..cbb168e04dc1 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -292,7 +292,7 @@ struct dma_chan_dev { }; /** - * enum dma_slave_buswidth - defines bus with of the DMA slave + * enum dma_slave_buswidth - defines bus width of the DMA slave * device, source or target buses */ enum dma_slave_buswidth { From 63da8e0d4f274fdf73b9924e8fd8f64a3d11d24a Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Thu, 15 May 2014 18:15:32 +0400 Subject: [PATCH 26/45] dmaengine: mpc512x: add support for peripheral transfers Introduce support for slave s/g transfer preparation and the associated device control callback in the MPC512x DMA controller driver, which adds support for data transfers between memory and peripheral I/O to the previously supported mem-to-mem transfers. 
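A hedged client-side sketch of how a peripheral driver could use the new capability while respecting the constraints listed in the driver comment (4-byte FIFO register, 4-byte aligned addresses, transfer size aligned on 4 * maxburst). The function name and FIFO address are placeholders; 'chan' is assumed to be a channel already obtained from the dmaengine core:

        #define EXAMPLE_FIFO_ADDR 0x80001004    /* placeholder peripheral FIFO, 4-byte aligned */

        static int example_setup_rx(struct dma_chan *chan, struct scatterlist *sg)
        {
                struct dma_slave_config cfg = {
                        .direction      = DMA_DEV_TO_MEM,
                        .src_addr       = EXAMPLE_FIFO_ADDR,
                        /* the DMA_SLAVE_CONFIG handler below checks both widths */
                        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                        .src_maxburst   = 4,    /* 4 words (16 bytes) per request */
                };
                struct dma_async_tx_descriptor *desc;
                int ret;

                ret = dmaengine_slave_config(chan, &cfg);
                if (ret)
                        return ret;

                /* sg_len must stay 1 until real scatter/gather support exists,
                 * and sg_dma_len() must be a multiple of 4 * maxburst bytes */
                desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_DEV_TO_MEM,
                                               DMA_PREP_INTERRUPT);
                if (!desc)
                        return -EINVAL;

                dmaengine_submit(desc);
                dma_async_issue_pending(chan);

                return 0;
        }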
Signed-off-by: Alexander Popov [fixed subsytem name] Signed-off-by: Vinod Koul --- drivers/dma/mpc512x_dma.c | 244 +++++++++++++++++++++++++++++++++++++- 1 file changed, 239 insertions(+), 5 deletions(-) diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 96104f4eebe8..2ad43738ac8b 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -2,6 +2,7 @@ * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. * Copyright (C) Semihalf 2009 * Copyright (C) Ilya Yanok, Emcraft Systems 2010 + * Copyright (C) Alexander Popov, Promcontroller 2014 * * Written by Piotr Ziecik . Hardware description * (defines, structures and comments) was taken from MPC5121 DMA driver @@ -29,8 +30,18 @@ */ /* - * This is initial version of MPC5121 DMA driver. Only memory to memory - * transfers are supported (tested using dmatest module). + * MPC512x and MPC8308 DMA driver. It supports + * memory to memory data transfers (tested using dmatest module) and + * data transfers between memory and peripheral I/O memory + * by means of slave scatter/gather with these limitations: + * - chunked transfers (described by s/g lists with more than one item) + * are refused as long as proper support for scatter/gather is missing; + * - transfers on MPC8308 always start from software as this SoC appears + * not to have external request lines for peripheral flow control; + * - only peripheral devices with 4-byte FIFO access register are supported; + * - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently + * source and destination addresses must be 4-byte aligned + * and transfer size must be aligned on (4 * maxburst) boundary; */ #include @@ -189,6 +200,7 @@ struct mpc_dma_desc { dma_addr_t tcd_paddr; int error; struct list_head node; + int will_access_peripheral; }; struct mpc_dma_chan { @@ -201,6 +213,12 @@ struct mpc_dma_chan { struct mpc_dma_tcd *tcd; dma_addr_t tcd_paddr; + /* Settings for access to peripheral FIFO */ + dma_addr_t src_per_paddr; + u32 src_tcd_nunits; + dma_addr_t dst_per_paddr; + u32 dst_tcd_nunits; + /* Lock for this structure */ spinlock_t lock; }; @@ -251,8 +269,23 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan) struct mpc_dma_desc *mdesc; int cid = mchan->chan.chan_id; - /* Move all queued descriptors to active list */ - list_splice_tail_init(&mchan->queued, &mchan->active); + while (!list_empty(&mchan->queued)) { + mdesc = list_first_entry(&mchan->queued, + struct mpc_dma_desc, node); + /* + * Grab either several mem-to-mem transfer descriptors + * or one peripheral transfer descriptor, + * don't mix mem-to-mem and peripheral transfer descriptors + * within the same 'active' list. 
+ */ + if (mdesc->will_access_peripheral) { + if (list_empty(&mchan->active)) + list_move_tail(&mdesc->node, &mchan->active); + break; + } else { + list_move_tail(&mdesc->node, &mchan->active); + } + } /* Chain descriptors into one transaction */ list_for_each_entry(mdesc, &mchan->active, node) { @@ -278,7 +311,17 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan) if (first != prev) mdma->tcd[cid].e_sg = 1; - out_8(&mdma->regs->dmassrt, cid); + + if (mdma->is_mpc8308) { + /* MPC8308, no request lines, software initiated start */ + out_8(&mdma->regs->dmassrt, cid); + } else if (first->will_access_peripheral) { + /* Peripherals involved, start by external request signal */ + out_8(&mdma->regs->dmaserq, cid); + } else { + /* Memory to memory transfer, software initiated start */ + out_8(&mdma->regs->dmassrt, cid); + } } /* Handle interrupt on one half of DMA controller (32 channels) */ @@ -596,6 +639,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, } mdesc->error = 0; + mdesc->will_access_peripheral = 0; tcd = mdesc->tcd; /* Prepare Transfer Control Descriptor for this transaction */ @@ -643,6 +687,193 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, return &mdesc->desc; } +static struct dma_async_tx_descriptor * +mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma_desc *mdesc = NULL; + dma_addr_t per_paddr; + u32 tcd_nunits; + struct mpc_dma_tcd *tcd; + unsigned long iflags; + struct scatterlist *sg; + size_t len; + int iter, i; + + /* Currently there is no proper support for scatter/gather */ + if (sg_len != 1) + return NULL; + + if (!is_slave_direction(direction)) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + spin_lock_irqsave(&mchan->lock, iflags); + + mdesc = list_first_entry(&mchan->free, + struct mpc_dma_desc, node); + if (!mdesc) { + spin_unlock_irqrestore(&mchan->lock, iflags); + /* Try to free completed descriptors */ + mpc_dma_process_completed(mdma); + return NULL; + } + + list_del(&mdesc->node); + + if (direction == DMA_DEV_TO_MEM) { + per_paddr = mchan->src_per_paddr; + tcd_nunits = mchan->src_tcd_nunits; + } else { + per_paddr = mchan->dst_per_paddr; + tcd_nunits = mchan->dst_tcd_nunits; + } + + spin_unlock_irqrestore(&mchan->lock, iflags); + + if (per_paddr == 0 || tcd_nunits == 0) + goto err_prep; + + mdesc->error = 0; + mdesc->will_access_peripheral = 1; + + /* Prepare Transfer Control Descriptor for this transaction */ + tcd = mdesc->tcd; + + memset(tcd, 0, sizeof(struct mpc_dma_tcd)); + + if (!IS_ALIGNED(sg_dma_address(sg), 4)) + goto err_prep; + + if (direction == DMA_DEV_TO_MEM) { + tcd->saddr = per_paddr; + tcd->daddr = sg_dma_address(sg); + tcd->soff = 0; + tcd->doff = 4; + } else { + tcd->saddr = sg_dma_address(sg); + tcd->daddr = per_paddr; + tcd->soff = 4; + tcd->doff = 0; + } + + tcd->ssize = MPC_DMA_TSIZE_4; + tcd->dsize = MPC_DMA_TSIZE_4; + + len = sg_dma_len(sg); + tcd->nbytes = tcd_nunits * 4; + if (!IS_ALIGNED(len, tcd->nbytes)) + goto err_prep; + + iter = len / tcd->nbytes; + if (iter >= 1 << 15) { + /* len is too big */ + goto err_prep; + } + /* citer_linkch contains the high bits of iter */ + tcd->biter = iter & 0x1ff; + tcd->biter_linkch = iter >> 9; + tcd->citer = tcd->biter; + tcd->citer_linkch = tcd->biter_linkch; + + tcd->e_sg = 
0; + tcd->d_req = 1; + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&mchan->lock, iflags); + list_add_tail(&mdesc->node, &mchan->prepared); + spin_unlock_irqrestore(&mchan->lock, iflags); + } + + return &mdesc->desc; + +err_prep: + /* Put the descriptor back */ + spin_lock_irqsave(&mchan->lock, iflags); + list_add_tail(&mdesc->node, &mchan->free); + spin_unlock_irqrestore(&mchan->lock, iflags); + + return NULL; +} + +static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct mpc_dma_chan *mchan; + struct mpc_dma *mdma; + struct dma_slave_config *cfg; + unsigned long flags; + + mchan = dma_chan_to_mpc_dma_chan(chan); + switch (cmd) { + case DMA_TERMINATE_ALL: + /* Disable channel requests */ + mdma = dma_chan_to_mpc_dma(chan); + + spin_lock_irqsave(&mchan->lock, flags); + + out_8(&mdma->regs->dmacerq, chan->chan_id); + list_splice_tail_init(&mchan->prepared, &mchan->free); + list_splice_tail_init(&mchan->queued, &mchan->free); + list_splice_tail_init(&mchan->active, &mchan->free); + + spin_unlock_irqrestore(&mchan->lock, flags); + + return 0; + + case DMA_SLAVE_CONFIG: + /* + * Software constraints: + * - only transfers between a peripheral device and + * memory are supported; + * - only peripheral devices with 4-byte FIFO access register + * are supported; + * - minimal transfer chunk is 4 bytes and consequently + * source and destination addresses must be 4-byte aligned + * and transfer size must be aligned on (4 * maxburst) + * boundary; + * - during the transfer RAM address is being incremented by + * the size of minimal transfer chunk; + * - peripheral port's address is constant during the transfer. + */ + + cfg = (void *)arg; + + if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || + cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || + !IS_ALIGNED(cfg->src_addr, 4) || + !IS_ALIGNED(cfg->dst_addr, 4)) { + return -EINVAL; + } + + spin_lock_irqsave(&mchan->lock, flags); + + mchan->src_per_paddr = cfg->src_addr; + mchan->src_tcd_nunits = cfg->src_maxburst; + mchan->dst_per_paddr = cfg->dst_addr; + mchan->dst_tcd_nunits = cfg->dst_maxburst; + + /* Apply defaults */ + if (mchan->src_tcd_nunits == 0) + mchan->src_tcd_nunits = 1; + if (mchan->dst_tcd_nunits == 0) + mchan->dst_tcd_nunits = 1; + + spin_unlock_irqrestore(&mchan->lock, flags); + + return 0; + + default: + /* Unknown command */ + break; + } + + return -ENXIO; +} + static int mpc_dma_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; @@ -733,9 +964,12 @@ static int mpc_dma_probe(struct platform_device *op) dma->device_issue_pending = mpc_dma_issue_pending; dma->device_tx_status = mpc_dma_tx_status; dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; + dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; + dma->device_control = mpc_dma_device_control; INIT_LIST_HEAD(&dma->channels); dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma_cap_set(DMA_SLAVE, dma->cap_mask); for (i = 0; i < dma->chancnt; i++) { mchan = &mdma->channels[i]; From 4828b493695b1e7edd6bd9017c2b010b0d545381 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Fri, 16 May 2014 16:17:37 +0200 Subject: [PATCH 27/45] dma: pch_dma: Fix Kconfig dependencies The pch_dma driver is for a companion chip to the Intel Atom E600 series processors. These are 32-bit x86 processors so the driver is only needed on X86_32. Add COMPILE_TEST as an alternative, so that the driver can still be build-tested elsewhere. 
Signed-off-by: Jean Delvare Cc: Dan Williams Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 5c5863842de9..9ad43abe26ff 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -234,7 +234,7 @@ config PL330_DMA config PCH_DMA tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" - depends on PCI && X86 + depends on PCI && (X86_32 || COMPILE_TEST) select DMA_ENGINE help Enable support for Intel EG20T PCH DMA engine. From d2f78e95e42a9130002c76f1a1f76e657a4b4004 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 May 2014 12:01:48 +0300 Subject: [PATCH 28/45] dmaengine: dw: enable clock before access hclk signal is a bus clock. So, it means we have to have it enabled during access to the DMA controller. This patch makes sure that we enable clock before access to the device, though it currently works on Intel hardware. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 7a740769c2fa..009dc62f9437 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1493,6 +1493,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dw->regs = chip->regs; chip->dw = dw; + dw->clk = devm_clk_get(chip->dev, "hclk"); + if (IS_ERR(dw->clk)) + return PTR_ERR(dw->clk); + clk_prepare_enable(dw->clk); + dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); autocfg = dw_params >> DW_PARAMS_EN & 0x1; @@ -1520,11 +1525,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) if (!dw->chan) return -ENOMEM; - dw->clk = devm_clk_get(chip->dev, "hclk"); - if (IS_ERR(dw->clk)) - return PTR_ERR(dw->clk); - clk_prepare_enable(dw->clk); - /* Get hardware configuration parameters */ if (autocfg) { max_blk_size = dma_readl(dw, MAX_BLK_SIZE); From 8be4f523b48087765defd18483c66b268b3286e5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 May 2014 12:01:49 +0300 Subject: [PATCH 29/45] dmaengine: dw: fix regression in dw_probe() function The commit dbde5c29 "dw_dmac: use devm_* functions to simplify code" turns probe function to use devm_* helpers and simultaneously brings a regression. We have to 1) call clk_disable_unprepare() on error path, and 2) check error code of clk_enable_prepare(). First part was done in the original code, second one is an update. 
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 009dc62f9437..d539019fbe60 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1496,7 +1496,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dw->clk = devm_clk_get(chip->dev, "hclk"); if (IS_ERR(dw->clk)) return PTR_ERR(dw->clk); - clk_prepare_enable(dw->clk); + err = clk_prepare_enable(dw->clk); + if (err) + return err; dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); autocfg = dw_params >> DW_PARAMS_EN & 0x1; @@ -1505,15 +1507,19 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) if (!pdata && autocfg) { pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; + if (!pdata) { + err = -ENOMEM; + goto err_pdata; + } /* Fill platform data with the default values */ pdata->is_private = true; pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; pdata->chan_priority = CHAN_PRIORITY_ASCENDING; - } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) - return -EINVAL; + } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { + err = -EINVAL; + goto err_pdata; + } if (autocfg) nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; @@ -1522,8 +1528,10 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan), GFP_KERNEL); - if (!dw->chan) - return -ENOMEM; + if (!dw->chan) { + err = -ENOMEM; + goto err_pdata; + } /* Get hardware configuration parameters */ if (autocfg) { @@ -1553,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) sizeof(struct dw_desc), 4, 0); if (!dw->desc_pool) { dev_err(chip->dev, "No memory for descriptors dma pool\n"); - return -ENOMEM; + err = -ENOMEM; + goto err_pdata; } tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); @@ -1561,7 +1570,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, "dw_dmac", dw); if (err) - return err; + goto err_pdata; INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { @@ -1656,6 +1665,10 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dma_async_device_register(&dw->dma); return 0; + +err_pdata: + clk_disable_unprepare(dw->clk); + return err; } EXPORT_SYMBOL_GPL(dw_dma_probe); @@ -1676,6 +1689,8 @@ int dw_dma_remove(struct dw_dma_chip *chip) channel_clear_bit(dw, CH_EN, dwc->mask); } + clk_disable_unprepare(dw->clk); + return 0; } EXPORT_SYMBOL_GPL(dw_dma_remove); From 1222934e54b63752b4b1ad305d6a7f632a3ae46d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 8 May 2014 12:01:50 +0300 Subject: [PATCH 30/45] dmaengine: dw: check return code of dma_async_device_register() dma_async_device_register() may return non-zero error code. In such case we have to follow error path. 
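Continuing the sketch above, the tail of such a probe routine unwinds in reverse order of setup once registration itself is treated as fallible (placeholder foo_* names again; the real patch follows):

        err = request_irq(chip->irq, foo_interrupt, IRQF_SHARED,
                          "foo_dma", foo);
        if (err)
                goto err_clk;

        err = dma_async_device_register(&foo->dma);
        if (err)                /* previously this return code was ignored */
                goto err_irq;

        return 0;

err_irq:
        free_irq(chip->irq, foo);
err_clk:
        clk_disable_unprepare(foo->clk);
        return err;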
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index d539019fbe60..a27ded53ab4f 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1659,13 +1659,17 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dma_writel(dw, CFG, DW_CFG_DMA_EN); + err = dma_async_device_register(&dw->dma); + if (err) + goto err_dma_register; + dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", nr_channels); - dma_async_device_register(&dw->dma); - return 0; +err_dma_register: + free_irq(chip->irq, dw); err_pdata: clk_disable_unprepare(dw->clk); return err; From a15783c34f6304e85f7d810e71316c1c3ba1fe69 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 22 May 2014 18:50:49 +0530 Subject: [PATCH 31/45] dmaengine: pch: fix compilation for alpha target commit 4828b493 introduced COMPILE_TEST for this driver, and this causes a compile failure on alpha as kzalloc wasn't available for this arch in the included headers, so explicitly add slab.h Reported-by: kbuild test robot Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index b4b8cc69c2bf..9f9ca9fe5ce6 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include From ffe59b29308791c9600ab86de2ca070d742c107d Mon Sep 17 00:00:00 2001 From: Jiada Wang Date: Wed, 14 May 2014 18:22:13 -0700 Subject: [PATCH 32/45] dmaengine: imx: correct sdmac->status for cyclic dma tx In the cyclic dma tx handler sdma_handle_channel_loop(), the SDMA channel status is set to either DMA_ERROR or DMA_IN_PROGRESS based on each period's status. This has the following issues: 1) If one period's status is BD_RROR, then the channel status will be set to DMA_ERROR, but it will be overwritten to DMA_IN_PROGRESS if the following periods are OK. 2) A DMA client may call sdma_control(DMA_TERMINATE_ALL) to stop the cyclic dma operation; the sdma channel status will be set to DMA_ERROR, but if this handler is called afterwards, the channel status will again be overwritten to DMA_IN_PROGRESS. The following dmaengine_prep_dma_cyclic() will then always fail, as the channel status is DMA_IN_PROGRESS. Since in cyclic dma tx the channel status is initially set to DMA_IN_PROGRESS, the driver only needs to change it to DMA_ERROR when something goes wrong (one period's status is wrong, or the transfer is stopped by the client explicitly). Signed-off-by: Jiada Wang Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 19041cefabb1..128714622bf5 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -607,8 +607,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac) if (bd->mode.status & BD_RROR) sdmac->status = DMA_ERROR; - else - sdmac->status = DMA_IN_PROGRESS; bd->mode.status |= BD_DONE; sdmac->buf_tail++; From 6915f45fb9748ff578025e95506b6aec3734b886 Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Tue, 20 May 2014 23:23:01 +0300 Subject: [PATCH 33/45] dmaengine: s3c24xx-dma: Process whole SG chain Due to a redundant 'break' in the loop, the driver processed only the first chunk.
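In other words, the prep_slave_sg callback has to emit one hardware chunk per scatterlist entry; an early break silently truncates the transfer after the first entry. A minimal sketch of the intended loop, shown for the device-to-memory direction only and with foo_* placeholders rather than the exact s3c24xx structures:

        struct scatterlist *sg;
        int i;

        /* one descriptor chunk per sg entry -- no early exit */
        for_each_sg(sgl, sg, sg_len, i) {
                dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
                if (!dsg) {
                        foo_free_txd(txd);      /* drop the chunks built so far */
                        return NULL;
                }
                list_add_tail(&dsg->node, &txd->dsg_list);

                dsg->len = sg_dma_len(sg);
                dsg->src_addr = slave_addr;             /* fixed peripheral FIFO */
                dsg->dst_addr = sg_dma_address(sg);     /* walks the sg list */
        }

        return vchan_tx_prep(&foochan->vc, &txd->vd, flags);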
Signed-off-by: Vasily Khoruzhick Reviewed-by: Heiko Stuebner Signed-off-by: Vinod Koul --- drivers/dma/s3c24xx-dma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index b209a0f17344..6528eeda1575 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -961,7 +961,6 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( dsg->src_addr = slave_addr; dsg->dst_addr = sg_dma_address(sg); } - break; } return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); From c3e175e52f82991dbb70a012212b510f7a4b3726 Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Tue, 20 May 2014 23:23:02 +0300 Subject: [PATCH 34/45] dmaengine: s3c24xx-dma: Add cyclic transfer support Many audio interface drivers require support of cyclic transfers to work correctly, for example Samsung ASoC DMA driver. This patch adds support for cyclic transfers to the s3c24xx-dma driver Signed-off-by: Vasily Khoruzhick Reviewed-by: Heiko Stuebner Signed-off-by: Vinod Koul --- drivers/dma/s3c24xx-dma.c | 112 +++++++++++++++++++++++++++++++++++++- 1 file changed, 111 insertions(+), 1 deletion(-) diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 6528eeda1575..012520c9fd79 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -164,6 +164,7 @@ struct s3c24xx_sg { * @disrcc: value for source control register * @didstc: value for destination control register * @dcon: base value for dcon register + * @cyclic: indicate cyclic transfer */ struct s3c24xx_txd { struct virt_dma_desc vd; @@ -173,6 +174,7 @@ struct s3c24xx_txd { u32 disrcc; u32 didstc; u32 dcon; + bool cyclic; }; struct s3c24xx_dma_chan; @@ -669,8 +671,10 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data) /* when more sg's are in this txd, start the next one */ if (!list_is_last(txd->at, &txd->dsg_list)) { txd->at = txd->at->next; + if (txd->cyclic) + vchan_cyclic_callback(&txd->vd); s3c24xx_dma_start_next_sg(s3cchan, txd); - } else { + } else if (!txd->cyclic) { s3cchan->at = NULL; vchan_cookie_complete(&txd->vd); @@ -682,6 +686,12 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data) s3c24xx_dma_start_next_txd(s3cchan); else s3c24xx_dma_phy_free(s3cchan); + } else { + vchan_cyclic_callback(&txd->vd); + + /* Cyclic: reset at beginning */ + txd->at = txd->dsg_list.next; + s3c24xx_dma_start_next_sg(s3cchan, txd); } } spin_unlock(&s3cchan->vc.lock); @@ -877,6 +887,104 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); } +static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, + enum dma_transfer_direction direction, unsigned long flags, + void *context) +{ + struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); + struct s3c24xx_dma_engine *s3cdma = s3cchan->host; + const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; + struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; + struct s3c24xx_txd *txd; + struct s3c24xx_sg *dsg; + unsigned sg_len; + dma_addr_t slave_addr; + u32 hwcfg = 0; + int i; + + dev_dbg(&s3cdma->pdev->dev, + "prepare cyclic transaction of %zu bytes with period %zu from %s\n", + size, period, s3cchan->name); + + if (!is_slave_direction(direction)) { + dev_err(&s3cdma->pdev->dev, + "direction %d unsupported\n", direction); + return NULL; + } + + txd = s3c24xx_dma_get_txd(); + if (!txd) + return NULL; + + txd->cyclic = 1; + + if (cdata->handshake) + 
txd->dcon |= S3C24XX_DCON_HANDSHAKE; + + switch (cdata->bus) { + case S3C24XX_DMA_APB: + txd->dcon |= S3C24XX_DCON_SYNC_PCLK; + hwcfg |= S3C24XX_DISRCC_LOC_APB; + break; + case S3C24XX_DMA_AHB: + txd->dcon |= S3C24XX_DCON_SYNC_HCLK; + hwcfg |= S3C24XX_DISRCC_LOC_AHB; + break; + } + + /* + * Always assume our peripheral desintation is a fixed + * address in memory. + */ + hwcfg |= S3C24XX_DISRCC_INC_FIXED; + + /* + * Individual dma operations are requested by the slave, + * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). + */ + txd->dcon |= S3C24XX_DCON_SERV_SINGLE; + + if (direction == DMA_MEM_TO_DEV) { + txd->disrcc = S3C24XX_DISRCC_LOC_AHB | + S3C24XX_DISRCC_INC_INCREMENT; + txd->didstc = hwcfg; + slave_addr = s3cchan->cfg.dst_addr; + txd->width = s3cchan->cfg.dst_addr_width; + } else { + txd->disrcc = hwcfg; + txd->didstc = S3C24XX_DIDSTC_LOC_AHB | + S3C24XX_DIDSTC_INC_INCREMENT; + slave_addr = s3cchan->cfg.src_addr; + txd->width = s3cchan->cfg.src_addr_width; + } + + sg_len = size / period; + + for (i = 0; i < sg_len; i++) { + dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); + if (!dsg) { + s3c24xx_dma_free_txd(txd); + return NULL; + } + list_add_tail(&dsg->node, &txd->dsg_list); + + dsg->len = period; + /* Check last period length */ + if (i == sg_len - 1) + dsg->len = size - period * i; + if (direction == DMA_MEM_TO_DEV) { + dsg->src_addr = addr + period * i; + dsg->dst_addr = slave_addr; + } else { /* DMA_DEV_TO_MEM */ + dsg->src_addr = slave_addr; + dsg->dst_addr = addr + period * i; + } + } + + return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); +} + static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -1197,6 +1305,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) /* Initialize slave engine for SoC internal dedicated peripherals */ dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask); dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); s3cdma->slave.dev = &pdev->dev; s3cdma->slave.device_alloc_chan_resources = @@ -1206,6 +1315,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; + s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; s3cdma->slave.device_control = s3c24xx_dma_control; /* Register as many memcpy channels as there are physical channels */ From a5cdc1c15562c0c68e7c418693f3f884ec838a06 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 13 May 2014 01:02:11 +0200 Subject: [PATCH 35/45] dmaengine: shdmac: Keep #include sorted alphabetically This helps detecting duplicate includes. 
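Going back to the cyclic support added in the s3c24xx patch above: a client such as an audio driver consumes it through the generic dmaengine API. A rough usage sketch, with the channel, addresses and callback names all hypothetical:

        struct dma_slave_config cfg = {
                .dst_addr       = fifo_phys,    /* peripheral FIFO address */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 1,
        };
        struct dma_async_tx_descriptor *desc;

        dmaengine_slave_config(chan, &cfg);

        /* One descriptor covers the whole ring buffer; the engine wraps
         * from the last period back to the first, and the callback fires
         * once per completed period. */
        desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
                                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!desc)
                return -EINVAL;

        desc->callback = foo_period_elapsed;
        desc->callback_param = foo_substream;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

This maps directly onto the driver code above: each period becomes one s3c24xx_sg entry, and the vchan_cyclic_callback() call in the interrupt handler is what ends up invoking foo_period_elapsed().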
Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/shdmac.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index dda7e7563f5d..7313537e5c54 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -18,21 +18,21 @@ * */ +#include +#include #include +#include +#include #include +#include #include #include -#include -#include -#include -#include #include #include -#include -#include -#include -#include #include +#include +#include +#include #include "../dmaengine.h" #include "shdma.h" From c46b9af26f1a971f7727328ba1cf4d702545cf2f Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 13 May 2014 01:02:12 +0200 Subject: [PATCH 36/45] dmaengine: shdmac: Include linux/err.h linux/err.h isn't implicitly included by the current headers on all platforms, resulting in compilation failures due to implicit declarations of IS_ERR and PTR_ERR. Fix this by including linux/err.h. Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/shdmac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 7313537e5c54..146d5df926db 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include From cf5a23b78717bb547ab6c9267f05d4fe803bd853 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 13 May 2014 01:02:13 +0200 Subject: [PATCH 37/45] dmaengine: sudmac: Keep #include sorted alphabetically This helps detecting duplicate includes. Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/sudmac.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c index 4e7df43b50d6..b5b7e54ac845 100644 --- a/drivers/dma/sh/sudmac.c +++ b/drivers/dma/sh/sudmac.c @@ -14,12 +14,12 @@ * published by the Free Software Foundation. */ -#include -#include -#include -#include #include +#include +#include +#include #include +#include #include struct sudmac_chan { From 830c863987aa26c2133241b61fe22bf466ccb7cc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 13 May 2014 01:02:14 +0200 Subject: [PATCH 38/45] dmaengine: sudmac: Include linux/err.h linux/err.h isn't implicitly included by the current headers on all platforms, resulting in compilation failures due to implicit declarations of IS_ERR and PTR_ERR. Fix this by including linux/err.h. Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/sudmac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c index b5b7e54ac845..3ce103909896 100644 --- a/drivers/dma/sh/sudmac.c +++ b/drivers/dma/sh/sudmac.c @@ -15,6 +15,7 @@ */ #include +#include #include #include #include From 494ead469e1b0db060a96627c879fb444fe3970a Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 13 May 2014 01:02:15 +0200 Subject: [PATCH 39/45] dmaengine: rcar-hpbdma: Include linux/err.h linux/err.h isn't implicitly included by the current headers on all platforms, resulting in compilation failures due to implicit declarations of IS_ERR and PTR_ERR. Fix this by including linux/err.h. 
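What all of these linux/err.h additions protect is the kernel's encoded-error-pointer convention: such functions return either a valid pointer or an errno value packed into the pointer with ERR_PTR(), and the decoding helpers are declared only in <linux/err.h>. A minimal consumer-side sketch:

#include <linux/err.h>

        /* IS_ERR()/PTR_ERR() come from <linux/err.h>; without the explicit
         * include they become implicit declarations and the build breaks. */
        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

Relying on some other header to pull in <linux/err.h> works only by accident, which is exactly the fragility these patches remove.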
Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-hpbdma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index 3083d901a414..b212d9471ab5 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include From ebc6d2d9c8cd48c351da84dd467c0edddc149ded Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 13 May 2014 01:02:16 +0200 Subject: [PATCH 40/45] dmaengine: shdma: Enable driver compilation with COMPILE_TEST This helps increasing build testing coverage. Signed-off-by: Laurent Pinchart Acked-by: Simon Horman Signed-off-by: Vinod Koul --- drivers/dma/sh/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index b4c813831006..0f719816c91b 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -4,7 +4,7 @@ config SH_DMAE_BASE bool "Renesas SuperH DMA Engine support" - depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) + depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST depends on !SH_DMA_API default y select DMA_ENGINE From 9d296ec6fd4051c806677d39b3ce11d48d3d90d5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 23 May 2014 10:34:35 +0200 Subject: [PATCH 41/45] of: dma: Grammar s/requests/request/, s/used required/required/ Signed-off-by: Geert Uytterhoeven Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/dma.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/dma.txt b/Documentation/devicetree/bindings/dma/dma.txt index 8f504e6bae14..82104271e754 100644 --- a/Documentation/devicetree/bindings/dma/dma.txt +++ b/Documentation/devicetree/bindings/dma/dma.txt @@ -14,7 +14,7 @@ Required property: Optional properties: - dma-channels: Number of DMA channels supported by the controller. -- dma-requests: Number of DMA requests signals supported by the +- dma-requests: Number of DMA request signals supported by the controller. Example: @@ -44,7 +44,7 @@ Required property: #dma-cells property in the node referenced by phandle containing DMA controller specific information. This typically contains a DMA request line number or a - channel number, but can contain any data that is used + channel number, but can contain any data that is required for configuring a channel. - dma-names: Contains one identifier string for each DMA specifier in the dmas property. The specific strings that can be used From 654fa24965aafcb4bb8a1a14400c5ecea3be76b6 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 26 May 2014 18:08:25 -0300 Subject: [PATCH 42/45] dmaengine: Kconfig: Update MXS_DMA help text to include MX6Q/MX6DL The APBX-DMA block is also found on MX6Q/MX6DL chips. Update the help text accordingly. Signed-off-by: Fabio Estevam Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 9ad43abe26ff..049059f28065 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -269,7 +269,7 @@ config MXS_DMA select DMA_ENGINE help Support the MXS DMA engine. This engine including APBH-DMA - and APBX-DMA is integrated into Freescale i.MX23/28 chips. + and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips. 
config EP93XX_DMA bool "Cirrus Logic EP93xx DMA support" From a68765430772fac92ad4f0dd9e33d31b4276546e Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 2 Jun 2014 09:22:03 +0530 Subject: [PATCH 43/45] dmaengine: sh: make shdma_prep_dma_cyclic static kbuild test robot reports that shdma_prep_dma_cyclic should be static, since symbol is not declared, quick check revails that is the case >> drivers/dma/sh/shdma-base.c:660:32: sparse: symbol 'shdma_prep_dma_cyclic' >> was not declared. Should it be static? Reported-by: kbuild test robot Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 974794cdb6ed..66b4a35f7bd1 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -657,7 +657,7 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg( direction, flags, false); } -struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( +static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags, void *context) From 9d9f71a804314e7d50e2fa9e6e61bc77e2d6ae1c Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 2 Jun 2014 09:32:59 +0530 Subject: [PATCH 44/45] dmaengine: sh: fix print specifier warnings As documented in Documentation/printk-formats.txt we should use %zu/%zx specifiers for size_t type variables for the code to compile on different architectures. This is uncovered as COMPILE_TEST has been enabled recently for this driver drivers/dma/sh/shdma-base.c: In function 'shdma_prep_dma_cyclic': >> drivers/dma/sh/shdma-base.c:683:4: warning: format '%d' expects argument of >> type 'int', but argument 4 has type 'size_t' [-Wformat=] __func__, buf_len, period_len, slave_id); >> drivers/dma/sh/shdma-base.c:683:4: warning: format '%d' expects argument of >> type 'int', but argument 5 has type 'size_t' [-Wformat=] Reported-by: kbuild test robot Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 66b4a35f7bd1..591b9d8cfd7f 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -679,7 +679,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( /* Someone calling slave DMA on a generic channel? */ if (slave_id < 0 || (buf_len < period_len)) { dev_warn(schan->dev, - "%s: bad parameter: buf_len=%d, period_len=%d, id=%d\n", + "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", __func__, buf_len, period_len, slave_id); return NULL; } From 877d8425072b50965f6f04ea3a127928f66db72f Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 2 Jun 2014 09:40:00 +0530 Subject: [PATCH 45/45] dmaengine: sh: don't use dynamic static allocation dynamic stack allocation in kernel is considered bad as kernel stack is low and we get warns on few archs as reported by kbuild test robot >> drivers/dma/sh/shdma-base.c:671:32: sparse: Variable length array is used. 
>> drivers/dma/sh/shdma-base.c:701:1: warning: 'shdma_prep_dma_cyclic' uses >> dynamic stack allocation [enabled by default] Fix this by making a static array of 32 which should be sufficient for shdma_prep_dma_cyclic which only user in kernel is audio and 32 periods for audio seems quite sufficient atm Reported-by: kbuild test robot Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 591b9d8cfd7f..b35007e21e6b 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -657,6 +657,8 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg( direction, flags, false); } +#define SHDMA_MAX_SG_LEN 32 + static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, @@ -668,7 +670,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( unsigned int sg_len = buf_len / period_len; int slave_id = schan->slave_id; dma_addr_t slave_addr; - struct scatterlist sgl[sg_len]; + struct scatterlist sgl[SHDMA_MAX_SG_LEN]; int i; if (!chan) @@ -676,6 +678,12 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( BUG_ON(!schan->desc_num); + if (sg_len > SHDMA_MAX_SG_LEN) { + dev_err(schan->dev, "sg length %d exceds limit %d", + sg_len, SHDMA_MAX_SG_LEN); + return NULL; + } + /* Someone calling slave DMA on a generic channel? */ if (slave_id < 0 || (buf_len < period_len)) { dev_warn(schan->dev,