OpenCloudOS-Kernel/include/linux/platform_data/edma.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * TI EDMA definitions
 *
 * Copyright (C) 2006-2013 Texas Instruments.
 */
/*
 * This EDMA3 programming framework exposes two basic kinds of resource:
 *
 *  Channel	Triggers transfers, usually from a hardware event but
 *		also manually or by "chaining" from DMA completions.
 *		Each channel is coupled to a Parameter RAM (PaRAM) slot.
 *
 *  Slot	Each PaRAM slot holds a DMA transfer descriptor (PaRAM
 *		"set"), source and destination addresses, a link to a
 *		next PaRAM slot (if any), options for the transfer, and
 *		instructions for updating those addresses.  There are
 *		more than twice as many slots as event channels.
 *
 * Each PaRAM set describes a sequence of transfers, either for one large
 * buffer or for several discontiguous smaller buffers.  An EDMA transfer
 * is driven only from a channel, which performs the transfers specified
 * in its PaRAM slot until there are no more transfers.  When that last
 * transfer completes, the "link" field may be used to reload the channel's
 * PaRAM slot with a new transfer descriptor.
 *
 * The EDMA Channel Controller (CC) maps requests from channels into physical
 * Transfer Controller (TC) requests when the channel triggers (by hardware
 * or software events, or by chaining).  The two physical DMA channels
 * provided by the TCs are thus shared by many logical channels.
 *
 * DaVinci hardware also has a "QDMA" mechanism which is not currently
 * supported through this interface.  (DSP firmware uses it though.)
 */
#ifndef EDMA_H_
#define EDMA_H_

enum dma_event_q {
	EVENTQ_0 = 0,
	EVENTQ_1 = 1,
	EVENTQ_2 = 2,
	EVENTQ_3 = 3,
	EVENTQ_DEFAULT = -1
};

#define EDMA_CTLR_CHAN(ctlr, chan)	(((ctlr) << 16) | (chan))
#define EDMA_CTLR(i)			((i) >> 16)
#define EDMA_CHAN_SLOT(i)		((i) & 0xffff)

#define EDMA_FILTER_PARAM(ctlr, chan)	((int[]) { EDMA_CTLR_CHAN(ctlr, chan) })
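
/*
 * Illustrative only: EDMA_CTLR_CHAN() packs a controller/channel pair
 * into one integer, and EDMA_CTLR()/EDMA_CHAN_SLOT() recover the two
 * halves again:
 *
 *	int id = EDMA_CTLR_CHAN(1, 12);		// 0x1000c
 *	int ctlr = EDMA_CTLR(id);		// 1
 *	int chan = EDMA_CHAN_SLOT(id);		// 12
 */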

struct edma_rsv_info {
	const s16	(*rsv_chans)[2];
	const s16	(*rsv_slots)[2];
};
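
/*
 * A sketch of how a board file might reserve channels and PaRAM slots
 * for another core (e.g. a DSP).  The {offset, count} pairing and the
 * {-1, -1} terminator follow the DaVinci board-file convention; all
 * names below are made up:
 *
 *	static const s16 board_rsv_chans[][2] = {
 *		// {offset, count}
 *		{ 8, 6},
 *		{-1, -1},
 *	};
 *	static const s16 board_rsv_slots[][2] = {
 *		{48, 16},
 *		{-1, -1},
 *	};
 *	static struct edma_rsv_info board_rsv = {
 *		.rsv_chans	= board_rsv_chans,
 *		.rsv_slots	= board_rsv_slots,
 *	};
 */
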
struct dma_slave_map;

/*
 * platform_data for EDMA driver; an illustrative instance follows
 * the struct definition below.
 */
struct edma_soc_info {
	/*
	 * Default queue is expected to be a low-priority queue.
	 * This way, long transfers on the default queue started
	 * by the codec engine will not cause audio defects.
	 */
	enum dma_event_q	default_queue;

	/* Resource reservation for other cores */
	struct edma_rsv_info	*rsv;

	/* List of channels allocated for memcpy, terminated with -1 */
	s32	*memcpy_channels;

	s8	(*queue_priority_mapping)[2];
	const s16	(*xbar_chans)[2];
	const struct dma_slave_map *slave_map;
	int slavecnt;
};
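
/*
 * A minimal sketch of an edma_soc_info instance.  All names and values
 * below are hypothetical, loosely patterned after DaVinci/AM335x
 * platform code:
 *
 *	static s32 board_memcpy_channels[] = { 20, 21, -1 };
 *
 *	// {event queue, priority} pairs, terminated with {-1, -1}
 *	static s8 board_queue_priority_mapping[][2] = {
 *		{0, 3},
 *		{1, 7},
 *		{-1, -1},
 *	};
 *
 *	static struct edma_soc_info board_edma_pdata = {
 *		.default_queue		= EVENTQ_1,
 *		.memcpy_channels	= board_memcpy_channels,
 *		.queue_priority_mapping	= board_queue_priority_mapping,
 *	};
 */
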
#endif