Merge branch 'topic/qcom' into for-linus
commit 346ea25e81
@@ -395,6 +395,13 @@ where to put them)
      when DMA_CTRL_REUSE is already set
   - Terminating the channel
 
+  * DMA_PREP_CMD
+    - If set, the client driver tells DMA controller that passed data in DMA
+      API is command data.
+    - Interpretation of command data is DMA controller specific. It can be
+      used for issuing commands to other peripherals/register reads/register
+      writes for which the descriptor should be in different format from
+      normal data descriptors.
+
 General Design Notes
 --------------------
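Note: for context, a client requests such a command transfer by passing the new flag to the usual slave-DMA prep call. A minimal sketch, assuming a hypothetical client whose command payload is already DMA-mapped in a scatterlist (the function and variable names are illustrative, not part of this merge):

#include <linux/dmaengine.h>

/* Hypothetical client helper: submit command elements instead of data. */
static struct dma_async_tx_descriptor *
client_prep_cmd_xfer(struct dma_chan *chan, struct scatterlist *cmd_sgl,
		     unsigned int sg_len)
{
	/*
	 * DMA_PREP_CMD tells the controller driver that the payload holds
	 * command data, so it can build its hardware descriptors in the
	 * controller-specific command format.
	 */
	return dmaengine_prep_slave_sg(chan, cmd_sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_CMD | DMA_PREP_INTERRUPT);
}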
@@ -65,6 +65,7 @@ struct bam_desc_hw {
 #define DESC_FLAG_EOT BIT(14)
 #define DESC_FLAG_EOB BIT(13)
 #define DESC_FLAG_NWD BIT(12)
+#define DESC_FLAG_CMD BIT(11)
 
 struct bam_async_desc {
 	struct virt_dma_desc vd;
@@ -645,6 +646,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
 	unsigned int curr_offset = 0;
 
 	do {
+		if (flags & DMA_PREP_CMD)
+			desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
+
 		desc->addr = cpu_to_le32(sg_dma_address(sg) +
 					 curr_offset);
@@ -960,7 +964,7 @@ static void bam_start_dma(struct bam_chan *bchan)
 
 	/* set any special flags on the last descriptor */
 	if (async_desc->num_desc == async_desc->xfer_len)
-		desc[async_desc->xfer_len - 1].flags =
+		desc[async_desc->xfer_len - 1].flags |=
 			cpu_to_le16(async_desc->flags);
 	else
 		desc[async_desc->xfer_len - 1].flags |=
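The switch from '=' to '|=' above matters for command transfers: bam_prep_slave_sg() may already have set DESC_FLAG_CMD on the last descriptor, and a plain assignment in bam_start_dma() would discard it. Schematically (an illustrative fragment, not literal driver code):

/* last descriptor of a command transfer keeps both sets of flags */
desc[last].flags |= cpu_to_le16(DESC_FLAG_CMD);      /* set in bam_prep_slave_sg()       */
desc[last].flags |= cpu_to_le16(async_desc->flags);  /* EOT/NWD added in bam_start_dma() */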
@@ -411,7 +411,40 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 
 	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
-				     src, dest, len, flags);
+				     src, dest, len, flags,
+				     HIDMA_TRE_MEMCPY);
+
+	/* Place descriptor in prepared list */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_add_tail(&mdesc->node, &mchan->prepared);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	return &mdesc->desc;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
+		      size_t len, unsigned long flags)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_desc *mdesc = NULL;
+	struct hidma_dev *mdma = mchan->dmadev;
+	unsigned long irqflags;
+
+	/* Get free descriptor */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	if (!list_empty(&mchan->free)) {
+		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+		list_del(&mdesc->node);
+	}
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	if (!mdesc)
+		return NULL;
+
+	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+				     value, dest, len, flags,
+				     HIDMA_TRE_MEMSET);
 
 	/* Place descriptor in prepared list */
 	spin_lock_irqsave(&mchan->lock, irqflags);
@@ -776,6 +809,7 @@ static int hidma_probe(struct platform_device *pdev)
 	pm_runtime_get_sync(dmadev->ddev.dev);
 
 	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
 	if (WARN_ON(!pdev->dev.dma_mask)) {
 		rc = -ENXIO;
 		goto dmafree;
@@ -786,6 +820,7 @@ static int hidma_probe(struct platform_device *pdev)
 	dmadev->dev_trca = trca;
 	dmadev->trca_resource = trca_resource;
 	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
 	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
 	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
 	dmadev->ddev.device_tx_status = hidma_tx_status;
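With the DMA_MEMSET capability and the device_prep_dma_memset callback registered, a client can now ask a HIDMA channel to fill a buffer. A minimal sketch, assuming the dmaengine_prep_dma_memset() wrapper from <linux/dmaengine.h> is available (on kernels without it, the channel's device_prep_dma_memset callback would be used instead); the buffer handling is illustrative:

#include <linux/dmaengine.h>

/* Hypothetical client: zero a DMA-mapped buffer through a HIDMA channel. */
static int client_zero_buffer(struct dma_chan *chan, dma_addr_t dst, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memset(chan, dst, 0, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}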
@@ -28,6 +28,11 @@
 #define HIDMA_TRE_DEST_LOW_IDX 4
 #define HIDMA_TRE_DEST_HI_IDX 5
 
+enum tre_type {
+	HIDMA_TRE_MEMCPY = 3,
+	HIDMA_TRE_MEMSET = 4,
+};
+
 struct hidma_tre {
 	atomic_t allocated;		/* if this channel is allocated */
 	bool queued;			/* flag whether this is pending */
@@ -150,7 +155,7 @@ void hidma_ll_start(struct hidma_lldev *llhndl);
 int hidma_ll_disable(struct hidma_lldev *lldev);
 int hidma_ll_enable(struct hidma_lldev *llhndl);
 void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
-	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
 void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
 int hidma_ll_setup(struct hidma_lldev *lldev);
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
@@ -105,10 +105,6 @@ enum ch_state {
 	HIDMA_CH_STOPPED = 4,
 };
 
-enum tre_type {
-	HIDMA_TRE_MEMCPY = 3,
-};
-
 enum err_code {
 	HIDMA_EVRE_STATUS_COMPLETE = 1,
 	HIDMA_EVRE_STATUS_ERROR = 4,
@@ -174,8 +170,7 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
 	tre->err_info = 0;
 	tre->lldev = lldev;
 	tre_local = &tre->tre_local[0];
-	tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
-	tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
+	tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
 	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
 	*tre_ch = i;
 	if (callback)
@@ -607,7 +602,7 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
 
 void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
 				  dma_addr_t src, dma_addr_t dest, u32 len,
-				  u32 flags)
+				  u32 flags, u32 txntype)
 {
 	struct hidma_tre *tre;
 	u32 *tre_local;
@@ -626,6 +621,8 @@ void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
 	}
 
 	tre_local = &tre->tre_local[0];
+	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
+	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
 	tre_local[HIDMA_TRE_LEN_IDX] = len;
 	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
 	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
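Taken together with hidma_ll_request() above, the TRE config word now carries the transaction type in bits 7:0 (HIDMA_TRE_MEMCPY or HIDMA_TRE_MEMSET), the channel index in bits 15:8, and the IEOB interrupt enable in bit 16. An illustrative helper, not part of the patch, assembling the same word:

#include <linux/bits.h>

/* Hypothetical helper mirroring how tre_local[HIDMA_TRE_CFG_IDX] is built. */
static u32 hidma_tre_cfg_word(u8 chidx, u32 txntype)
{
	u32 cfg = 0;

	cfg |= txntype & GENMASK(7, 0);	/* bits 7:0  - transaction type */
	cfg |= (u32)chidx << 8;		/* bits 15:8 - channel index    */
	cfg |= BIT(16);			/* bit 16    - IEOB             */

	return cfg;
}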
@@ -28,7 +28,7 @@
 
 #include "hidma_mgmt.h"
 
-#define HIDMA_QOS_N_OFFSET		0x300
+#define HIDMA_QOS_N_OFFSET		0x700
 #define HIDMA_CFG_OFFSET		0x400
 #define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
 #define HIDMA_MAX_XACTIONS_OFFSET	0x420
@@ -227,7 +227,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		goto out;
 	}
 
-	if (max_write_request) {
+	if (max_write_request &&
+	    (max_write_request != mgmtdev->max_write_request)) {
 		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
 			 max_write_request);
 		mgmtdev->max_write_request = max_write_request;
@@ -240,7 +241,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
 		goto out;
 	}
-	if (max_read_request) {
+	if (max_read_request &&
+	    (max_read_request != mgmtdev->max_read_request)) {
 		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
 			 max_read_request);
 		mgmtdev->max_read_request = max_read_request;
@@ -253,7 +255,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "max-write-transactions missing\n");
 		goto out;
 	}
-	if (max_wr_xactions) {
+	if (max_wr_xactions &&
+	    (max_wr_xactions != mgmtdev->max_wr_xactions)) {
 		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
 			 max_wr_xactions);
 		mgmtdev->max_wr_xactions = max_wr_xactions;
@@ -266,7 +269,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "max-read-transactions missing\n");
 		goto out;
 	}
-	if (max_rd_xactions) {
+	if (max_rd_xactions &&
+	    (max_rd_xactions != mgmtdev->max_rd_xactions)) {
 		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
 			 max_rd_xactions);
 		mgmtdev->max_rd_xactions = max_rd_xactions;
@@ -354,7 +358,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
 	struct platform_device_info pdevinfo;
 	struct of_phandle_args out_irq;
 	struct device_node *child;
-	struct resource *res;
+	struct resource *res = NULL;
 	const __be32 *cell;
 	int ret = 0, size, i, num;
 	u64 addr, addr_size;
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_BAM_DMA_H
+#define _QCOM_BAM_DMA_H
+
+#include <asm/byteorder.h>
+
+/*
+ * This data type corresponds to the native Command Element
+ * supported by BAM DMA Engine.
+ *
+ * @cmd_and_addr - upper 8 bits command and lower 24 bits register address.
+ * @data - for write command: content to be written into peripheral register.
+ *	   for read command: dest addr to write peripheral register value.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct bam_cmd_element {
+	__le32 cmd_and_addr;
+	__le32 data;
+	__le32 mask;
+	__le32 reserved;
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum bam_command_type {
+	BAM_WRITE_COMMAND = 0,
+	BAM_READ_COMMAND,
+};
+
+/*
+ * prep_bam_ce_le32 - Wrapper function to prepare a single BAM command
+ * element with the data already in le32 format.
+ *
+ * @bam_ce: bam command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read in le32
+ */
+static inline void
+bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr,
+		 enum bam_command_type cmd, __le32 data)
+{
+	bam_ce->cmd_and_addr =
+		cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24));
+	bam_ce->data = data;
+	bam_ce->mask = cpu_to_le32(0xffffffff);
+}
+
+/*
+ * bam_prep_ce - Wrapper function to prepare a single BAM command element
+ * with the data.
+ *
+ * @bam_ce: BAM command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read
+ */
+static inline void
+bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr,
+	    enum bam_command_type cmd, u32 data)
+{
+	bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data));
+}
+#endif
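Peripheral drivers are expected to fill an array of these command elements and hand it to the BAM channel as a DMA_PREP_CMD slave transfer (see the bam_prep_slave_sg() change above). A minimal sketch, assuming hypothetical peripheral register offsets and an already-allocated element array:

#include <linux/dma/qcom_bam_dma.h>

#define PERIPH_CFG_REG		0x100	/* hypothetical peripheral register */
#define PERIPH_STATUS_REG	0x104	/* hypothetical peripheral register */

/* Hypothetical: build two command elements, one register write and one read. */
static void client_fill_cmd_elements(struct bam_cmd_element *ce, u32 cfg_val,
				     u32 status_dest_lo)
{
	/* CE 0: write cfg_val into the peripheral's CFG register */
	bam_prep_ce(&ce[0], PERIPH_CFG_REG, BAM_WRITE_COMMAND, cfg_val);

	/* CE 1: read STATUS; BAM deposits the value at the given address */
	bam_prep_ce(&ce[1], PERIPH_STATUS_REG, BAM_READ_COMMAND, status_dest_lo);
}

The filled array would then be DMA-mapped, wrapped in a scatterlist, and passed to dmaengine_prep_slave_sg() with DMA_PREP_CMD set, so the BAM driver marks the hardware descriptors with DESC_FLAG_CMD.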
@@ -186,6 +186,9 @@ struct dma_interleaved_template {
  *  on the result of this operation
  * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
  *  cleared or freed
+ * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
+ *  data and the descriptor should be in different format from normal
+ *  data descriptors.
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -195,6 +198,7 @@ enum dma_ctrl_flags {
 	DMA_PREP_CONTINUE = (1 << 4),
 	DMA_PREP_FENCE = (1 << 5),
 	DMA_CTRL_REUSE = (1 << 6),
+	DMA_PREP_CMD = (1 << 7),
 };
 
 /**