bus: mhi: ep: Add support for async DMA read operation

[ Upstream commit 2547beb00ddb40e55b773970622421d978f71473 ]

Like the async DMA write operation, let's add support for an async DMA read
operation. In the async path, the data will be read from the transfer ring
continuously, and once the controller driver notifies the stack through the
completion callback (mhi_ep_read_completion), the client driver will be
notified with the read data and the completion event will be sent to the
host for the respective ring element (if requested by the host).

Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Stable-dep-of: c7d0b2db5bc5 ("bus: mhi: ep: Do not allocate memory for MHI objects from DMA zone")
Signed-off-by: Sasha Levin <sashal@kernel.org>
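
For orientation, below is a minimal sketch of the controller-driver side of
this contract. It is not part of the patch: the DMA queueing helper is
hypothetical, and only the mhi_ep_buf_info callback contract (cb/cb_buf,
invoked once the DMA read completes) comes from this series.

/* Sketch: a controller driver's read_async() implementation (hypothetical) */
static void example_dma_done(void *param)
{
	struct mhi_ep_buf_info *buf_info = param;

	/*
	 * Hand the completed buffer back to the MHI EP stack; for UL reads
	 * this lands in mhi_ep_read_completion() below.
	 */
	if (buf_info->cb)
		buf_info->cb(buf_info);
}

static int example_read_async(struct mhi_ep_cntrl *mhi_cntrl,
			      struct mhi_ep_buf_info *buf_info)
{
	/*
	 * Queue a DMA read of buf_info->size bytes from the host address
	 * buf_info->host_addr into buf_info->dev_addr, then return without
	 * blocking. example_queue_dma() is a made-up helper standing in
	 * for the controller's DMA engine API.
	 */
	return example_queue_dma(mhi_cntrl, buf_info->host_addr,
				 buf_info->dev_addr, buf_info->size,
				 example_dma_done, buf_info);
}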

@@ -318,17 +318,81 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
 }
 EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
 
+static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
+{
+	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
+	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+	struct mhi_result result = {};
+	int ret;
+
+	if (mhi_chan->xfer_cb) {
+		result.buf_addr = buf_info->cb_buf;
+		result.dir = mhi_chan->dir;
+		result.bytes_xferd = buf_info->size;
+
+		mhi_chan->xfer_cb(mhi_dev, &result);
+	}
+
+	/*
+	 * The host will split the data packet into multiple TREs if it can't fit
+	 * the packet in a single TRE. In that case, CHAIN flag will be set by the
+	 * host for all TREs except the last one.
+	 */
+	if (buf_info->code != MHI_EV_CC_OVERFLOW) {
+		if (MHI_TRE_DATA_GET_CHAIN(el)) {
+			/*
+			 * IEOB (Interrupt on End of Block) flag will be set by the host if
+			 * it expects the completion event for all TREs of a TD.
+			 */
+			if (MHI_TRE_DATA_GET_IEOB(el)) {
+				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+							MHI_TRE_DATA_GET_LEN(el),
+							MHI_EV_CC_EOB);
+				if (ret < 0) {
+					dev_err(&mhi_chan->mhi_dev->dev,
+						"Error sending transfer compl. event\n");
+					goto err_free_tre_buf;
+				}
+			}
+		} else {
+			/*
+			 * IEOT (Interrupt on End of Transfer) flag will be set by the host
+			 * for the last TRE of the TD and expects the completion event for
+			 * the same.
+			 */
+			if (MHI_TRE_DATA_GET_IEOT(el)) {
+				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+							MHI_TRE_DATA_GET_LEN(el),
+							MHI_EV_CC_EOT);
+				if (ret < 0) {
+					dev_err(&mhi_chan->mhi_dev->dev,
+						"Error sending transfer compl. event\n");
+					goto err_free_tre_buf;
+				}
+			}
+		}
+	}
+
+	mhi_ep_ring_inc_index(ring);
+
+err_free_tre_buf:
+	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
+}
+
 static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
-			       struct mhi_ep_ring *ring,
-			       struct mhi_result *result,
-			       u32 len)
+			       struct mhi_ep_ring *ring)
 {
 	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	size_t tr_len, read_offset, write_offset;
 	struct mhi_ep_buf_info buf_info = {};
+	u32 len = MHI_EP_DEFAULT_MTU;
 	struct mhi_ring_element *el;
 	bool tr_done = false;
+	void *buf_addr;
 	u32 buf_left;
 	int ret;
 
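As a summary of the event selection in mhi_ep_read_completion() above: the
completion code sent to the host depends on the CHAIN/IEOB/IEOT flags of the
TRE just consumed. A hypothetical helper expressing the same decision (not
part of the patch):

static int example_completion_code(struct mhi_ring_element *el)
{
	/* Chained TRE (not the last one of the TD): EOB, if requested */
	if (MHI_TRE_DATA_GET_CHAIN(el))
		return MHI_TRE_DATA_GET_IEOB(el) ? MHI_EV_CC_EOB : -1;

	/* Last TRE of the TD: EOT, if requested. -1 means no event. */
	return MHI_TRE_DATA_GET_IEOT(el) ? MHI_EV_CC_EOT : -1;
}

Note that the OVERFLOW case (partial TRE consumption) is checked before
either flag, as in the callback above: no EOB/EOT event is sent while the
TRE still has bytes left to read.
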
@@ -358,83 +422,50 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
 		write_offset = len - buf_left;
 
+		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+		if (!buf_addr)
+			return -ENOMEM;
+
 		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
-		buf_info.dev_addr = result->buf_addr + write_offset;
+		buf_info.dev_addr = buf_addr + write_offset;
 		buf_info.size = tr_len;
+		buf_info.cb = mhi_ep_read_completion;
+		buf_info.cb_buf = buf_addr;
+		buf_info.mhi_dev = mhi_chan->mhi_dev;
+
+		if (mhi_chan->tre_bytes_left - tr_len)
+			buf_info.code = MHI_EV_CC_OVERFLOW;
 
 		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
-		ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
+		ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
 		if (ret < 0) {
 			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
-			return ret;
+			goto err_free_buf_addr;
 		}
 
 		buf_left -= tr_len;
 		mhi_chan->tre_bytes_left -= tr_len;
 
-		/*
-		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
-		 * read completely:
-		 *
-		 * 1. Send completion event to the host based on the flags set in TRE.
-		 * 2. Increment the local read offset of the transfer ring.
-		 */
 		if (!mhi_chan->tre_bytes_left) {
-			/*
-			 * The host will split the data packet into multiple TREs if it can't fit
-			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
-			 * host for all TREs except the last one.
-			 */
-			if (MHI_TRE_DATA_GET_CHAIN(el)) {
-				/*
-				 * IEOB (Interrupt on End of Block) flag will be set by the host if
-				 * it expects the completion event for all TREs of a TD.
-				 */
-				if (MHI_TRE_DATA_GET_IEOB(el)) {
-					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-								MHI_TRE_DATA_GET_LEN(el),
-								MHI_EV_CC_EOB);
-					if (ret < 0) {
-						dev_err(&mhi_chan->mhi_dev->dev,
-							"Error sending transfer compl. event\n");
-						return ret;
-					}
-				}
-			} else {
-				/*
-				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
-				 * for the last TRE of the TD and expects the completion event for
-				 * the same.
-				 */
-				if (MHI_TRE_DATA_GET_IEOT(el)) {
-					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-								MHI_TRE_DATA_GET_LEN(el),
-								MHI_EV_CC_EOT);
-					if (ret < 0) {
-						dev_err(&mhi_chan->mhi_dev->dev,
-							"Error sending transfer compl. event\n");
-						return ret;
-					}
-				}
-
+			if (MHI_TRE_DATA_GET_IEOT(el))
 				tr_done = true;
-			}
 
 			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
-			mhi_ep_ring_inc_index(ring);
 		}
-
-		result->bytes_xferd += tr_len;
 	} while (buf_left && !tr_done);
 
 	return 0;
+
+err_free_buf_addr:
+	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
+
+	return ret;
 }
 
-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
 {
 	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
 	struct mhi_result result = {};
-	u32 len = MHI_EP_DEFAULT_MTU;
 	struct mhi_ep_chan *mhi_chan;
 	int ret;
 
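A worked example for the chunking above, assuming for illustration that
MHI_EP_DEFAULT_MTU is 8192 bytes (the actual value is defined elsewhere in
the driver): a 20000-byte TRE is consumed by three calls to
mhi_ep_read_channel(), driven by the loop in mhi_ep_process_ch_ring():

/*
 * call 1: tr_len = 8192, tre_bytes_left 20000 -> 11808, code = OVERFLOW
 * call 2: tr_len = 8192, tre_bytes_left 11808 ->  3616, code = OVERFLOW
 * call 3: tr_len = 3616, tre_bytes_left  3616 ->     0, code left unset
 *
 * Each call allocates its own buffer from tre_buf_cache and queues one
 * async read. Only the last one (code != MHI_EV_CC_OVERFLOW) lets
 * mhi_ep_read_completion() send the EOB/EOT completion event for the TRE.
 */
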
@@ -455,27 +486,15 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 	} else {
 		/* UL channel */
-		result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
-		if (!result.buf_addr)
-			return -ENOMEM;
-
 		do {
-			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+			ret = mhi_ep_read_channel(mhi_cntrl, ring);
 			if (ret < 0) {
 				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
-				kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 				return ret;
 			}
 
-			result.dir = mhi_chan->dir;
-			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
-			result.bytes_xferd = 0;
-			memset(result.buf_addr, 0, len);
-
 			/* Read until the ring becomes empty */
 		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
-
-		kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 	}
 
 	return 0;
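To summarize the buffer lifetime in the UL path after this change (restating
the two functions above, not new behavior):

/*
 * mhi_ep_read_channel():     buf_addr = kmem_cache_zalloc(tre_buf_cache)
 * mhi_cntrl->read_async():   DMA from the host into buf_addr, non-blocking
 * mhi_ep_read_completion():  xfer_cb() hands the data to the client, then
 *                            kmem_cache_free(tre_buf_cache, buf_info->cb_buf)
 *
 * If read_async() itself fails, mhi_ep_read_channel() frees the buffer
 * via its err_free_buf_addr label instead.
 */
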
@@ -781,7 +800,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	struct mhi_ep_ring_item *itr, *tmp;
-	struct mhi_ring_element *el;
 	struct mhi_ep_ring *ring;
 	struct mhi_ep_chan *chan;
 	unsigned long flags;
@@ -826,10 +844,8 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 			continue;
 		}
 
-		el = &ring->ring_cache[ring->rd_offset];
-
 		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
-		ret = mhi_ep_process_ch_ring(ring, el);
+		ret = mhi_ep_process_ch_ring(ring);
 		if (ret) {
 			dev_err(dev, "Error processing ring for channel (%u): %d\n",
 				ring->ch_id, ret);