bus: mhi: core: Replace DMA allocation wrappers with original APIs
There is nothing special done within the mhi_alloc_coherent() and mhi_free_coherent() wrapper functions; they simply forward to the equivalent DMA allocation APIs. Replace them with direct calls so the implementation is clear and direct.

Link: https://lore.kernel.org/r/1624392428-9328-1-git-send-email-bbhatt@codeaurora.org
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/20210802051255.5771-10-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 06e2c4a9ea
commit 2e36190de6
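For orientation before the diff: the removed helpers were thin pass-throughs to the generic DMA API, so callers now pass mhi_cntrl->cntrl_dev themselves. Below is a minimal sketch of the calling-convention change; mhi_example_xfer() is a hypothetical caller used only for illustration, while the "before" calls match the wrappers removed from internal.h further down.

#include <linux/dma-mapping.h>
#include <linux/mhi.h>

/* mhi_example_xfer() is a hypothetical caller, shown only to illustrate the change. */
static int mhi_example_xfer(struct mhi_controller *mhi_cntrl, size_t size)
{
	dma_addr_t dma_addr;
	void *buf;

	/* Before: buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL); */
	buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
				 GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... fill and hand the buffer to the device ... */

	/* Before: mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); */
	dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);

	return 0;
}

The diff below applies exactly this substitution at every call site and drops the wrappers from internal.h.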
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -302,8 +302,8 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
 	struct mhi_buf *mhi_buf = image_info->mhi_buf;
 
 	for (i = 0; i < image_info->entries; i++, mhi_buf++)
-		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
-				  mhi_buf->dma_addr);
+		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+				  mhi_buf->buf, mhi_buf->dma_addr);
 
 	kfree(image_info->mhi_buf);
 	kfree(image_info);
@@ -339,8 +339,8 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 		vec_size = sizeof(struct bhi_vec_entry) * i;
 
 		mhi_buf->len = vec_size;
-		mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
-						  &mhi_buf->dma_addr,
+		mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+						  vec_size, &mhi_buf->dma_addr,
 						  GFP_KERNEL);
 		if (!mhi_buf->buf)
 			goto error_alloc_segment;
@@ -354,8 +354,8 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
 
 error_alloc_segment:
 	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
-		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
-				  mhi_buf->dma_addr);
+		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+				  mhi_buf->buf, mhi_buf->dma_addr);
 
 error_alloc_mhi_buf:
 	kfree(img_info);
@@ -442,7 +442,8 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 	if (size > firmware->size)
 		size = firmware->size;
 
-	buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+	buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
+				 GFP_KERNEL);
 	if (!buf) {
 		release_firmware(firmware);
 		goto error_fw_load;
@@ -451,7 +452,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 	/* Download image using BHI */
 	memcpy(buf, firmware->data, size);
 	ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
-	mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
+	dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
 
 	/* Error or in EDL mode, we're done */
 	if (ret) {
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -129,7 +129,7 @@ static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
 			 u64 len)
 {
 	ring->alloc_size = len + (len - 1);
-	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
+	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
 					       &ring->dma_handle, GFP_KERNEL);
 	if (!ring->pre_aligned)
 		return -ENOMEM;
@@ -221,13 +221,13 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
 	mhi_cmd = mhi_cntrl->mhi_cmd;
 	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
 		ring = &mhi_cmd->ring;
-		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
 				  ring->pre_aligned, ring->dma_handle);
 		ring->base = NULL;
 		ring->iommu_base = 0;
 	}
 
-	mhi_free_coherent(mhi_cntrl,
+	dma_free_coherent(mhi_cntrl->cntrl_dev,
 			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
 			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
 
@@ -237,17 +237,17 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
 			continue;
 
 		ring = &mhi_event->ring;
-		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
 				  ring->pre_aligned, ring->dma_handle);
 		ring->base = NULL;
 		ring->iommu_base = 0;
 	}
 
-	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
 			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
 			  mhi_ctxt->er_ctxt_addr);
 
-	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
 			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
 			  mhi_ctxt->chan_ctxt_addr);
 
@@ -275,7 +275,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
 		return -ENOMEM;
 
 	/* Setup channel ctxt */
-	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
+	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
 						 sizeof(*mhi_ctxt->chan_ctxt) *
 						 mhi_cntrl->max_chan,
 						 &mhi_ctxt->chan_ctxt_addr,
@@ -307,7 +307,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
 	}
 
 	/* Setup event context */
-	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
+	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
 					       sizeof(*mhi_ctxt->er_ctxt) *
 					       mhi_cntrl->total_ev_rings,
 					       &mhi_ctxt->er_ctxt_addr,
@@ -354,7 +354,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
 
 	/* Setup cmd context */
 	ret = -ENOMEM;
-	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
+	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
 						sizeof(*mhi_ctxt->cmd_ctxt) *
 						NR_OF_CMD_RINGS,
 						&mhi_ctxt->cmd_ctxt_addr,
@@ -389,10 +389,10 @@ error_alloc_cmd:
 	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
 		struct mhi_ring *ring = &mhi_cmd->ring;
 
-		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
 				  ring->pre_aligned, ring->dma_handle);
 	}
-	mhi_free_coherent(mhi_cntrl,
+	dma_free_coherent(mhi_cntrl->cntrl_dev,
 			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
 			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
 	i = mhi_cntrl->total_ev_rings;
@@ -405,15 +405,15 @@ error_alloc_er:
 		if (mhi_event->offload_ev)
 			continue;
 
-		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
 				  ring->pre_aligned, ring->dma_handle);
 	}
-	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
 			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
 			  mhi_ctxt->er_ctxt_addr);
 
 error_alloc_er_ctxt:
-	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
 			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
 			  mhi_ctxt->chan_ctxt_addr);
 
@@ -567,7 +567,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
 	if (!chan_ctxt->rbase) /* Already uninitialized */
 		return;
 
-	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
 			  tre_ring->pre_aligned, tre_ring->dma_handle);
 	vfree(buf_ring->base);
 
@@ -610,7 +610,7 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
 	buf_ring->base = vzalloc(buf_ring->len);
 
 	if (!buf_ring->base) {
-		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
 				  tre_ring->pre_aligned, tre_ring->dma_handle);
 		return -ENOMEM;
 	}
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -690,26 +690,6 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
 void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
 		    struct mhi_chan *mhi_chan);
 
-/* Memory allocation methods */
-static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
-				       size_t size,
-				       dma_addr_t *dma_handle,
-				       gfp_t gfp)
-{
-	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle,
-				       gfp);
-
-	return buf;
-}
-
-static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
-				     size_t size,
-				     void *vaddr,
-				     dma_addr_t dma_handle)
-{
-	dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle);
-}
-
 /* Event processing methods */
 void mhi_ctrl_ev_task(unsigned long data);
 void mhi_ev_task(unsigned long data);
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -193,7 +193,7 @@ int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
 			  struct mhi_buf_info *buf_info)
 {
-	void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
+	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
 				       &buf_info->p_addr, GFP_ATOMIC);
 
 	if (!buf)
@@ -220,8 +220,8 @@ void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
 	if (buf_info->dir == DMA_FROM_DEVICE)
 		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
 
-	mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
-			  buf_info->p_addr);
+	dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
+			  buf_info->bb_addr, buf_info->p_addr);
 }
 
 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,