rapidio: use a reference count for struct mport_dma_req
Once the dma request is passed to the DMA engine, the DMA subsystem holds
a pointer to this structure and could call the completion callback after
do_dma_request() has timed out.

The current code deals with this by putting timed-out SYNC requests on a
pending list and freeing them later, when the mport cdev device is
released. This still does not guarantee that the DMA subsystem is really
done with those transfers, so in theory dma_xfer_callback()/dma_req_free()
could be called after mport_cdev_release_dma() and could access already
freed memory.

This patch simplifies the handling by using a kref in the mport dma
request structure, so that the request is freed only when nobody uses it
anymore. This also simplifies the code a bit, as FAF (fire-and-forget)
transfers are now handled in the same way as SYNC and ASYNC transfers.
The pending list and the dma workqueue, which was used only for FAF
transfers, are no longer needed, so both are removed.

Link: http://lkml.kernel.org/r/20180405203342.GA16191@nokia.com
Signed-off-by: Ioan Nicu <ioan.nicu.ext@nokia.com>
Acked-by: Alexandre Bounine <alex.bou9@gmail.com>
Cc: Barry Wood <barry.wood@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Frank Kunz <frank.kunz@nokia.com>
Cc: Alexander Sverdlin <alexander.sverdlin@nokia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit bbd876adb8 (parent b94bb1f610)
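The core of the change is the standard kernel kref pattern: the submitter
owns one reference from kref_init(), an extra reference is taken with
kref_get() before the descriptor is handed to the DMA engine, and both the
completion callback and the submitter drop their references with
kref_put(), so whichever side finishes last frees the request. Below is a
minimal, self-contained sketch of that lifecycle; the names dma_req,
dma_done and submit_one() are illustrative stand-ins, not the driver's
own code.

#include <linux/kref.h>
#include <linux/slab.h>

/* Illustrative stand-in for struct mport_dma_req. */
struct dma_req {
        struct kref refcount;
        /* ... sg table, page list, completion, ... */
};

/* kref release: runs exactly once, when the last reference is dropped. */
static void dma_req_free(struct kref *ref)
{
        struct dma_req *req = container_of(ref, struct dma_req, refcount);

        /* unmap sg, put pages, release the channel ... */
        kfree(req);
}

/* DMA engine completion callback: drops the engine's reference. */
static void dma_done(void *param)
{
        struct dma_req *req = param;

        kref_put(&req->refcount, dma_req_free);
}

static int submit_one(void)
{
        struct dma_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (!req)
                return -ENOMEM;

        kref_init(&req->refcount);      /* reference held by the submitter */
        kref_get(&req->refcount);       /* reference held by the callback */

        /*
         * The driver hands (req, dma_done) to the engine via
         * dmaengine_submit(); the engine may run dma_done() long after
         * the submitter has timed out. Here we just invoke it directly.
         */
        dma_done(req);

        /* Submitter is done (completed, timed out, or fire-and-forget). */
        kref_put(&req->refcount, dma_req_free);
        return 0;
}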
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
         struct dma_chan *dmach;
         struct list_head async_list;
-        struct list_head pend_list;
         spinlock_t req_lock;
         struct mutex dma_lock;
         struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+        struct kref refcount;
         struct list_head node;
         struct file *filp;
         struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
         struct completion req_comp;
 };
 
-struct mport_faf_work {
-        struct work_struct work;
-        struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
         struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
         complete(&priv->comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+        struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+                        refcount);
         struct mport_cdev_priv *priv = req->priv;
         unsigned int i;
 
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
         req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
                                                NULL, NULL);
         complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-        struct mport_faf_work *work = container_of(_work,
-                                        struct mport_faf_work, work);
-        struct mport_dma_req *req = work->req;
-
-        dma_req_free(req);
-        kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-        struct mport_dma_req *req = (struct mport_dma_req *)param;
-        struct mport_faf_work *work;
-
-        work = kmalloc(sizeof(*work), GFP_ATOMIC);
-        if (!work)
-                return;
-
-        INIT_WORK(&work->work, dma_faf_cleanup);
-        work->req = req;
-        queue_work(dma_wq, &work->work);
+        kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
                 goto err_out;
         }
 
-        if (sync == RIO_TRANSFER_FAF)
-                tx->callback = dma_faf_callback;
-        else
-                tx->callback = dma_xfer_callback;
+        tx->callback = dma_xfer_callback;
         tx->callback_param = req;
 
         req->dmach = chan;
         req->sync = sync;
         req->status = DMA_IN_PROGRESS;
         init_completion(&req->req_comp);
+        kref_get(&req->refcount);
 
         cookie = dmaengine_submit(tx);
         req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
         if (dma_submit_error(cookie)) {
                 rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
                            cookie, xfer->rio_addr, xfer->length);
+                kref_put(&req->refcount, dma_req_free);
                 ret = -EIO;
                 goto err_out;
         }
@@ -860,6 +831,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
         if (!req)
                 return -ENOMEM;
 
+        kref_init(&req->refcount);
+
         ret = get_dma_channel(priv);
         if (ret) {
                 kfree(req);
@@ -968,42 +941,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
         ret = do_dma_request(req, xfer, sync, nents);
 
         if (ret >= 0) {
-                if (sync == RIO_TRANSFER_SYNC)
-                        goto sync_out;
-                return ret; /* return ASYNC cookie */
-        }
-
-        if (ret == -ETIMEDOUT || ret == -EINTR) {
-                /*
-                 * This can happen only in case of SYNC transfer.
-                 * Do not free unfinished request structure immediately.
-                 * Place it into pending list and deal with it later
-                 */
-                spin_lock(&priv->req_lock);
-                list_add_tail(&req->node, &priv->pend_list);
-                spin_unlock(&priv->req_lock);
-                return ret;
-        }
-
-
-        rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
-sync_out:
-        dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
-        sg_free_table(&req->sgt);
+                if (sync == RIO_TRANSFER_ASYNC)
+                        return ret; /* return ASYNC cookie */
+        } else {
+                rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
+        }
+
 err_pg:
-        if (page_list) {
+        if (!req->page_list) {
                 for (i = 0; i < nr_pages; i++)
                         put_page(page_list[i]);
                 kfree(page_list);
         }
 err_req:
-        if (req->map) {
-                mutex_lock(&md->buf_mutex);
-                kref_put(&req->map->ref, mport_release_mapping);
-                mutex_unlock(&md->buf_mutex);
-        }
-        put_dma_channel(priv);
-        kfree(req);
+        kref_put(&req->refcount, dma_req_free);
         return ret;
 }
 
@@ -1121,7 +1072,7 @@ static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
         ret = 0;
 
         if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
-                dma_req_free(req);
+                kref_put(&req->refcount, dma_req_free);
 
         return ret;
 
@@ -1966,7 +1917,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
 
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
         INIT_LIST_HEAD(&priv->async_list);
-        INIT_LIST_HEAD(&priv->pend_list);
         spin_lock_init(&priv->req_lock);
         mutex_init(&priv->dma_lock);
 #endif
@@ -2006,8 +1956,6 @@ static void mport_cdev_release_dma(struct file *filp)
 
         md = priv->md;
 
-        flush_workqueue(dma_wq);
-
         spin_lock(&priv->req_lock);
         if (!list_empty(&priv->async_list)) {
                 rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
@@ -2023,20 +1971,7 @@ static void mport_cdev_release_dma(struct file *filp)
                                   req->filp, req->cookie,
                                   completion_done(&req->req_comp)?"yes":"no");
                         list_del(&req->node);
-                        dma_req_free(req);
-                }
-        }
-
-        if (!list_empty(&priv->pend_list)) {
-                rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
-                           filp, current->comm, task_pid_nr(current));
-                list_for_each_entry_safe(req,
-                                         req_next, &priv->pend_list, node) {
-                        rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
-                                   req->filp, req->cookie,
-                                   completion_done(&req->req_comp)?"yes":"no");
-                        list_del(&req->node);
-                        dma_req_free(req);
+                        kref_put(&req->refcount, dma_req_free);
                 }
         }
 
@@ -2048,15 +1983,6 @@ static void mport_cdev_release_dma(struct file *filp)
                            current->comm, task_pid_nr(current), wret);
         }
 
-        spin_lock(&priv->req_lock);
-
-        if (!list_empty(&priv->pend_list)) {
-                rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
-                           filp, current->comm, task_pid_nr(current));
-        }
-
-        spin_unlock(&priv->req_lock);
-
         if (priv->dmach != priv->md->dma_chan) {
                 rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
                            filp, current->comm, task_pid_nr(current));
@@ -2573,8 +2499,6 @@ static void mport_cdev_remove(struct mport_dev *md)
         cdev_device_del(&md->cdev, &md->dev);
         mport_cdev_kill_fasync(md);
 
-        flush_workqueue(dma_wq);
-
         /* TODO: do we need to give clients some time to close file
          * descriptors? Simple wait for XX, or kref?
          */
@@ -2691,17 +2615,8 @@ static int __init mport_init(void)
                 goto err_cli;
         }
 
-        dma_wq = create_singlethread_workqueue("dma_wq");
-        if (!dma_wq) {
-                rmcd_error("failed to create DMA work queue");
-                ret = -ENOMEM;
-                goto err_wq;
-        }
-
         return 0;
 
-err_wq:
-        class_interface_unregister(&rio_mport_interface);
 err_cli:
         unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
 err_chr:
@@ -2717,7 +2632,6 @@ static void __exit mport_exit(void)
         class_interface_unregister(&rio_mport_interface);
         class_destroy(dev_class);
         unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
-        destroy_workqueue(dma_wq);
 }
 
 module_init(mport_init);