[media] coda/imx-vdoa: always wait for job completion

As long as only one CODA context is running, we get alternating device_run()
and wait_for_completion() calls, but when more than one CODA context is
active, other VDOA slots can be inserted between those calls for one context.

Make sure to wait on job completion before running a different context and
before destroying the currently active context.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Signed-off-by: Hans Verkuil <hansverk@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Lucas Stach 2017-04-05 10:09:55 -03:00 committed by Mauro Carvalho Chehab
parent fb2be08f8c
commit 8b8470fdf4
1 changed file with 33 additions and 16 deletions
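
The diff below implements the scheme from the commit message: each context counts the jobs it has submitted and the jobs the interrupt handler has completed, vdoa_wait_for_completion() becomes a no-op once the two counters match, and both vdoa_device_run() and vdoa_context_destroy() drain the currently active context before reprogramming the shared hardware or freeing the context. As a rough illustration of that pattern, here is a minimal userspace model; this is a sketch only: the helper names (fake_irq, ctx_device_run, ctx_wait_for_completion, ctx_destroy) are invented for the illustration, a pthread condition variable stands in for the kernel's struct completion, there is no timeout handling, and the demo is single-threaded, so it is not the driver code itself.

/* Minimal userspace model of the serialization added by this patch.
 * Build: cc -pthread vdoa_model.c -o vdoa_model
 */
#include <pthread.h>
#include <stdio.h>

struct vdoa_ctx {
	unsigned int submitted_job;
	unsigned int completed_job;
	pthread_mutex_t lock;
	pthread_cond_t done;		/* stand-in for struct completion */
};

static struct vdoa_ctx *curr_ctx;	/* models vdoa->curr_ctx */

/* models the end-of-transfer interrupt: mark the running job done */
static void fake_irq(struct vdoa_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->completed_job++;
	pthread_cond_signal(&ctx->done);
	pthread_mutex_unlock(&ctx->lock);
}

/* models vdoa_wait_for_completion(): returns immediately if drained */
static void ctx_wait_for_completion(struct vdoa_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	while (ctx->completed_job != ctx->submitted_job)
		pthread_cond_wait(&ctx->done, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
}

/* models vdoa_device_run(): drain whoever owns the hardware first */
static void ctx_device_run(struct vdoa_ctx *ctx)
{
	if (curr_ctx)
		ctx_wait_for_completion(curr_ctx);
	curr_ctx = ctx;

	pthread_mutex_lock(&ctx->lock);
	ctx->submitted_job++;	/* the "hardware" is now busy for ctx */
	pthread_mutex_unlock(&ctx->lock);
}

/* models vdoa_context_destroy(): never tear down a context mid-transfer */
static void ctx_destroy(struct vdoa_ctx *ctx)
{
	if (curr_ctx == ctx) {
		ctx_wait_for_completion(ctx);
		curr_ctx = NULL;
	}
}

int main(void)
{
	struct vdoa_ctx a = { .lock = PTHREAD_MUTEX_INITIALIZER,
			      .done = PTHREAD_COND_INITIALIZER };
	struct vdoa_ctx b = { .lock = PTHREAD_MUTEX_INITIALIZER,
			      .done = PTHREAD_COND_INITIALIZER };

	ctx_device_run(&a);	/* context A owns the hardware */
	fake_irq(&a);		/* A's transfer finishes */
	ctx_device_run(&b);	/* switching to B drains A (no-op here) */
	fake_irq(&b);
	ctx_destroy(&b);	/* waits until B has no job in flight */
	ctx_destroy(&a);

	printf("A %u/%u done, B %u/%u done\n",
	       a.completed_job, a.submitted_job,
	       b.completed_job, b.submitted_job);
	return 0;
}

The point of the submitted/completed counters, in the patch as in this sketch, is that the wait can be called unconditionally: when nothing is in flight for the context, it returns immediately instead of blocking on a completion that will never be signalled.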


@@ -101,6 +101,8 @@ struct vdoa_ctx {
 	struct vdoa_data	*vdoa;
 	struct completion	completion;
 	struct vdoa_q_data	q_data[2];
+	unsigned int		submitted_job;
+	unsigned int		completed_job;
 };
 
 static irqreturn_t vdoa_irq_handler(int irq, void *data)
@@ -114,7 +116,7 @@ static irqreturn_t vdoa_irq_handler(int irq, void *data)
 
 	curr_ctx = vdoa->curr_ctx;
 	if (!curr_ctx) {
-		dev_dbg(vdoa->dev,
+		dev_warn(vdoa->dev,
 			"Instance released before the end of transaction\n");
 		return IRQ_HANDLED;
 	}
@@ -127,19 +129,44 @@ static irqreturn_t vdoa_irq_handler(int irq, void *data)
 	} else if (!(val & VDOAIST_EOT)) {
 		dev_warn(vdoa->dev, "Spurious interrupt\n");
 	}
+	curr_ctx->completed_job++;
 	complete(&curr_ctx->completion);
 
 	return IRQ_HANDLED;
 }
 
+int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
+{
+	struct vdoa_data *vdoa = ctx->vdoa;
+
+	if (ctx->submitted_job == ctx->completed_job)
+		return 0;
+
+	if (!wait_for_completion_timeout(&ctx->completion,
+					 msecs_to_jiffies(300))) {
+		dev_err(vdoa->dev,
+			"Timeout waiting for transfer result\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(vdoa_wait_for_completion);
+
 void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src)
 {
 	struct vdoa_q_data *src_q_data, *dst_q_data;
 	struct vdoa_data *vdoa = ctx->vdoa;
 	u32 val;
 
+	if (vdoa->curr_ctx)
+		vdoa_wait_for_completion(vdoa->curr_ctx);
+
 	vdoa->curr_ctx = ctx;
 
+	reinit_completion(&ctx->completion);
+	ctx->submitted_job++;
+
 	src_q_data = &ctx->q_data[V4L2_M2M_SRC];
 	dst_q_data = &ctx->q_data[V4L2_M2M_DST];
 
@@ -177,21 +204,6 @@ void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src)
 }
 EXPORT_SYMBOL(vdoa_device_run);
 
-int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
-{
-	struct vdoa_data *vdoa = ctx->vdoa;
-
-	if (!wait_for_completion_timeout(&ctx->completion,
-					 msecs_to_jiffies(300))) {
-		dev_err(vdoa->dev,
-			"Timeout waiting for transfer result\n");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(vdoa_wait_for_completion);
-
 struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa)
 {
 	struct vdoa_ctx *ctx;
@@ -218,6 +230,11 @@ void vdoa_context_destroy(struct vdoa_ctx *ctx)
 {
 	struct vdoa_data *vdoa = ctx->vdoa;
 
+	if (vdoa->curr_ctx == ctx) {
+		vdoa_wait_for_completion(vdoa->curr_ctx);
+		vdoa->curr_ctx = NULL;
+	}
+
 	clk_disable_unprepare(vdoa->vdoa_clk);
 	kfree(ctx);
 }