drm/msm: Move cmdstream dumping out of sched kthread
This is something that can block for arbitrary amounts of time as
userspace consumes from the FIFO. So we don't really want this to be
in the fence signaling path.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/532617/
commit  171f580e32
parent  38e27a6fbf
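For context on why a blocking dump does not belong on the scheduler kthread: the rd debugfs mechanism writes the command stream into a bounded FIFO that userspace drains at its own pace, so the producer can sleep for as long as the reader is slow (or stalled). The following is a minimal, self-contained userspace sketch of that blocking pattern, using pthreads and hypothetical names rather than the kernel's wait_event()/circ_buf machinery; it is an illustration of the failure mode only, not the driver's code.

/*
 * Illustrative sketch only -- hypothetical names, userspace pthreads instead
 * of the kernel's wait_event()/circ_buf helpers.  A producer writing a large
 * "dump" into a small FIFO makes progress only as fast as the reader drains
 * it, which is why such a write must not sit on a fence-signaling path.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FIFO_SZ 64

static struct {
        char buf[FIFO_SZ];
        size_t head, tail, used;
        pthread_mutex_t lock;
        pthread_cond_t space, data;
} fifo = {
        .lock  = PTHREAD_MUTEX_INITIALIZER,
        .space = PTHREAD_COND_INITIALIZER,
        .data  = PTHREAD_COND_INITIALIZER,
};

/* Producer: blocks whenever the FIFO is full, for as long as the reader likes. */
static void fifo_write(const char *src, size_t len)
{
        pthread_mutex_lock(&fifo.lock);
        for (size_t i = 0; i < len; i++) {
                while (fifo.used == FIFO_SZ)
                        pthread_cond_wait(&fifo.space, &fifo.lock); /* unbounded wait */
                fifo.buf[fifo.head] = src[i];
                fifo.head = (fifo.head + 1) % FIFO_SZ;
                fifo.used++;
                pthread_cond_signal(&fifo.data);
        }
        pthread_mutex_unlock(&fifo.lock);
}

/* Consumer: stands in for userspace reading the dump at its own pace. */
static void *slow_reader(void *arg)
{
        (void)arg;
        for (;;) {
                pthread_mutex_lock(&fifo.lock);
                while (fifo.used == 0)
                        pthread_cond_wait(&fifo.data, &fifo.lock);
                fifo.tail = (fifo.tail + 1) % FIFO_SZ;
                fifo.used--;
                pthread_cond_signal(&fifo.space);
                pthread_mutex_unlock(&fifo.lock);
                usleep(10 * 1000); /* the reader's pace dictates the writer's progress */
        }
        return NULL;
}

int main(void)
{
        pthread_t reader;
        char dump[256];

        memset(dump, 'x', sizeof(dump));
        pthread_create(&reader, NULL, slow_reader, NULL);

        /* A 256-byte dump into a 64-byte FIFO completes only after ~2s of draining. */
        fifo_write(dump, sizeof(dump));
        printf("dump finished only after the reader drained it\n");
        return 0;
}

With the dump moved into msm_ioctl_gem_submit(), any such wait is charged to the submitting process before drm_sched_entity_push_job(), instead of stalling the scheduler kthread that signals hardware fences; per the comment change below, rd->write_lock now serializes the producer side where gpu->lock used to.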
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -948,6 +948,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
         /* The scheduler owns a ref now: */
         msm_gem_submit_get(submit);
 
+        msm_rd_dump_submit(priv->rd, submit, NULL);
+
         drm_sched_entity_push_job(&submit->base);
 
         args->fence = submit->fence_id;
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -748,8 +748,6 @@ void msm_gpu_retire(struct msm_gpu *gpu)
 /* add bo's to gpu's ring, and kick gpu: */
 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-        struct drm_device *dev = gpu->dev;
-        struct msm_drm_private *priv = dev->dev_private;
         struct msm_ringbuffer *ring = submit->ring;
         unsigned long flags;
 
@@ -761,8 +759,6 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
         submit->seqno = submit->hw_fence->seqno;
 
-        msm_rd_dump_submit(priv->rd, submit, NULL);
-
         update_sw_cntrs(gpu);
 
         /*
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -83,15 +83,10 @@ struct msm_rd_state {
 
         bool open;
 
-        /* current submit to read out: */
-        struct msm_gem_submit *submit;
-
         /* fifo access is synchronized on the producer side by
-         * gpu->lock held by submit code (otherwise we could
-         * end up w/ cmds logged in different order than they
-         * were executed). And read_lock synchronizes the reads
+         * write_lock. And read_lock synchronizes the reads
          */
-        struct mutex read_lock;
+        struct mutex read_lock, write_lock;
 
         wait_queue_head_t fifo_event;
         struct circ_buf fifo;
@@ -243,6 +238,7 @@ static void rd_cleanup(struct msm_rd_state *rd)
                 return;
 
         mutex_destroy(&rd->read_lock);
+        mutex_destroy(&rd->write_lock);
         kfree(rd);
 }
 
@@ -258,6 +254,7 @@ static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
         rd->fifo.buf = rd->buf;
 
         mutex_init(&rd->read_lock);
+        mutex_init(&rd->write_lock);
 
         init_waitqueue_head(&rd->fifo_event);
 
@@ -338,19 +335,15 @@ static void snapshot_buf(struct msm_rd_state *rd,
         if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
                 return;
 
-        msm_gem_lock(&obj->base);
         buf = msm_gem_get_vaddr_active(&obj->base);
         if (IS_ERR(buf))
-                goto out_unlock;
+                return;
 
         buf += offset;
 
         rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
 
         msm_gem_put_vaddr_locked(&obj->base);
-
-out_unlock:
-        msm_gem_unlock(&obj->base);
 }
 
 /* called under gpu->lock */
@@ -364,10 +357,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
         if (!rd->open)
                 return;
 
-        /* writing into fifo is serialized by caller, and
-         * rd->read_lock is used to serialize the reads
-         */
-        WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+        mutex_lock(&rd->write_lock);
 
         if (fmt) {
                 va_list args;
@@ -424,5 +414,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
                         break;
                 }
         }
+
+        mutex_unlock(&rd->write_lock);
 }
 #endif