drm/virtio: implement context init: stop using drv->context when creating fence
The plumbing is all here to do this. Since we always use the default
fence context when allocating a fence, this makes no functional
difference.

We can't process just the largest fence id anymore, since it's
associated with different timelines. It's fine for fence_id 260 to
signal before 259. As such, process each fence_id individually.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Lingfeng Yang <lfy@google.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20210921232024.817-9-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
commit bbf588d7d4 (parent e8b6e76f69)
drivers/gpu/drm/virtio
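A minimal sketch of the point made in the commit message, written as a standalone userspace model rather than driver code: fence ids come from one counter, but each fence lives on its own timeline (derived as base_fence_ctx + ring_idx), and completion order only matters within a single timeline. All names below are illustrative, not driver API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model only, not driver code.  A "timeline" stands in for a
 * dma_fence context; signaling order is only meaningful within one timeline. */
struct timeline {
        uint64_t context;       /* derived as base_fence_ctx + ring_idx */
        uint64_t last_signaled; /* highest fence id signaled on this timeline */
};

/* As in the driver, fence ids come from a single global counter, but every
 * fence belongs to exactly one timeline. */
static uint64_t next_fence_id = 258;

static uint64_t emit_fence(struct timeline *t)
{
        (void)t; /* the timeline only matters for later signaling checks */
        return ++next_fence_id;
}

static bool is_signaled(const struct timeline *t, uint64_t fence_id)
{
        return fence_id <= t->last_signaled;
}

int main(void)
{
        uint64_t base_fence_ctx = 100;
        struct timeline ring0 = { .context = base_fence_ctx + 0, .last_signaled = 0 };
        struct timeline ring1 = { .context = base_fence_ctx + 1, .last_signaled = 0 };

        uint64_t f259 = emit_fence(&ring0); /* id 259, ring0's timeline */
        uint64_t f260 = emit_fence(&ring1); /* id 260, ring1's timeline */

        /* The host may finish ring1's work first.  fence_id 260 signaling before
         * 259 is fine because the two ids sit on different timelines. */
        ring1.last_signaled = f260;
        printf("fence 260 (ring1) signaled: %d\n", is_signaled(&ring1, f260)); /* 1 */
        printf("fence 259 (ring0) signaled: %d\n", is_signaled(&ring0, f259)); /* 0 */

        ring0.last_signaled = f259;
        printf("fence 259 (ring0) signaled: %d\n", is_signaled(&ring0, f259)); /* 1 */
        return 0;
}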
@@ -75,20 +75,25 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
 						uint64_t base_fence_ctx,
 						uint32_t ring_idx)
 {
+	uint64_t fence_context = base_fence_ctx + ring_idx;
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
 						GFP_KERNEL);
+
 	if (!fence)
 		return fence;
 
 	fence->drv = drv;
+	fence->ring_idx = ring_idx;
+	fence->emit_fence_info = !(base_fence_ctx == drv->context);
 
 	/* This only partially initializes the fence because the seqno is
 	 * unknown yet. The fence must not be used outside of the driver
 	 * until virtio_gpu_fence_emit is called.
 	 */
-	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
-		       0);
+
+	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
+		       fence_context, 0);
 
 	return fence;
 }
@@ -110,6 +115,13 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 
 	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
 	cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
+
+	/* Only currently defined fence param. */
+	if (fence->emit_fence_info) {
+		cmd_hdr->flags |=
+			cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
+		cmd_hdr->ring_idx = (u8)fence->ring_idx;
+	}
 }
 
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -199,7 +199,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 	struct list_head reclaim_list;
 	struct virtio_gpu_vbuffer *entry, *tmp;
 	struct virtio_gpu_ctrl_hdr *resp;
-	u64 fence_id = 0;
+	u64 fence_id;
 
 	INIT_LIST_HEAD(&reclaim_list);
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -226,23 +226,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 		}
 		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
-			u64 f = le64_to_cpu(resp->fence_id);
-
-			if (fence_id > f) {
-				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
-					  __func__, fence_id, f);
-			} else {
-				fence_id = f;
-			}
+			fence_id = le64_to_cpu(resp->fence_id);
+			virtio_gpu_fence_event_process(vgdev, fence_id);
 		}
 		if (entry->resp_cb)
 			entry->resp_cb(vgdev, entry);
 	}
 	wake_up(&vgdev->ctrlq.ack_queue);
 
-	if (fence_id)
-		virtio_gpu_fence_event_process(vgdev, fence_id);
-
 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 		if (entry->objs)
 			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
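Read alongside the last hunk: with several timelines in flight, keeping only the largest fence_id of a batch and signaling once at the end would flag a smaller id that arrives later as an error, even though it is merely a fence from another ring. A rough standalone model of the two strategies (illustrative only, not the driver's dequeue loop):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: completed fence ids as they might arrive in one batch of
 * host responses, coming from two different rings/timelines. */
static const uint64_t responses[] = { 258, 260, 259 };

/* Old strategy: remember only the maximum id and complain when an id goes
 * "backwards".  Correct with a single timeline, wrong with several. */
static void process_batch_max_only(void)
{
        uint64_t fence_id = 0;

        for (unsigned i = 0; i < sizeof(responses) / sizeof(responses[0]); i++) {
                uint64_t f = responses[i];

                if (fence_id > f)
                        printf("old: Oops: fence %llx -> %llx\n",
                               (unsigned long long)fence_id, (unsigned long long)f);
                else
                        fence_id = f;
        }
        printf("old: signal everything up to %llu once, at the end\n",
               (unsigned long long)fence_id);
}

/* New strategy: hand every completed id to fence processing individually, so
 * each timeline advances on its own and order across rings is irrelevant. */
static void process_batch_per_fence(void)
{
        for (unsigned i = 0; i < sizeof(responses) / sizeof(responses[0]); i++)
                printf("new: process fence %llu individually\n",
                       (unsigned long long)responses[i]);
}

int main(void)
{
        process_batch_max_only();
        process_batch_per_fence();
        return 0;
}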