drm/i915/gvt: Factor out vGPU workload creation/destroy

Factor out vGPU workload creation/destroy functions since they are not
specific to execlist emulation.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent a34e8def4d
commit 21527a8daf
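For context, here is a minimal sketch (not part of the patch) of the caller-side flow this refactor enables, modelled on the submit_context() changes below. prepare_workload_somehow() is a hypothetical stand-in for whatever preparation step may fail; queue_workload(), intel_vgpu_create_workload(), intel_vgpu_destroy_workload() and the struct fields are taken from the diff itself, and vgpu/ring_id are assumed to be in scope:

	struct intel_vgpu_workload *workload;
	int ret;

	/* common, submission-interface agnostic allocation/initialization */
	workload = intel_vgpu_create_workload(vgpu);
	if (IS_ERR(workload))
		return PTR_ERR(workload);

	/* execlist-specific setup stays in the caller */
	workload->ring_id = ring_id;
	/* ... ctx_desc, ring buffer registers, prepare/complete callbacks ... */

	ret = prepare_workload_somehow(workload);	/* hypothetical step */
	if (ret == 0)
		queue_workload(workload);
	else
		intel_vgpu_destroy_workload(workload);	/* common teardown */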
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -358,12 +358,6 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
 	return 0;
 }
 
-static void free_workload(struct intel_vgpu_workload *workload)
-{
-	intel_gvt_mm_unreference(workload->shadow_mm);
-	kmem_cache_free(workload->vgpu->submission.workloads, workload);
-}
-
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
@@ -586,7 +580,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
 out:
 	intel_vgpu_unpin_mm(workload->shadow_mm);
-	free_workload(workload);
+	intel_vgpu_destroy_workload(workload);
 	return ret;
 }
 
@@ -687,10 +681,6 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
 
-	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
-	if (!workload)
-		return -ENOMEM;
-
 	/* record some ring buffer register values for scan and shadow */
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(rb_start.val), &start, 4);
@@ -699,13 +689,10 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
 
-	INIT_LIST_HEAD(&workload->list);
-	INIT_LIST_HEAD(&workload->shadow_bb);
-
-	init_waitqueue_head(&workload->shadow_ctx_status_wq);
-	atomic_set(&workload->shadow_ctx_active, 0);
-
-	workload->vgpu = vgpu;
+	workload = intel_vgpu_create_workload(vgpu);
+	if (IS_ERR(workload))
+		return PTR_ERR(workload);
+
 	workload->ring_id = ring_id;
 	workload->ctx_desc = *desc;
 	workload->ring_context_gpa = ring_context_gpa;
@@ -715,9 +702,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	workload->rb_ctl = ctl;
 	workload->prepare = prepare_execlist_workload;
 	workload->complete = complete_execlist_workload;
-	workload->status = -EINPROGRESS;
 	workload->emulate_schedule_in = emulate_schedule_in;
-	workload->shadowed = false;
 
 	if (ring_id == RCS) {
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -764,7 +749,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	if (ret == 0)
 		queue_workload(workload);
 	else {
-		free_workload(workload);
+		intel_vgpu_destroy_workload(workload);
 		if (vgpu_is_vm_unhealthy(ret)) {
 			intel_vgpu_clean_execlist(vgpu);
 			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
@@ -853,7 +838,7 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
 		list_for_each_entry_safe(pos, n,
 			&s->workload_q_head[engine->id], list) {
 			list_del_init(&pos->list);
-			free_workload(pos);
+			intel_vgpu_destroy_workload(pos);
 		}
 		clear_bit(engine->id, s->shadow_ctx_desc_updated);
 	}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -781,3 +781,54 @@ out_shadow_ctx:
 	i915_gem_context_put(s->shadow_ctx);
 	return ret;
 }
+
+/**
+ * intel_vgpu_destroy_workload - destroy a vGPU workload
+ * @workload: the workload to destroy
+ *
+ * This function is called when destroying a vGPU workload.
+ *
+ */
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu_submission *s = &workload->vgpu->submission;
+
+	if (workload->shadow_mm)
+		intel_gvt_mm_unreference(workload->shadow_mm);
+
+	kmem_cache_free(s->workloads, workload);
+}
+
+/**
+ * intel_vgpu_create_workload - create a vGPU workload
+ * @vgpu: a vGPU
+ *
+ * This function is called when creating a vGPU workload.
+ *
+ * Returns:
+ * struct intel_vgpu_workload * on success, or a negative error code
+ * in ERR_PTR form on failure.
+ *
+ */
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_workload *workload;
+
+	workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
+	if (!workload)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&workload->list);
+	INIT_LIST_HEAD(&workload->shadow_bb);
+
+	init_waitqueue_head(&workload->shadow_ctx_status_wq);
+	atomic_set(&workload->shadow_ctx_active, 0);
+
+	workload->status = -EINPROGRESS;
+	workload->shadowed = false;
+	workload->vgpu = vgpu;
+
+	return workload;
+}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,5 +141,10 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
+struct intel_vgpu_workload *
+intel_vgpu_create_workload(struct intel_vgpu *vgpu);
+
+void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
+
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 #endif
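One behavioural detail worth noting: the old execlist-local free_workload() dropped workload->shadow_mm unconditionally, whereas intel_vgpu_destroy_workload() only unreferences it when a shadow_mm is attached. That makes the common helper safe to call on a workload that fails before any shadow MM has been set up, along the lines of this illustrative pattern (early_check_fails is a hypothetical condition, not from the patch):

	workload = intel_vgpu_create_workload(vgpu);
	if (IS_ERR(workload))
		return PTR_ERR(workload);

	if (early_check_fails) {
		/* no shadow_mm attached yet; the guarded unreference makes this safe */
		intel_vgpu_destroy_workload(workload);
		return -EINVAL;
	}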