drm/i915/gvt: Move workload cache init/clean into intel_vgpu_{setup, clean}_submission()

Move the vGPU workload cache initialization/de-initialization into
intel_vgpu_{setup, clean}_submission(), since the workload cache is not
specific to execlists.
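
For background, vgpu->workloads is a kernel slab cache. A minimal sketch of
the create/destroy pairing this patch relocates, under illustrative names
(the example_* struct and functions are stand-ins, not the GVT-g code;
kmem_cache_create()/kmem_cache_destroy() are the real slab APIs):

#include <linux/slab.h>
#include <linux/list.h>

struct example_workload {
	struct list_head list;
	int ring_id;
};

static struct kmem_cache *example_cache;

static int example_setup_submission(void)
{
	/* One cache for all engines, created once per vGPU. */
	example_cache = kmem_cache_create("example_workload",
			sizeof(struct example_workload), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cache)
		return -ENOMEM;
	return 0;
}

static void example_clean_submission(void)
{
	/* Every object must be freed back to the cache before this. */
	kmem_cache_destroy(example_cache);
}

Tying the cache to submission setup/teardown keeps its lifetime matched to
the code that actually allocates from it, rather than to the execlist state.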

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Author:    Zhi Wang <zhi.a.wang@intel.com>
Date:      2017-09-10 20:28:09 +0800
Committer: Zhenyu Wang
Commit:    9a9829e9eb (parent 874b6a910e)

 2 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c

@@ -856,14 +856,12 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 	struct intel_engine_cs *engine;
 
 	clean_workloads(vgpu, ALL_ENGINES);
-	kmem_cache_destroy(vgpu->workloads);
 
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		kfree(vgpu->reserve_ring_buffer_va[i]);
 		vgpu->reserve_ring_buffer_va[i] = NULL;
 		vgpu->reserve_ring_buffer_size[i] = 0;
 	}
-
 }
 
 #define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
@@ -872,19 +870,8 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
 	/* each ring has a virtual execlist engine */
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i)
 		init_vgpu_execlist(vgpu, i);
-		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
-	}
-
-	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
-
-	if (!vgpu->workloads)
-		return -ENOMEM;
 
 	/* each ring has a shadow ring buffer until vgpu destroyed */
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
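
The cache removed from intel_vgpu_init_execlist() above is consumed in the
workload creation path, roughly as sketched below. The allocation site is
not part of this diff, and example_alloc_workload() is an illustrative name;
kmem_cache_zalloc()/kmem_cache_free() are the real slab APIs:

/* Sketch: workload objects come from the vgpu->workloads slab cache,
 * so the cache must live for the whole submission lifetime, not just
 * while the virtual execlists are being set up.
 */
static struct intel_vgpu_workload *
example_alloc_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_workload *workload;

	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
	if (!workload)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&workload->list);
	return workload;
}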

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c

@@ -719,6 +719,7 @@ err:
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	i915_gem_context_put(vgpu->shadow_ctx);
+	kmem_cache_destroy(vgpu->workloads);
 }
 
 /**
@@ -733,7 +734,9 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
-	atomic_set(&vgpu->running_workload_num, 0);
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
+	int ret;
 
 	vgpu->shadow_ctx = i915_gem_context_create_gvt(
 			&vgpu->gvt->dev_priv->drm);
@@ -742,5 +745,24 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
+	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			NULL);
+
+	if (!vgpu->workloads) {
+		ret = -ENOMEM;
+		goto out_shadow_ctx;
+	}
+
+	for_each_engine(engine, vgpu->gvt->dev_priv, i)
+		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+	atomic_set(&vgpu->running_workload_num, 0);
+
 	return 0;
+
+out_shadow_ctx:
+	i915_gem_context_put(vgpu->shadow_ctx);
+	return ret;
 }
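
Worth noting in the new setup path: the error unwind follows the usual
kernel pattern of releasing resources in reverse acquisition order, so a
failed kmem_cache_create() drops the shadow context taken earlier via the
out_shadow_ctx label, and the kmem_cache_destroy() added to
intel_vgpu_clean_submission() now mirrors the create in
intel_vgpu_setup_submission().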