drm/i915/gvt: Rename reserved ring buffer
"Reserved" implies something set aside from elsewhere, but these are actually
buffers used by the command scanner. Rename them to ring_scan_buffer.
v2:
- Remove the usage of an extra variable. (Zhenyu)
Fixes: 0a53bc07f0 ("drm/i915/gvt: Separate cmd scan from request allocation")
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
This commit is contained in:
parent
bf4097ea57
commit
8cf80a2e4b
|
@ -2619,21 +2619,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
|
|||
gma_tail = workload->rb_start + workload->rb_tail;
|
||||
gma_top = workload->rb_start + guest_rb_size;
|
||||
|
||||
if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
|
||||
void *va, *p;
|
||||
if (workload->rb_len > vgpu->ring_scan_buffer_size[ring_id]) {
|
||||
void *p;
|
||||
|
||||
/* realloc the new ring buffer if needed */
|
||||
va = vgpu->reserve_ring_buffer_va[ring_id];
|
||||
p = krealloc(va, workload->rb_len, GFP_KERNEL);
|
||||
p = krealloc(vgpu->ring_scan_buffer[ring_id], workload->rb_len,
|
||||
GFP_KERNEL);
|
||||
if (!p) {
|
||||
gvt_vgpu_err("fail to alloc reserve ring buffer\n");
|
||||
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
vgpu->reserve_ring_buffer_va[ring_id] = p;
|
||||
vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
|
||||
vgpu->ring_scan_buffer[ring_id] = p;
|
||||
vgpu->ring_scan_buffer_size[ring_id] = workload->rb_len;
|
||||
}
|
||||
|
||||
shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
|
||||
shadow_ring_buffer_va = vgpu->ring_scan_buffer[ring_id];
|
||||
|
||||
/* get shadow ring buffer va */
|
||||
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
|
||||
|
|
|
@ -864,9 +864,9 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
|
|||
clean_workloads(vgpu, ALL_ENGINES);
|
||||
|
||||
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
|
||||
kfree(vgpu->reserve_ring_buffer_va[i]);
|
||||
vgpu->reserve_ring_buffer_va[i] = NULL;
|
||||
vgpu->reserve_ring_buffer_size[i] = 0;
|
||||
kfree(vgpu->ring_scan_buffer[i]);
|
||||
vgpu->ring_scan_buffer[i] = NULL;
|
||||
vgpu->ring_scan_buffer_size[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -881,21 +881,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
|
|||
|
||||
/* each ring has a shadow ring buffer until vgpu destroyed */
|
||||
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
|
||||
vgpu->reserve_ring_buffer_va[i] =
|
||||
vgpu->ring_scan_buffer[i] =
|
||||
kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
|
||||
if (!vgpu->reserve_ring_buffer_va[i]) {
|
||||
gvt_vgpu_err("fail to alloc reserve ring buffer\n");
|
||||
if (!vgpu->ring_scan_buffer[i]) {
|
||||
gvt_vgpu_err("fail to alloc ring scan buffer\n");
|
||||
goto out;
|
||||
}
|
||||
vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
|
||||
vgpu->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
|
||||
}
|
||||
return 0;
|
||||
out:
|
||||
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
|
||||
if (vgpu->reserve_ring_buffer_size[i]) {
|
||||
kfree(vgpu->reserve_ring_buffer_va[i]);
|
||||
vgpu->reserve_ring_buffer_va[i] = NULL;
|
||||
vgpu->reserve_ring_buffer_size[i] = 0;
|
||||
if (vgpu->ring_scan_buffer_size[i]) {
|
||||
kfree(vgpu->ring_scan_buffer[i]);
|
||||
vgpu->ring_scan_buffer[i] = NULL;
|
||||
vgpu->ring_scan_buffer_size[i] = 0;
|
||||
}
|
||||
}
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -172,9 +172,9 @@ struct intel_vgpu {
|
|||
struct intel_vgpu_opregion opregion;
|
||||
struct intel_vgpu_display display;
|
||||
struct intel_vgpu_submission submission;
|
||||
/* 1/2K for each reserve ring buffer */
|
||||
void *reserve_ring_buffer_va[I915_NUM_ENGINES];
|
||||
int reserve_ring_buffer_size[I915_NUM_ENGINES];
|
||||
/* 1/2K for each engine */
|
||||
void *ring_scan_buffer[I915_NUM_ENGINES];
|
||||
int ring_scan_buffer_size[I915_NUM_ENGINES];
|
||||
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
|
||||
|
|
Loading…
Reference in New Issue