drm/amdgpu: individualize fence allocation per entity
Allocate fences for each entity and remove the ctx->fences reference, as fences should be bound to amdgpu_ctx_entity instead of amdgpu_ctx.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 63e3ab9a82
parent 7db1d560a4
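Before the diff itself, a summary of the data-structure change: previously amdgpu_ctx owned one flat array of amdgpu_sched_jobs * num_entities fence pointers, and each amdgpu_ctx_entity's fences pointer aimed at its slice of that array; now each entity gets its own amdgpu_sched_jobs-slot allocation and the context keeps no fence storage at all. A minimal userspace C sketch of the two layouts, with calloc/free standing in for kcalloc/kfree (the types and helper names below are illustrative, not the driver's definitions):

#include <stdlib.h>

struct fence;				/* stand-in for struct dma_fence */

struct entity {
	struct fence **fences;		/* before: slice of shared array; after: owned */
};

struct ctx {
	struct fence **fences;		/* before only: one flat shared array */
	struct entity *entities;	/* assumed already allocated by the caller */
};

/* Before: the context owns num_entities * sched_jobs slots, and each
 * entity's pointer aims at its slice of that single allocation. */
static int init_shared(struct ctx *c, int num_entities, int sched_jobs)
{
	c->fences = calloc((size_t)num_entities * sched_jobs,
			   sizeof(struct fence *));
	if (!c->fences)
		return -1;

	for (int i = 0; i < num_entities; ++i)
		c->entities[i].fences = &c->fences[sched_jobs * i];
	return 0;
}

/* After: each entity owns an independent sched_jobs-slot array and the
 * context keeps no fence storage of its own. */
static int init_per_entity(struct ctx *c, int num_entities, int sched_jobs)
{
	for (int i = 0; i < num_entities; ++i) {
		c->entities[i].fences = calloc(sched_jobs,
					       sizeof(struct fence *));
		if (!c->entities[i].fences) {
			while (i--)	/* unwind the partial allocation */
				free(c->entities[i].fences);
			return -1;
		}
	}
	return 0;
}

Per-entity ownership means an entity's fence storage can be set up and torn down locally, which is what the reworked error and teardown paths in the hunks below rely on.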
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -87,24 +87,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 
-	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
-			      sizeof(struct dma_fence*), GFP_KERNEL);
-	if (!ctx->fences)
-		return -ENOMEM;
 
 	ctx->entities[0] = kcalloc(num_entities,
 				   sizeof(struct amdgpu_ctx_entity),
 				   GFP_KERNEL);
-	if (!ctx->entities[0]) {
-		r = -ENOMEM;
-		goto error_free_fences;
-	}
+	if (!ctx->entities[0])
+		return -ENOMEM;
+
 
 	for (i = 0; i < num_entities; ++i) {
 		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
 
 		entity->sequence = 1;
-		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
+		entity->fences = kcalloc(amdgpu_sched_jobs,
+					 sizeof(struct dma_fence*), GFP_KERNEL);
+		if (!entity->fences) {
+			r = -ENOMEM;
+			goto error_cleanup_memory;
+		}
 	}
 	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
 		ctx->entities[i] = ctx->entities[i - 1] +
@@ -181,11 +181,17 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 error_cleanup_entities:
 	for (i = 0; i < num_entities; ++i)
 		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
-	kfree(ctx->entities[0]);
 
-error_free_fences:
-	kfree(ctx->fences);
-	ctx->fences = NULL;
+error_cleanup_memory:
+	for (i = 0; i < num_entities; ++i) {
+		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+		kfree(entity->fences);
+		entity->fences = NULL;
+	}
+
+	kfree(ctx->entities[0]);
+	ctx->entities[0] = NULL;
 	return r;
 }
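One subtlety of the new unwind path above: error_cleanup_memory walks all num_entities entries, including those the allocation loop never reached. That is safe because ctx->entities[0] comes from kcalloc, so unreached fences pointers are NULL, and kfree(NULL) is a no-op. A compilable userspace sketch of the same idiom, again with calloc/free standing in for the kernel allocators and illustrative names:

#include <errno.h>
#include <stdlib.h>

struct entity {
	void **fences;
};

static int init_entities(struct entity **out, int num_entities, int sched_jobs)
{
	struct entity *entities;
	int i, r;

	entities = calloc(num_entities, sizeof(*entities));	/* zeroed */
	if (!entities)
		return -ENOMEM;

	for (i = 0; i < num_entities; ++i) {
		entities[i].fences = calloc(sched_jobs, sizeof(void *));
		if (!entities[i].fences) {
			r = -ENOMEM;
			goto error_cleanup_memory;
		}
	}
	*out = entities;
	return 0;

error_cleanup_memory:
	/*
	 * Visits every entity, not just the ones allocated so far: the
	 * unreached tail still holds NULL (calloc zeroed it), and
	 * free(NULL), like kfree(NULL), is a no-op.
	 */
	for (i = 0; i < num_entities; ++i) {
		free(entities[i].fences);
		entities[i].fences = NULL;
	}
	free(entities);
	return r;
}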
@@ -199,12 +205,16 @@ static void amdgpu_ctx_fini(struct kref *ref)
 	if (!adev)
 		return;
 
-	for (i = 0; i < num_entities; ++i)
-		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			dma_fence_put(ctx->entities[0][i].fences[j]);
-	kfree(ctx->fences);
-	kfree(ctx->entities[0]);
+	for (i = 0; i < num_entities; ++i) {
+		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+		for (j = 0; j < amdgpu_sched_jobs; ++j)
+			dma_fence_put(entity->fences[j]);
+
+		kfree(entity->fences);
+	}
+
+	kfree(ctx->entities[0]);
 	mutex_destroy(&ctx->lock);
 
 	kfree(ctx);
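Why exactly amdgpu_sched_jobs slots per entity: elsewhere in amdgpu_ctx.c the array serves as a ring buffer indexed by the entity's sequence counter; amdgpu_ctx_add_fence() picks the slot with seq & (amdgpu_sched_jobs - 1) (amdgpu_sched_jobs is rounded to a power of two at init), takes a reference on the new fence, and drops the reference held by the fence it displaces, and the amdgpu_ctx_fini hunk above then puts whatever each slot still holds. A simplified userspace model of that ring, with a bare refcount standing in for dma_fence_get()/dma_fence_put() and illustrative names:

#include <stddef.h>

struct fence {
	int refcount;			/* stands in for dma_fence refcounting */
};

struct entity {
	unsigned long long sequence;	/* next sequence number to assign */
	struct fence **fences;		/* ring of sched_jobs slots */
};

/* Publish a new fence in the ring and release whatever the recycled
 * slot held, in the spirit of amdgpu_ctx_add_fence(). */
static unsigned long long add_fence(struct entity *e, struct fence *f,
				    unsigned int sched_jobs)
{
	unsigned long long seq = e->sequence++;
	size_t idx = seq & (sched_jobs - 1);	/* sched_jobs is a power of two */
	struct fence *other = e->fences[idx];

	f->refcount++;			/* dma_fence_get() */
	e->fences[idx] = f;
	if (other)
		other->refcount--;	/* dma_fence_put() */
	return seq;
}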
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h

@@ -42,7 +42,6 @@ struct amdgpu_ctx {
 	unsigned			reset_counter_query;
 	uint32_t			vram_lost_counter;
 	spinlock_t			ring_lock;
-	struct dma_fence		**fences;
 	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM];
 	bool				preamble_presented;
 	enum drm_sched_priority		init_priority;
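For completeness, the entity-side structure that now owns the fence array; this is reproduced from memory of amdgpu_ctx.h around the time of this commit, so treat it as a sketch rather than the exact header:

struct amdgpu_ctx_entity {
	uint64_t		sequence;	/* seq counter driving the ring */
	struct dma_fence	**fences;	/* now a per-entity allocation */
	struct drm_sched_entity	entity;
};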