drm/amdgpu: add slab cache for sync objects as well

Sync object entries are allocated and freed all the time, so serve them from their own slab cache instead of generic kmalloc allocations, as we already do for fences.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Author:    Christian König <christian.koenig@amd.com>
Date:      2016-02-16 11:24:58 +01:00
Committer: Alex Deucher
commit 257bf15a4b, parent 336d1f5efe
3 changed files, 36 insertions(+), 4 deletions(-)
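For readers new to the pattern: the patch wires up the standard kernel slab-cache lifecycle for struct amdgpu_sync_entry — create a kmem_cache once at module load, serve every fixed-size allocation from it, destroy it at unload. A minimal, self-contained sketch of that lifecycle (struct foo_entry and the foo_* names are made up for illustration, not part of the patch):

#include <linux/slab.h>
#include <linux/types.h>

struct foo_entry {
	struct hlist_node	node;
	void			*payload;
};

static struct kmem_cache *foo_slab;

/* Create the cache once, at module load. */
static int foo_init(void)
{
	foo_slab = kmem_cache_create("foo_entry",
				     sizeof(struct foo_entry), 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	return foo_slab ? 0 : -ENOMEM;
}

/* Hot path: fixed-size objects come from the cache, not kmalloc(). */
static struct foo_entry *foo_alloc(void)
{
	return kmem_cache_alloc(foo_slab, GFP_KERNEL);
}

static void foo_free(struct foo_entry *e)
{
	kmem_cache_free(foo_slab, e);
}

/* Destroy the cache once, at module unload, after all objects are freed. */
static void foo_fini(void)
{
	kmem_cache_destroy(foo_slab);
}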

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -634,6 +634,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
 
 /*
  * GART structures, functions & helpers

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -539,6 +539,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
 
 static int __init amdgpu_init(void)
 {
+	amdgpu_sync_init();
 #ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
@@ -563,6 +564,7 @@ static void __exit amdgpu_exit(void)
 	amdgpu_amdkfd_fini();
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
+	amdgpu_sync_fini();
 }
 
 module_init(amdgpu_init);
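Worth noting in this hunk: amdgpu_sync_init() returns -ENOMEM on failure, but amdgpu_init() ignores that return value, so a later kmem_cache_alloc() against the never-created cache would not fail gracefully. A stricter variant (a sketch only, not what this patch does) would propagate the error and abort the module load:

static int __init amdgpu_init(void)
{
	int r;

	/* Refuse to load if the sync slab cache cannot be created. */
	r = amdgpu_sync_init();
	if (r)
		return r;

	/* ... unchanged remainder of amdgpu_init() ... */
	return drm_pci_init(driver, pdriver);
}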

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

@@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
 	struct fence		*fence;
 };
 
+static struct kmem_cache *amdgpu_sync_slab;
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -133,7 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return 0;
 	}
 
-	e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
 	if (!e)
 		return -ENOMEM;
 
@@ -214,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 		f = e->fence;
 
 		hash_del(&e->node);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 
 		if (!fence_is_signaled(f))
 			return f;
@@ -237,7 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
 
 		hash_del(&e->node);
 		fence_put(e->fence);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
 	return 0;
@@ -259,8 +261,34 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 		hash_del(&e->node);
 		fence_put(e->fence);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
 	fence_put(sync->last_vm_update);
 }
+
+/**
+ * amdgpu_sync_init - init sync object subsystem
+ *
+ * Allocate the slab allocator.
+ */
+int amdgpu_sync_init(void)
+{
+	amdgpu_sync_slab = kmem_cache_create(
+		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!amdgpu_sync_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * amdgpu_sync_fini - fini sync object subsystem
+ *
+ * Free the slab allocator.
+ */
+void amdgpu_sync_fini(void)
+{
+	kmem_cache_destroy(amdgpu_sync_slab);
+}
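A short note on the design choice: a dedicated kmem_cache serves objects of exactly sizeof(struct amdgpu_sync_entry), whereas kmalloc() rounds each request up to the next power-of-two bucket, so a private cache wastes less memory on small, constantly recycled objects and tends to hand back cache-hot memory. SLAB_HWCACHE_ALIGN additionally aligns the objects to CPU cache lines. The named cache also shows up as "amdgpu_sync" in /proc/slabinfo, which makes leaked sync entries easy to spot. The one obligation this adds is the pairing visible in the amdgpu_drv.c hunk: kmem_cache_destroy() may only run once all entries have been freed, which module unload guarantees.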