drm/amdgpu: create fence slab once when amdgpu module init.
This avoids problems with multiple GPUs. For example, if the first GPU
failed before amdgpu_fence_init() was called, amdgpu_fence_slab_ref is
still 0 and it will get decremented in amdgpu_fence_driver_fini().
This will lead to a crash during init of the second GPU since
amdgpu_fence_slab_ref is not 0.

v2: add functions for init/exit instead of moving the variables into
the driver.

Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit d573de2d00
parent 8b41e7a03a
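The failure mode is easiest to see in isolation. Below is a minimal user-space
C sketch of the refcounted scheme this patch removes; it is not driver code,
and the names fence_slab, fence_slab_ref, fence_driver_init() and
fence_driver_fini() are hypothetical stand-ins for amdgpu_fence_slab,
amdgpu_fence_slab_ref, amdgpu_fence_driver_init() and
amdgpu_fence_driver_fini().

#include <stdio.h>
#include <stdlib.h>

/*
 * User-space stand-ins for the kernel objects: "fence_slab" plays the role
 * of amdgpu_fence_slab (a kmem_cache) and "fence_slab_ref" the role of
 * amdgpu_fence_slab_ref (an atomic_t).
 */
static void *fence_slab;
static int fence_slab_ref;

/* Old scheme: per-device init creates the cache only on the first reference. */
static int fence_driver_init(void)
{
        if (++fence_slab_ref == 1) {
                fence_slab = malloc(64);   /* kmem_cache_create() stand-in */
                if (!fence_slab)
                        return -1;
        }
        return 0;   /* ref != 1: assume the cache already exists */
}

/* Old scheme: per-device teardown unconditionally drops a reference. */
static void fence_driver_fini(void)
{
        if (--fence_slab_ref == 0) {
                free(fence_slab);          /* kmem_cache_destroy() stand-in */
                fence_slab = NULL;
        }
}

int main(void)
{
        /*
         * GPU 0 fails early: its probe path never reached fence_driver_init(),
         * but the common teardown path still calls fence_driver_fini(), so
         * the reference count drops from 0 to -1.
         */
        fence_driver_fini();

        /*
         * GPU 1 probes normally: the increment brings the count back to 0,
         * not 1, so the cache is never created. Init "succeeds" with a NULL
         * cache, and the first allocation from it would crash.
         */
        fence_driver_init();

        printf("ref = %d, cache = %p\n", fence_slab_ref, fence_slab);
        return 0;
}

With the patch applied, the cache is instead created exactly once in
amdgpu_init() via amdgpu_fence_slab_init() and destroyed in amdgpu_exit() via
amdgpu_fence_slab_fini(), so a failed probe of one GPU can no longer unbalance
it for the others.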
@@ -602,6 +602,8 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
+int amdgpu_fence_slab_init(void);
+void amdgpu_fence_slab_fini(void);
 
 /*
  * GART structures, functions & helpers
@@ -565,9 +565,12 @@ static struct pci_driver amdgpu_kms_pci_driver = {
         .driver.pm = &amdgpu_pm_ops,
 };
 
+
+
 static int __init amdgpu_init(void)
 {
         amdgpu_sync_init();
+        amdgpu_fence_slab_init();
         if (vgacon_text_force()) {
                 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
                 return -EINVAL;
@@ -578,7 +581,6 @@ static int __init amdgpu_init(void)
         driver->driver_features |= DRIVER_MODESET;
         driver->num_ioctls = amdgpu_max_kms_ioctl;
         amdgpu_register_atpx_handler();
-
         /* let modprobe override vga console setting */
         return drm_pci_init(driver, pdriver);
 }
@@ -589,6 +591,7 @@ static void __exit amdgpu_exit(void)
         drm_pci_exit(driver, pdriver);
         amdgpu_unregister_atpx_handler();
         amdgpu_sync_fini();
+        amdgpu_fence_slab_fini();
 }
 
 module_init(amdgpu_init);
@@ -55,8 +55,21 @@ struct amdgpu_fence {
 };
 
 static struct kmem_cache *amdgpu_fence_slab;
-static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
 
+int amdgpu_fence_slab_init(void)
+{
+        amdgpu_fence_slab = kmem_cache_create(
+                "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+                SLAB_HWCACHE_ALIGN, NULL);
+        if (!amdgpu_fence_slab)
+                return -ENOMEM;
+        return 0;
+}
+
+void amdgpu_fence_slab_fini(void)
+{
+        kmem_cache_destroy(amdgpu_fence_slab);
+}
 /*
  * Cast helper
  */
@@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-        if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
-                amdgpu_fence_slab = kmem_cache_create(
-                        "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
-                        SLAB_HWCACHE_ALIGN, NULL);
-                if (!amdgpu_fence_slab)
-                        return -ENOMEM;
-        }
         if (amdgpu_debugfs_fence_init(adev))
                 dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -441,9 +447,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
                 kfree(ring->fence_drv.fences);
                 ring->fence_drv.initialized = false;
         }
-
-        if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
-                kmem_cache_destroy(amdgpu_fence_slab);
 }
 
 /**