drm/amdgpu: Rename amdgpu_device_gpu_recover_imp back to amdgpu_device_gpu_recover
We removed the wrapper that was queueing the recovery function into the reset
domain queue and which was using this name, so the implementation can take the
shorter name back.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit cf72704414
parent b5fd0cf3ea
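For context, a rough sketch of what the removed wrapper looked like: the function that previously carried the name amdgpu_device_gpu_recover() only packed its arguments into a work item, scheduled it on the reset domain's queue via amdgpu_reset_domain_schedule(), and waited for amdgpu_device_gpu_recover_imp() to run there. This is an illustration reconstructed from the surrounding rework, not the exact removed code; the struct and helper names (amdgpu_recover_work_struct, amdgpu_device_queue_gpu_recover_work) are assumptions.

/* Illustrative reconstruction of the removed wrapper -- not the exact source. */
struct amdgpu_recover_work_struct {
	struct work_struct base;
	struct amdgpu_device *adev;
	struct amdgpu_job *job;
	int ret;
};

/* Runs inside the reset domain's queue; this is where the real reset happens. */
static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work)
{
	struct amdgpu_recover_work_struct *recover_work =
		container_of(work, struct amdgpu_recover_work_struct, base);

	recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev,
							  recover_work->job);
}

/* The old amdgpu_device_gpu_recover(): queue the recovery work and wait for it. */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct amdgpu_recover_work_struct work = { .adev = adev, .job = job };

	INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);

	if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
		return -EAGAIN;

	flush_work(&work.base);

	return work.ret;
}

With that wrapper gone, callers such as the job timeout handler, the RAS recovery worker, and the virtualization FLR paths invoke the recovery function directly, which is why every hunk below is a one-line rename.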
@@ -1254,7 +1254,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job* job);
-int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
@@ -129,7 +129,7 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  kfd.reset_work);

-	amdgpu_device_gpu_recover_imp(adev, NULL);
+	amdgpu_device_gpu_recover(adev, NULL);
 }

 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
@@ -5076,7 +5076,7 @@ retry:
  * Returns 0 for success or an error on failure.
  */

-int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
 {
	struct list_head device_list, *device_list_handle = NULL;
@@ -819,7 +819,7 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work)
 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

-	amdgpu_device_gpu_recover_imp(adev, NULL);
+	amdgpu_device_gpu_recover(adev, NULL);
 }

 #endif
@@ -64,7 +64,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

 	if (amdgpu_device_should_recover_gpu(ring->adev)) {
-		r = amdgpu_device_gpu_recover_imp(ring->adev, job);
+		r = amdgpu_device_gpu_recover(ring->adev, job);
 		if (r)
 			DRM_ERROR("GPU Recovery Failed: %d\n", r);
 	} else {
@@ -1939,7 +1939,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 	}

 	if (amdgpu_device_should_recover_gpu(ras->adev))
-		amdgpu_device_gpu_recover_imp(ras->adev, NULL);
+		amdgpu_device_gpu_recover(ras->adev, NULL);
 	atomic_set(&ras->in_recovery, 0);
 }

@@ -284,7 +284,7 @@ flr_done:
 	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
			adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
-		amdgpu_device_gpu_recover_imp(adev, NULL);
+		amdgpu_device_gpu_recover(adev, NULL);
 }

 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -311,7 +311,7 @@ flr_done:
			adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
			adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
			adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
-		amdgpu_device_gpu_recover_imp(adev, NULL);
+		amdgpu_device_gpu_recover(adev, NULL);
 }

 static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -523,7 +523,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)

 	/* Trigger recovery due to world switch failure */
 	if (amdgpu_device_should_recover_gpu(adev))
-		amdgpu_device_gpu_recover_imp(adev, NULL);
+		amdgpu_device_gpu_recover(adev, NULL);
 }

 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,