drm/amdgpu: invoke CSA functions (v2)

Make sure the CSA is mapped.

v2: agd: rebase.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Commit 2493664f05 (parent 4e4bbe7343)
Author: Monk Liu, 2017-01-09 15:54:32 +08:00; committed by Alex Deucher
3 changed files with 40 additions and 0 deletions
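
The helpers this patch invokes, amdgpu_allocate_static_csa() and amdgpu_map_static_csa(), were introduced by the parent commit (in amdgpu_virt.c). The sketch below is only an illustration of what they plausibly do, not the parent-commit code: the sketch_* names are placeholders, the locking is simplified (the real mapping helper also reserves the VM's page-directory BO), and it assumes the usual kernel-BO and VM-mapping calls of this kernel era (amdgpu_bo_create_kernel(), amdgpu_vm_bo_add(), amdgpu_vm_bo_map()) plus the AMDGPU_CSA_SIZE/AMDGPU_CSA_VADDR constants from amdgpu_virt.h.

/* Illustrative sketch, not the parent-commit code: allocate one CSA
 * (context save area) BO per device, backed by VRAM, with a VMID0 GPU
 * address kept in adev->virt.csa_vmid0_addr.
 */
static int sketch_allocate_static_csa(struct amdgpu_device *adev)
{
	void *cpu_addr;

	return amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->virt.csa_obj,
				       &adev->virt.csa_vmid0_addr,
				       &cpu_addr);
}

/* Illustrative sketch: map the shared CSA BO at a fixed GPU VA inside one
 * process VM and remember the bo_va so the CS path can update it later.
 * Simplified locking: only the CSA BO is reserved here.
 */
static int sketch_map_static_csa(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(adev->virt.csa_obj, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!bo_va) {
		amdgpu_bo_unreserve(adev->virt.csa_obj);
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,
			     AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		amdgpu_vm_bo_rmv(adev, bo_va);
		amdgpu_bo_unreserve(adev->virt.csa_obj);
		return r;
	}

	vm->csa_bo_va = bo_va;	/* consumed by amdgpu_bo_vm_update_pte() below */
	amdgpu_bo_unreserve(adev->virt.csa_obj);
	return 0;
}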

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -771,6 +771,20 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	if (r)
 		return r;
 
+	if (amdgpu_sriov_vf(adev)) {
+		struct dma_fence *f;
+		bo_va = vm->csa_bo_va;
+		BUG_ON(!bo_va);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		if (r)
+			return r;
+
+		f = bo_va->last_pt_update;
+		r = amdgpu_sync_fence(adev, &p->job->sync, f);
+		if (r)
+			return r;
+	}
+
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
 			struct dma_fence *f;
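
Here the job is synced to bo_va->last_pt_update so the submission waits for the CSA page-table update to complete before it runs; the BUG_ON(!bo_va) relies on amdgpu_driver_open_kms() having mapped the CSA into every SR-IOV client's VM (see the amdgpu_kms.c hunk further down).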

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1395,6 +1395,15 @@ static int amdgpu_init(struct amdgpu_device *adev)
 				return r;
 			}
 			adev->ip_blocks[i].status.hw = true;
+
+			/* right after GMC hw init, we create CSA */
+			if (amdgpu_sriov_vf(adev)) {
+				r = amdgpu_allocate_static_csa(adev);
+				if (r) {
+					DRM_ERROR("allocate CSA failed %d\n", r);
+					return r;
+				}
+			}
 		}
 	}
@@ -1528,6 +1537,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.late_initialized = false;
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+
 	return 0;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -649,6 +649,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		goto out_suspend;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_map_static_csa(adev, &fpriv->vm);
+		if (r)
+			goto out_suspend;
+	}
+
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
@@ -687,6 +693,14 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	amdgpu_uvd_free_handles(adev, file_priv);
 	amdgpu_vce_free_handles(adev, file_priv);
 
+	if (amdgpu_sriov_vf(adev)) {
+		/* TODO: how to handle reserve failure */
+		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+		amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
+		fpriv->vm.csa_bo_va = NULL;
+		amdgpu_bo_unreserve(adev->virt.csa_obj);
+	}
+
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
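
Taken together, the CSA lifecycle after this patch is: the backing BO is allocated once per device right after GMC hw init, mapped into each process VM when the file is opened, removed from that VM on close, and freed again in amdgpu_fini().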