drm/amdgpu: move VM table mapping into the backend as well

Clean that up further and also fix another case where the BO
wasn't kmapped for CPU-based updates.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit ecf96b52bf (parent df399b0641)
Christian König, 2019-03-21 16:34:18 +01:00; committed by Alex Deucher
4 files changed, 37 insertions(+), 27 deletions(-)
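For context on what the patch builds on: since the parent change each VM carries an update_funcs table and page-table updates go through it; this patch adds a map_table() hook so that mapping the PD/PT BO (kmap for CPU updates, GART binding for SDMA updates) is also decided by the backend instead of being open-coded at every call site. The following standalone sketch only illustrates that dispatch pattern; it is not driver code, and apart from the map_table name and the CPU-vs-SDMA split, every type and function below is a made-up stand-in.

/*
 * Standalone illustration (NOT driver code): a minimal sketch of the
 * per-VM update backend pattern this patch extends.  Only the idea of
 * a map_table() hook and the CPU-vs-SDMA split mirror the real driver;
 * all names below are stand-ins for illustration.
 */
#include <stdio.h>

struct bo {				/* stand-in for struct amdgpu_bo */
	const char *name;
	int kmapped;
	int gart_bound;
};

struct vm_update_funcs {		/* mirrors the amdgpu_vm_update_funcs idea */
	int (*map_table)(struct bo *table);
};

/* CPU backend: page tables must be kernel mapped before CPU writes. */
static int cpu_map_table(struct bo *table)
{
	table->kmapped = 1;
	printf("kmap %s for CPU page table updates\n", table->name);
	return 0;
}

/* SDMA backend: page tables must be reachable through the GART instead. */
static int sdma_map_table(struct bo *table)
{
	table->gart_bound = 1;
	printf("bind %s into GART for SDMA page table updates\n", table->name);
	return 0;
}

static const struct vm_update_funcs cpu_funcs = { .map_table = cpu_map_table };
static const struct vm_update_funcs sdma_funcs = { .map_table = sdma_map_table };

struct vm {				/* stand-in for struct amdgpu_vm */
	const struct vm_update_funcs *update_funcs;
};

/* Selection happens once when the VM is set up (outside this diff in the
 * real driver); after that no caller cares which backend is active. */
static void vm_init(struct vm *vm, int use_cpu_for_update)
{
	vm->update_funcs = use_cpu_for_update ? &cpu_funcs : &sdma_funcs;
}

/* Call site analogous to amdgpu_vm_validate_pt_bos()/amdgpu_vm_clear_bo():
 * mapping a PD/PT is now a single backend call. */
static int validate_table(struct vm *vm, struct bo *table)
{
	return vm->update_funcs->map_table(table);
}

int main(void)
{
	struct bo pt = { .name = "pt0", .kmapped = 0, .gart_bound = 0 };
	struct vm cpu_vm, sdma_vm;

	vm_init(&cpu_vm, 1);
	vm_init(&sdma_vm, 0);
	validate_table(&cpu_vm, &pt);
	validate_table(&sdma_vm, &pt);
	return 0;
}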

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -659,17 +659,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			amdgpu_vm_bo_moved(bo_base);
 		} else {
-			if (vm->use_cpu_for_update)
-				r = amdgpu_bo_kmap(bo, NULL);
-			else
-				r = amdgpu_ttm_alloc_gart(&bo->tbo);
-			if (r)
-				break;
-			if (bo->shadow) {
-				r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
-				if (r)
-					break;
-			}
+			vm->update_funcs->map_table(bo);
 			amdgpu_vm_bo_relocated(bo_base);
 		}
 	}
@@ -751,22 +741,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	r = amdgpu_ttm_alloc_gart(&bo->tbo);
-	if (r)
-		return r;
-
 	if (bo->shadow) {
 		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
 				    &ctx);
 		if (r)
 			return r;
-
-		r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
-		if (r)
-			return r;
 	}
 
+	r = vm->update_funcs->map_table(bo);
+	if (r)
+		return r;
+
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
 	params.vm = vm;
@@ -877,12 +862,6 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	if (vm->use_cpu_for_update) {
-		r = amdgpu_bo_kmap(pt, NULL);
-		if (r)
-			goto error_free_pt;
-	}
-
 	/* Keep a reference to the root directory to avoid
 	 * freeing them up in the wrong order.
 	 */

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -215,7 +215,7 @@ struct amdgpu_vm_update_params {
 };
 
 struct amdgpu_vm_update_funcs {
-
+	int (*map_table)(struct amdgpu_bo *bo);
 	int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
 		       struct dma_fence *exclusive);
 	int (*update)(struct amdgpu_vm_update_params *p,

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c

@@ -24,6 +24,16 @@
 #include "amdgpu_object.h"
 #include "amdgpu_trace.h"
 
+/**
+ * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+{
+	return amdgpu_bo_kmap(table, NULL);
+}
+
 /**
  * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
  *
@@ -110,6 +120,7 @@ static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
 }
 
 const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
+	.map_table = amdgpu_vm_cpu_map_table,
 	.prepare = amdgpu_vm_cpu_prepare,
 	.update = amdgpu_vm_cpu_update,
 	.commit = amdgpu_vm_cpu_commit

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c

@@ -28,6 +28,25 @@
 #define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
 #define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)
 
+/**
+ * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+{
+	int r;
+
+	r = amdgpu_ttm_alloc_gart(&table->tbo);
+	if (r)
+		return r;
+
+	if (table->shadow)
+		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
+
+	return r;
+}
+
 /**
  * amdgpu_vm_sdma_prepare - prepare SDMA command submission
  *
@@ -242,6 +261,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 }
 
 const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
+	.map_table = amdgpu_vm_sdma_map_table,
 	.prepare = amdgpu_vm_sdma_prepare,
 	.update = amdgpu_vm_sdma_update,
 	.commit = amdgpu_vm_sdma_commit
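
Design note: with map_table in both function tables, callers such as amdgpu_vm_validate_pt_bos() and amdgpu_vm_clear_bo() no longer distinguish CPU from SDMA updates at all; the only remaining decision point is where vm->update_funcs is chosen (outside this diff, when the VM is created based on use_cpu_for_update). That is also what fixes the "BO wasn't kmapped for CPU based updates" case mentioned in the commit message: the CPU backend's map_table always kmaps, so any path that maps a table through the backend gets the kmap it needs.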