drm/amdgpu: stop using gart_start as offset for the GTT domain
Further separate the GART and GTT domains.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent: 0be655d1c6
commit: 22d8bfafcc
@@ -143,7 +143,8 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	spin_unlock(&mgr->lock);
 
 	if (!r)
-		mem->start = node->node.start;
+		mem->start = node->node.start +
+			(adev->gmc.gart_start >> PAGE_SHIFT);
 
 	return r;
 }
@@ -188,7 +188,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	case TTM_PL_TT:
 		/* GTT memory  */
 		man->func = &amdgpu_gtt_mgr_func;
-		man->gpu_offset = adev->gmc.gart_start;
+		man->gpu_offset = 0;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -1062,7 +1062,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 
 	/* bind pages into GART page tables */
-	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+	gtt->offset = ((u64)bo_mem->start << PAGE_SHIFT) - adev->gmc.gart_start;
 	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
@@ -1110,7 +1110,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
 
 	/* Bind pages */
-	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+	gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start;
 	r = amdgpu_ttm_gart_bind(adev, bo, flags);
 	if (unlikely(r)) {
 		ttm_bo_mem_put(bo, &tmp);