drm/amdgpu: revert "stop using gart_start as offset for the GTT domain"

It turned out that the commit was incomplete, and since we no longer use the AGP
mapping in the GTT manager it is not necessary any more anyway.

This reverts commit 22d8bfafcc.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: James Zhu <James.Zhu@amd.com>
Tested-by: James Zhu <James.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 0957dc7097 (parent feabaad8aa)
Author: Christian König, 2018-09-14 12:54:33 +02:00; committed by Alex Deucher
2 changed files with 4 additions and 6 deletions
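
For context, here is a minimal sketch of the addressing scheme this revert restores. It is illustrative only; example_gtt_gpu_addr is not a function in the driver. After the revert, the GTT manager again returns node offsets relative to the start of the GART space, amdgpu_init_mem_type stores adev->gmc.gart_start in man->gpu_offset, the GART bind offset is taken straight from mem->start, and the full GPU address of a buffer is the sum of the two.

/* Illustrative sketch, not driver code: how the pieces in the diff below
 * fit together after the revert.
 */
static u64 example_gtt_gpu_addr(struct ttm_mem_type_manager *man,
                                struct ttm_mem_reg *mem)
{
        /* GART-relative byte offset, as used for amdgpu_gart_bind() */
        u64 gart_offset = (u64)mem->start << PAGE_SHIFT;

        /* man->gpu_offset holds adev->gmc.gart_start for TTM_PL_TT */
        return man->gpu_offset + gart_offset;
}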

drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c

@@ -143,8 +143,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	spin_unlock(&mgr->lock);
 
 	if (!r)
-		mem->start = node->node.start +
-			(adev->gmc.gart_start >> PAGE_SHIFT);
+		mem->start = node->node.start;
 
 	return r;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -188,7 +188,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	case TTM_PL_TT:
 		/* GTT memory */
 		man->func = &amdgpu_gtt_mgr_func;
-		man->gpu_offset = 0;
+		man->gpu_offset = adev->gmc.gart_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -1060,7 +1060,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 
 	/* bind pages into GART page tables */
-	gtt->offset = ((u64)bo_mem->start << PAGE_SHIFT) - adev->gmc.gart_start;
+	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
@@ -1112,8 +1112,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
 
 	/* Bind pages */
-	gtt->offset = ((u64)tmp.start << PAGE_SHIFT) -
-		adev->gmc.gart_start;
+	gtt->offset = (u64)tmp.start << PAGE_SHIFT;
 	r = amdgpu_ttm_gart_bind(adev, bo, flags);
 	if (unlikely(r)) {
 		ttm_bo_mem_put(bo, &tmp);