drm/amdgpu: use BO priority instead of self coding it (v2)

Keeping groups of BOs on the LRU is too time consuming on command
submission. Instead, use the newly added BO priority to give a certain
eviction order.

v2: agd: trivial warning fix

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger.He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent cf6c467d67
commit e1f055b301
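The core of the change is the first hunk below: instead of amdgpu maintaining its own per-size LRU buckets, each BO gets a TTM priority derived from its size at creation time. A minimal sketch of that mapping, with illustrative numbers in the comments (assuming 4 KiB pages; the actual value of TTM_MAX_BO_PRIORITY comes from the TTM BO-priority work this patch builds on):

	/* Sketch of the size-to-priority mapping added below. */
	unsigned prio = ilog2(bo->tbo.num_pages);		/* 1 page (4 KiB)      -> 0  */
								/* 16384 pages (64 MiB) -> 14 */
	prio = min(prio, (unsigned)(TTM_MAX_BO_PRIORITY - 1));	/* clamp to TTM's last level */
	bo->tbo.priority = prio;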
@@ -386,6 +386,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	if (unlikely(r != 0))
 		return r;
 
+	bo->tbo.priority = ilog2(bo->tbo.num_pages);
+	bo->tbo.priority = min(bo->tbo.priority, (unsigned)(TTM_MAX_BO_PRIORITY - 1));
+
 	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
@@ -1048,56 +1048,6 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 	return flags;
 }
 
-static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-	unsigned i, j;
-
-	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
-		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
-
-		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
-			if (&tbo->lru == lru->lru[j])
-				lru->lru[j] = tbo->lru.prev;
-
-		if (&tbo->swap == lru->swap_lru)
-			lru->swap_lru = tbo->swap.prev;
-	}
-}
-
-static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-	unsigned log2_size = min(ilog2(tbo->num_pages),
-				 AMDGPU_TTM_LRU_SIZE - 1);
-
-	return &adev->mman.log2_size[log2_size];
-}
-
-static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
-{
-	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
-	struct list_head *res = lru->lru[tbo->mem.mem_type];
-
-	lru->lru[tbo->mem.mem_type] = &tbo->lru;
-	while ((++lru)->lru[tbo->mem.mem_type] == res)
-		lru->lru[tbo->mem.mem_type] = &tbo->lru;
-
-	return res;
-}
-
-static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
-{
-	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
-	struct list_head *res = lru->swap_lru;
-
-	lru->swap_lru = &tbo->swap;
-	while ((++lru)->swap_lru == res)
-		lru->swap_lru = &tbo->swap;
-
-	return res;
-}
-
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 					    const struct ttm_place *place)
 {
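The helpers removed above implemented the old scheme: one amdgpu_mman_lru bucket per log2 of the BO size, each caching a tail pointer into the shared TTM LRU lists, with a trailing guard entry terminating the fix-up walk. For readers puzzling over the ++lru loops, here is a simplified, self-contained sketch of that tail-caching invariant (hypothetical names, not the kernel code):

	/*
	 * Each size bucket remembers the last list entry that belongs to it.
	 * A new entry for bucket k is inserted right after that cached tail,
	 * and any following bucket whose cached tail was the same position is
	 * updated as well, so buckets stay contiguous on the one shared list.
	 * A guard bucket with a distinct (NULL) tail stops the walk.
	 */
	struct size_bucket {
		struct list_head *tail;
	};

	static void bucket_add_tail(struct size_bucket *bucket, struct list_head *entry)
	{
		struct list_head *old_tail = bucket->tail;

		list_add(entry, old_tail);	/* insert behind the bucket's tail */
		bucket->tail = entry;
		while ((++bucket)->tail == old_tail)
			bucket->tail = entry;
	}

Keeping all of these cached tails consistent on every LRU move is exactly the per-submission overhead the commit message calls out.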
@@ -1136,14 +1086,12 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
 	.io_mem_free = &amdgpu_ttm_io_mem_free,
-	.lru_removal = &amdgpu_ttm_lru_removal,
-	.lru_tail = &amdgpu_ttm_lru_tail,
-	.swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
+	.lru_tail = &ttm_bo_default_lru_tail,
+	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 };
 
 int amdgpu_ttm_init(struct amdgpu_device *adev)
 {
-	unsigned i, j;
 	int r;
 
 	r = amdgpu_ttm_global_init(adev);
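With the custom lru_tail/swap_lru_tail callbacks gone, LRU placement falls back to TTM's defaults and the eviction order comes purely from the per-BO priority. Roughly, TTM keeps one LRU list per priority level (the man[j].lru[] and glob->swap_lru[] arrays already referenced above) and scans them in ascending order when it needs to free space. A simplified illustration of that ordering, not the actual TTM eviction code, with evict() standing in for the real eviction path:

	/* Illustrative only: scan priority levels from 0 upwards. */
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		list_for_each_entry(bo, &man->lru[i], lru)
			if (eviction_valuable(bo, place))	/* driver callback, cf. amdgpu_ttm_bo_eviction_valuable() */
				return evict(bo);		/* hypothetical helper */

With priority = ilog2(num_pages), smaller BOs sit on the lower-numbered lists and so tend to be considered for eviction before larger ones, which matches the intent of the old size-bucket grouping.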
@@ -1161,19 +1109,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
 	}
-
-	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
-		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
-
-		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
-			lru->lru[j] = &adev->mman.bdev.man[j].lru[0];
-		lru->swap_lru = &adev->mman.bdev.glob->swap_lru[0];
-	}
-
-	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
-		adev->mman.guard.lru[j] = NULL;
-	adev->mman.guard.swap_lru = NULL;
-
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 			   adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -34,13 +34,6 @@
 #define AMDGPU_PL_FLAG_GWS	(TTM_PL_FLAG_PRIV << 1)
 #define AMDGPU_PL_FLAG_OA	(TTM_PL_FLAG_PRIV << 2)
 
-#define AMDGPU_TTM_LRU_SIZE	20
-
-struct amdgpu_mman_lru {
-	struct list_head	*lru[TTM_NUM_MEM_TYPES];
-	struct list_head	*swap_lru;
-};
-
 struct amdgpu_mman {
 	struct ttm_bo_global_ref	bo_global_ref;
 	struct drm_global_reference	mem_global_ref;
@@ -58,11 +51,6 @@ struct amdgpu_mman {
 	struct amdgpu_ring		*buffer_funcs_ring;
 	/* Scheduler entity for buffer moves */
 	struct amd_sched_entity		entity;
-
-	/* custom LRU management */
-	struct amdgpu_mman_lru		log2_size[AMDGPU_TTM_LRU_SIZE];
-	/* guard for log2_size array, don't add anything in between */
-	struct amdgpu_mman_lru		guard;
 };
 
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;