drm/ttm: roundup the shrink request to prevent skip huge pool
e.g. if the shrink request is less than 512 pages, the logic will skip the huge pool entirely.

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 2bf257d662
parent bae5c5b50b
@@ -442,17 +442,19 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
+		unsigned page_nr;
+
 		if (shrink_pages == 0)
 			break;
 
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+		page_nr = (1 << pool->order);
 		/* OK to use static buffer since global mutex is held. */
-		nr_free_pool = (nr_free >> pool->order);
-		if (nr_free_pool == 0)
-			continue;
-
+		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
 		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
-		freed += ((nr_free_pool - shrink_pages) << pool->order);
+		freed += (nr_free_pool - shrink_pages) << pool->order;
+		if (freed >= sc->nr_to_scan)
+			break;
 	}
 	mutex_unlock(&lock);
 	return freed;
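
The arithmetic behind the fix, as a minimal userspace sketch (not kernel code: roundup() here is a simplified stand-in for the kernel macro, and order 9 is an assumed huge-pool order, i.e. 2 MB entries built from 4 KB base pages). A shrink request below 512 pages, shifted right by the pool order, truncates to zero, so ttm_page_pool_free() was never called for the huge pool:

#include <stdio.h>

/* Simplified version of the kernel's roundup() macro. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned nr_free = 511;          /* shrink request: one page short of a huge page */
	unsigned order = 9;              /* assumed huge-pool order (512 x 4K = 2M) */
	unsigned page_nr = 1u << order;  /* pages per huge-pool entry: 512 */

	/* Old logic: the shift truncates to 0, so the huge pool is skipped. */
	unsigned old_nr = nr_free >> order;                   /* 511 >> 9 == 0 */

	/* New logic: round up to a whole entry first, so at least 1 is freed. */
	unsigned new_nr = roundup(nr_free, page_nr) >> order; /* 512 >> 9 == 1 */

	printf("old nr_free_pool = %u, new nr_free_pool = %u\n", old_nr, new_nr);
	return 0;
}

Since the rounded-up request can now free somewhat more than was asked for, the added "if (freed >= sc->nr_to_scan) break;" presumably stops the scan as soon as the request has been satisfied.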