drm/ttm: Allow page allocations w/o triggering OOM.
This allows drivers to choose to avoid OOM invocation and instead handle page allocation failures themselves.

v2: Remove extra new lines.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
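For illustration only, a minimal sketch of how a driver could opt in to this behaviour. The wrapper structure and function name (my_drm_device, my_drm_disable_oom_retries) are made up for the example; only the no_retry field and the resulting TTM_PAGE_FLAG_NO_RETRY / __GFP_RETRY_MAYFAIL path come from this patch.

#include <drm/ttm/ttm_bo_driver.h>

/* Illustrative driver wrapper; only the embedded ttm_bo_device matters here. */
struct my_drm_device {
	struct ttm_bo_device bdev;
};

static void my_drm_disable_oom_retries(struct my_drm_device *mdev)
{
	/*
	 * With no_retry set, ttm_bo_add_ttm() adds TTM_PAGE_FLAG_NO_RETRY to
	 * the TT page flags, the page allocators then pass
	 * __GFP_RETRY_MAYFAIL, and a failed allocation is returned to the
	 * driver (e.g. as -ENOMEM) instead of invoking the OOM killer.
	 */
	mdev->bdev.no_retry = true;
}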
commit cb5f1a52ca
parent 7b158d1691
@@ -235,6 +235,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 	if (bdev->need_dma32)
 		page_flags |= TTM_PAGE_FLAG_DMA32;
 
+	if (bdev->no_retry)
+		page_flags |= TTM_PAGE_FLAG_NO_RETRY;
+
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)
@@ -741,6 +741,9 @@ out:
 	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 		gfp_flags |= __GFP_ZERO;
 
+	if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
+		gfp_flags |= __GFP_RETRY_MAYFAIL;
+
 	/* ttm_alloc_new_pages doesn't reference pool so we can run
 	 * multiple requests in parallel.
 	 **/
@@ -893,6 +896,9 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 		gfp_flags |= __GFP_ZERO;
 
+	if (flags & TTM_PAGE_FLAG_NO_RETRY)
+		gfp_flags |= __GFP_RETRY_MAYFAIL;
+
 	if (flags & TTM_PAGE_FLAG_DMA32)
 		gfp_flags |= GFP_DMA32;
 	else
@@ -920,6 +920,9 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 		gfp_flags &= ~__GFP_COMP;
 	}
 
+	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+		gfp_flags |= __GFP_RETRY_MAYFAIL;
+
 	return gfp_flags;
 }
 
@@ -301,7 +301,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 	swap_space = swap_storage->f_mapping;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
-		from_page = shmem_read_mapping_page(swap_space, i);
+		gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
+
 		if (IS_ERR(from_page)) {
 			ret = PTR_ERR(from_page);
 			goto out_err;
@@ -350,10 +354,15 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 	swap_space = swap_storage->f_mapping;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
+		gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+
 		from_page = ttm->pages[i];
 		if (unlikely(from_page == NULL))
 			continue;
-		to_page = shmem_read_mapping_page(swap_space, i);
+
+		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
 		if (IS_ERR(to_page)) {
 			ret = PTR_ERR(to_page);
 			goto out_err;
@@ -86,6 +86,7 @@ struct ttm_backend_func {
 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
 #define TTM_PAGE_FLAG_DMA32           (1 << 7)
 #define TTM_PAGE_FLAG_SG              (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY        (1 << 9)
 
 enum ttm_caching_state {
 	tt_uncached,
@@ -556,6 +557,7 @@ struct ttm_bo_global {
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
+ * @no_retry: Don't retry allocation if it fails
  *
  */
 
@@ -592,6 +594,8 @@ struct ttm_bo_device {
 	struct delayed_work wq;
 
 	bool need_dma32;
+
+	bool no_retry;
 };
 
 /**