drm/ttm/tt: add wrappers to set tt state.
This adds two getters and four setters. Unbound and populated are currently the same thing, but this will change. It also drops a BUG_ON that did not seem very useful.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200915024007.67163-2-airlied@gmail.com
parent 9c3006a4cc
commit 7eec915138
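The call-site conversion is mechanical: every direct read or write of ttm->state in the drivers below is replaced with one of the new inline helpers. A minimal before/after sketch of a driver populate path (illustrative only; the exact call sites are in the hunks that follow):

	/* Before: drivers tested and assigned the state enum directly. */
	if (ttm->state != tt_unpopulated)
		return 0;
	/* ... allocate and map pages ... */
	ttm->state = tt_unbound;

	/* After: the same logic goes through the wrappers, keeping the
	 * (now renamed) _state field private to the TTM core. */
	if (ttm_tt_is_populated(ttm))
		return 0;
	/* ... allocate and map pages ... */
	ttm_tt_set_populated(ttm);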
@@ -1304,7 +1304,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
 			return -ENOMEM;
 
 		ttm->page_flags |= TTM_PAGE_FLAG_SG;
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
@@ -1324,7 +1324,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 gtt->ttm.dma_address,
 						 ttm->num_pages);
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
@@ -1298,14 +1298,14 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
 	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (slave && ttm->sg) {
 		/* make userspace faulting work */
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 ttm_dma->dma_address, ttm->num_pages);
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
@@ -53,7 +53,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
 	struct ttm_operation_ctx ctx = { false, false };
 	long r;
 
-	if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+	if (!bo->tbo.ttm || !ttm_tt_is_bound(bo->tbo.ttm))
 		return true;
 
 	if (!mmu_notifier_range_blockable(range))
@@ -611,14 +611,14 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
 			return -ENOMEM;
 
 		ttm->page_flags |= TTM_PAGE_FLAG_SG;
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
 	if (slave && ttm->sg) {
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 gtt->ttm.dma_address, ttm->num_pages);
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
@@ -249,7 +249,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
 			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
@@ -1044,7 +1044,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 put_pages:
 	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
 		      ttm->caching_state);
-	ttm->state = tt_unpopulated;
+	ttm_tt_set_unpopulated(ttm);
 }
 
 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
@@ -1053,7 +1053,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 	unsigned i;
 	int ret;
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
@@ -1083,7 +1083,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 		}
 	}
 
-	ttm->state = tt_unbound;
+	ttm_tt_set_populated(ttm);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_pool_populate);
@@ -894,7 +894,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	unsigned i;
 	int ret;
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
@@ -982,7 +982,7 @@ skip_huge:
 		}
 	}
 
-	ttm->state = tt_unbound;
+	ttm_tt_set_populated(ttm);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
@@ -1076,7 +1076,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	/* shrink pool if necessary (only on !is_cached pools)*/
 	if (npages)
 		ttm_dma_page_pool_free(pool, npages, false);
-	ttm->state = tt_unpopulated;
+	ttm_tt_set_unpopulated(ttm);
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 
@@ -156,7 +156,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (ttm->state == tt_unpopulated) {
+	if (!ttm_tt_is_populated(ttm)) {
 		/* Change caching but don't populate */
 		ttm->caching_state = c_state;
 		return 0;
@@ -214,8 +214,7 @@ void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 
 	ttm_tt_unbind(bdev, ttm);
 
-	if (ttm->state == tt_unbound)
-		ttm_tt_unpopulate(bdev, ttm);
+	ttm_tt_unpopulate(bdev, ttm);
 
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
 	    ttm->swap_storage)
@@ -232,7 +231,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
 	ttm->num_pages = bo->num_pages;
 	ttm->caching_state = tt_cached;
 	ttm->page_flags = page_flags;
-	ttm->state = tt_unpopulated;
+	ttm_tt_set_unpopulated(ttm);
 	ttm->swap_storage = NULL;
 	ttm->sg = bo->sg;
 }
@@ -309,9 +308,9 @@ EXPORT_SYMBOL(ttm_dma_tt_fini);
 
 void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
-	if (ttm->state == tt_bound) {
+	if (ttm_tt_is_bound(ttm)) {
 		bdev->driver->ttm_tt_unbind(bdev, ttm);
-		ttm->state = tt_unbound;
+		ttm_tt_set_unbound(ttm);
 	}
 }
 
@@ -324,7 +323,7 @@ int ttm_tt_bind(struct ttm_bo_device *bdev,
 	if (!ttm)
 		return -EINVAL;
 
-	if (ttm->state == tt_bound)
+	if (ttm_tt_is_bound(ttm))
 		return 0;
 
 	ret = ttm_tt_populate(bdev, ttm, ctx);
@@ -335,7 +334,7 @@ int ttm_tt_bind(struct ttm_bo_device *bdev,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ttm->state = tt_bound;
+	ttm_tt_set_bound(ttm);
 
 	return 0;
 }
@@ -393,7 +392,6 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev,
 	int i;
 	int ret = -ENOMEM;
 
-	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
 	BUG_ON(ttm->caching_state != tt_cached);
 
 	if (!persistent_swap_storage) {
@@ -460,7 +458,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
 {
 	int ret;
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (bdev->driver->ttm_tt_populate)
@@ -489,7 +487,7 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
 void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 		       struct ttm_tt *ttm)
 {
-	if (ttm->state == tt_unpopulated)
+	if (!ttm_tt_is_populated(ttm))
 		return;
 
 	ttm_tt_clear_mapping(ttm);
@@ -464,13 +464,13 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
 		dma_resv_assert_held(src->base.resv);
 
-	if (dst->ttm->state == tt_unpopulated) {
+	if (!ttm_tt_is_populated(dst->ttm)) {
 		ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
 		if (ret)
 			return ret;
 	}
 
-	if (src->ttm->state == tt_unpopulated) {
+	if (!ttm_tt_is_populated(src->ttm)) {
 		ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx);
 		if (ret)
 			return ret;
@@ -644,7 +644,7 @@ static int vmw_ttm_populate(struct ttm_bo_device *bdev,
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 	int ret;
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
@@ -74,9 +74,39 @@ struct ttm_tt {
 		tt_bound,
 		tt_unbound,
 		tt_unpopulated,
-	} state;
+	} _state;
 };
 
+static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
+{
+	return tt->_state != tt_unpopulated;
+}
+
+static inline bool ttm_tt_is_bound(struct ttm_tt *tt)
+{
+	return tt->_state == tt_bound;
+}
+
+static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
+{
+	tt->_state = tt_unpopulated;
+}
+
+static inline void ttm_tt_set_populated(struct ttm_tt *tt)
+{
+	tt->_state = tt_unbound;
+}
+
+static inline void ttm_tt_set_unbound(struct ttm_tt *tt)
+{
+	tt->_state = tt_unbound;
+}
+
+static inline void ttm_tt_set_bound(struct ttm_tt *tt)
+{
+	tt->_state = tt_bound;
+}
+
 /**
  * struct ttm_dma_tt
  *