drm/ttm, drm/vmwgfx: move cpu_writers handling into vmwgfx

This feature is only used by vmwgfx and superfluous for everybody else.

Signed-off-by: Christian König <christian.koenig@amd.com>
Co-developed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Tested-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/333650/
This commit is contained in:
Christian König 2019-10-01 10:02:58 +02:00
parent c6a5f8daff
commit 7fb03cc3e0
7 changed files with 19 additions and 71 deletions

View File

@@ -153,7 +153,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(kref_read(&bo->list_kref)); BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref)); BUG_ON(kref_read(&bo->kref));
BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL); BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru)); BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy)); BUG_ON(!list_empty(&bo->ddestroy));
@@ -1315,7 +1314,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
kref_init(&bo->kref); kref_init(&bo->kref);
kref_init(&bo->list_kref); kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap); INIT_LIST_HEAD(&bo->swap);
@@ -1827,31 +1825,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
} }
EXPORT_SYMBOL(ttm_bo_wait); EXPORT_SYMBOL(ttm_bo_wait);
/*
 * ttm_bo_synccpu_write_grab - grab a buffer object for CPU write access.
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately instead of sleeping if the buffer is busy.
 *
 * Takes the reservation, waits for pending GPU work on the buffer, and on
 * success increments bo->cpu_writers so that subsequent command submission
 * affecting the buffer reports -EBUSY until the matching
 * ttm_bo_synccpu_write_release() call.
 *
 * Returns 0 on success, -EBUSY if the buffer is busy and @no_wait is true,
 * or -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */
	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	/* Wait for outstanding GPU activity before granting CPU access. */
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		/* Incremented only under the reservation lock. */
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
/*
 * ttm_bo_synccpu_write_release - release a synccpu write grab.
 *
 * @bo: The buffer object.
 *
 * Drops one count taken by ttm_bo_synccpu_write_grab(). The decrement is
 * a bare atomic op; no reservation is required here.
 */
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
/** /**
* A buffer object shrink method that tries to swap out the first * A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list. * buffer object on the bo_global::swap_lru list.

View File

@@ -511,7 +511,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
mutex_init(&fbo->base.wu_mutex); mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL; fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node); drm_vma_node_reset(&fbo->base.base.vma_node);
atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref); kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref); kref_init(&fbo->base.kref);

View File

@@ -113,12 +113,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo; struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { if (ret == -EALREADY && dups) {
dma_resv_unlock(bo->base.resv);
ret = -EBUSY;
} else if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry; struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head); entry = list_prev_entry(entry, head);
list_del(&safe->head); list_del(&safe->head);

View File

@@ -566,7 +566,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
switch (ref_type) { switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE: case TTM_REF_SYNCCPU_WRITE:
ttm_bo_synccpu_write_release(&user_bo->vbo.base); atomic_dec(&user_bo->vbo.cpu_writers);
break; break;
default: default:
WARN_ONCE(true, "Undefined buffer object reference release.\n"); WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -682,12 +682,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile, struct ttm_object_file *tfile,
uint32_t flags) uint32_t flags)
{ {
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &user_bo->vbo.base; struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed; bool existed;
int ret; int ret;
if (flags & drm_vmw_synccpu_allow_cs) { if (flags & drm_vmw_synccpu_allow_cs) {
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret; long lret;
lret = dma_resv_wait_timeout_rcu lret = dma_resv_wait_timeout_rcu
@@ -700,15 +700,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
return 0; return 0;
} }
ret = ttm_bo_synccpu_write_grab ret = ttm_bo_reserve(bo, true, nonblock, NULL);
(bo, !!(flags & drm_vmw_synccpu_dontblock)); if (unlikely(ret != 0))
return ret;
ret = ttm_bo_wait(bo, true, nonblock);
if (likely(ret == 0))
atomic_inc(&user_bo->vbo.cpu_writers);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base, ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false); TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed) if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->vbo.base); atomic_dec(&user_bo->vbo.cpu_writers);
return ret; return ret;
} }

View File

@@ -102,6 +102,8 @@ struct vmw_fpriv {
* @base: The TTM buffer object * @base: The TTM buffer object
* @res_list: List of resources using this buffer object as a backing MOB * @res_list: List of resources using this buffer object as a backing MOB
* @pin_count: pin depth * @pin_count: pin depth
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings * @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources * @res_prios: Eviction priority counts for attached resources
@@ -110,6 +112,7 @@ struct vmw_buffer_object {
struct ttm_buffer_object base; struct ttm_buffer_object base;
struct list_head res_list; struct list_head res_list;
s32 pin_count; s32 pin_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */ /* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx; struct vmw_resource *dx_query_ctx;
/* Protected by reservation */ /* Protected by reservation */

View File

@@ -521,6 +521,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
}; };
int ret; int ret;
if (atomic_read(&vbo->cpu_writers))
return -EBUSY;
if (vbo->pin_count > 0) if (vbo->pin_count > 0)
return 0; return 0;

View File

@@ -147,7 +147,6 @@ struct ttm_tt;
* holds a pointer to a persistent shmem object. * holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages. * @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing. * @evicted: Whether the object was evicted without user-space knowing.
* @cpu_writes: For synchronization. Number of cpu writers.
* @lru: List head for the lru list. * @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list. * @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list. * @swap: List head for swap LRU list.
@@ -198,12 +197,6 @@ struct ttm_buffer_object {
struct ttm_tt *ttm; struct ttm_tt *ttm;
bool evicted; bool evicted;
/**
* Members protected by the bo::reserved lock only when written to.
*/
atomic_t cpu_writers;
/** /**
* Members protected by the bdev::lru_lock. * Members protected by the bdev::lru_lock.
*/ */
@@ -441,31 +434,6 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place); const struct ttm_place *place);
/**
* ttm_bo_synccpu_write_grab
*
* @bo: The buffer object:
* @no_wait: Return immediately if buffer is busy.
*
* Synchronizes a buffer object for CPU RW access. This means
* command submission that affects the buffer will return -EBUSY
* until ttm_bo_synccpu_write_release is called.
*
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
*/
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
* ttm_bo_synccpu_write_release:
*
* @bo : The buffer object.
*
* Releases a synccpu lock.
*/
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
/** /**
* ttm_bo_acc_size * ttm_bo_acc_size
* *