drm/i915: drop bo->moving dependency
That should now be handled by the common dma_resv framework.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-13-christian.koenig@amd.com
commit 1d7f5e6c52
parent 46b35b33cc
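In short: instead of each TTM buffer object carrying a private bo->moving fence, move/clear fences now live on the object's reservation object as DMA_RESV_USAGE_KERNEL fences, and consumers simply wait on those. A minimal sketch of the consumer pattern, assuming the dma_resv API of this series (the helper name wait_for_moves is illustrative, not from the patch):

#include <linux/dma-resv.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/*
 * Illustrative helper, not from the patch: block until all kernel-internal
 * (move/clear) fences on the reservation object have signaled.
 */
static int wait_for_moves(struct dma_resv *resv, bool intr)
{
	long ret;

	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
				    intr, MAX_SCHEDULE_TIMEOUT);

	/* dma_resv_wait_timeout() returns remaining time or a negative error. */
	return ret < 0 ? ret : 0;
}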
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -741,30 +741,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
 /**
  * i915_gem_object_get_moving_fence - Get the object's moving fence if any
  * @obj: The object whose moving fence to get.
+ * @fence: The resulting fence
  *
  * A non-signaled moving fence means that there is an async operation
  * pending on the object that needs to be waited on before setting up
  * any GPU- or CPU PTEs to the object's pages.
  *
- * Return: A refcounted pointer to the object's moving fence if any,
- * NULL otherwise.
+ * Return: Negative error code or 0 for success.
  */
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+				     struct dma_fence **fence)
 {
-	return dma_fence_get(i915_gem_to_ttm(obj)->moving);
-}
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
-				      struct dma_fence *fence)
-{
-	struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
-
-	if (*moving == fence)
-		return;
-
-	dma_fence_put(*moving);
-	*moving = dma_fence_get(fence);
+	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+				      fence);
 }
 
 /**
@@ -782,23 +771,9 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
 				      bool intr)
 {
-	struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
-	int ret;
-
 	assert_object_held(obj);
-	if (!fence)
-		return 0;
-
-	ret = dma_fence_wait(fence, intr);
-	if (ret)
-		return ret;
-
-	if (fence->error)
-		return fence->error;
-
-	i915_gem_to_ttm(obj)->moving = NULL;
-	dma_fence_put(fence);
-	return 0;
+	return dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+				     intr, MAX_SCHEDULE_TIMEOUT);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
-				      struct dma_fence *fence);
-
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+				     struct dma_fence **fence);
 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
 				      bool intr);
 
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -467,19 +467,6 @@ out:
 	return fence;
 }
 
-static int
-prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
-	  struct i915_deps *deps)
-{
-	int ret;
-
-	ret = i915_deps_add_dependency(deps, bo->moving, ctx);
-	if (!ret)
-		ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
-
-	return ret;
-}
-
 /**
  * i915_ttm_move - The TTM move callback used by i915.
  * @bo: The buffer object.
@@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
 		struct i915_deps deps;
 
 		i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
-		ret = prev_deps(bo, ctx, &deps);
+		ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
 		if (ret) {
 			i915_refct_sgt_put(dst_rsgt);
 			return ret;
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -219,8 +219,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
 		err = dma_resv_reserve_fences(obj->base.resv, 1);
 		if (!err)
 			dma_resv_add_fence(obj->base.resv, &rq->fence,
-					   DMA_RESV_USAGE_WRITE);
-		i915_gem_object_set_moving_fence(obj, &rq->fence);
+					   DMA_RESV_USAGE_KERNEL);
 		i915_request_put(rq);
 	}
 	if (err)
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1221,8 +1221,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
 	i915_gem_object_unpin_pages(obj);
 	if (rq) {
 		dma_resv_add_fence(obj->base.resv, &rq->fence,
-				   DMA_RESV_USAGE_WRITE);
-		i915_gem_object_set_moving_fence(obj, &rq->fence);
+				   DMA_RESV_USAGE_KERNEL);
 		i915_request_put(rq);
 	}
 	i915_gem_object_unlock(obj);
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1357,10 +1357,17 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	if (err)
 		return err;
 
+	if (vma->obj) {
+		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
+		if (err)
+			return err;
+	} else {
+		moving = NULL;
+	}
+
 	if (flags & PIN_GLOBAL)
 		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
 
-	moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
 	if (flags & vma->vm->bind_async_flags || moving) {
 		/* lock VM */
 		err = i915_vm_lock_objects(vma->vm, ww);
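The selftest hunks show the matching producer side: the blit request's fence is added to the object's reservation object with DMA_RESV_USAGE_KERNEL rather than stashed through i915_gem_object_set_moving_fence(). Condensed into a sketch under the same assumptions (the helper name publish_move_fence is illustrative, not from the patch):

#include <linux/dma-resv.h>

/*
 * Illustrative helper, not from the patch: publish a move/clear fence as a
 * kernel-usage fence on the object's reservation object. The resv lock must
 * be held, matching the selftest call sites.
 */
static int publish_move_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	int err;

	dma_resv_assert_held(resv);

	/* Reserve a fence slot first; dma_resv_add_fence() itself cannot fail. */
	err = dma_resv_reserve_fences(resv, 1);
	if (err)
		return err;

	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
	return 0;
}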