drm/ttm: wait for BO idle in ttm_bo_move_memcpy
When we want to pipeline accelerated moves, we need to wait in the fallback path.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 88932a7be2
commit 77dfc28bad
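For orientation before the per-driver hunks, here is a minimal sketch of the fallback pattern the drivers adopt with this change. example_bo_move() and example_accel_move() are hypothetical names used only for illustration; only ttm_bo_move_memcpy() and its new interruptible parameter come from the patch itself.

/*
 * Illustrative sketch only (not part of the patch): example_accel_move()
 * stands in for a driver's hardware copy. After this change,
 * ttm_bo_move_memcpy() takes @interruptible and waits for the BO to be
 * idle itself, so the caller just forwards the flag instead of waiting
 * before the CPU copy.
 */
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
                           bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem)
{
        int r;

        /* Try the accelerated (possibly pipelined) move first. */
        r = example_accel_move(bo, evict, no_wait_gpu, new_mem);
        if (r)
                /* Fall back to the CPU copy; it now waits for idle internally. */
                r = ttm_bo_move_memcpy(bo, evict, interruptible,
                                       no_wait_gpu, new_mem);
        return r;
}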
@@ -433,7 +433,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 
         if (r) {
 memcpy:
-                r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+                r = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                       no_wait_gpu, new_mem);
                 if (r) {
                         return r;
                 }

@@ -1328,7 +1328,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
         /* Fallback to software copy. */
         ret = ttm_bo_wait(bo, intr, no_wait_gpu);
         if (ret == 0)
-                ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+                ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
 
 out:
         if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {

@@ -361,7 +361,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
                 qxl_move_null(bo, new_mem);
                 return 0;
         }
-        return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+        return ttm_bo_move_memcpy(bo, evict, interruptible,
+                                  no_wait_gpu, new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,

@@ -445,7 +445,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 
         if (r) {
 memcpy:
-                r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+                r = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                       no_wait_gpu, new_mem);
                 if (r) {
                         return r;
                 }

@@ -359,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                 ret = bdev->driver->move(bo, evict, interruptible,
                                          no_wait_gpu, mem);
         else
-                ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+                ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                         no_wait_gpu, mem);
 
         if (ret) {
                 if (bdev->driver->move_notify) {

@@ -320,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                       bool evict, bool no_wait_gpu,
+                       bool evict, bool interruptible,
+                       bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
 {
         struct ttm_bo_device *bdev = bo->bdev;

@@ -336,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         unsigned long add = 0;
         int dir;
 
+        ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+        if (ret)
+                return ret;
+
         ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
         if (ret)
                 return ret;

@@ -970,6 +970,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *

@@ -984,7 +985,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                              bool evict, bool no_wait_gpu,
+                              bool evict, bool interruptible,
+                              bool no_wait_gpu,
                               struct ttm_mem_reg *new_mem);
 
 /**