drm/amdgpu: clean up hw semaphore support in driver

No longer used.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

parent a8480309df
commit 2f4b940033
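What replaces the semaphores: after this cleanup, a cross-ring dependency is resolved by blocking on the dependency fence from the CPU before the submission is committed, instead of emitting semaphore signal/wait packets on the two rings. A minimal sketch of the sync loop that remains, reconstructed from the amdgpu_sync.c hunks below (kernel-context sketch, not verbatim source; all helper names are taken from the diff):

static int sync_rings_sketch(struct amdgpu_sync *sync,
                             struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (!sync->sync_to[i])
                        continue;       /* no dependency on this ring */

                /* still refuse to sync to a disabled ring (deadlock risk) */
                if (!adev->rings[i]->ready)
                        return -EINVAL;

                /* CPU-side wait on the dependency fence; no semaphore
                 * packets are written to either ring anymore */
                r = fence_wait(sync->sync_to[i], true);
                if (r)
                        return r;
        }
        return 0;
}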
drivers/gpu/drm/amd/amdgpu/Makefile
@@ -20,7 +20,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
 	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
-	atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
+	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -638,31 +638,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 			  struct drm_device *dev,
 			  uint32_t handle, uint64_t *offset_p);
 
-/*
- * Semaphores.
- */
-struct amdgpu_semaphore {
-	struct amdgpu_sa_bo	*sa_bo;
-	signed			waiters;
-	uint64_t		gpu_addr;
-};
-
-int amdgpu_semaphore_create(struct amdgpu_device *adev,
-			    struct amdgpu_semaphore **semaphore);
-bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
-				  struct amdgpu_semaphore *semaphore);
-bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
-				struct amdgpu_semaphore *semaphore);
-void amdgpu_semaphore_free(struct amdgpu_device *adev,
-			   struct amdgpu_semaphore **semaphore,
-			   struct fence *fence);
-
 /*
  * Synchronization
  */
 struct amdgpu_sync {
-	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
 	struct fence		*sync_to[AMDGPU_MAX_RINGS];
 	DECLARE_HASHTABLE(fences, 4);
 	struct fence		*last_vm_update;
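For reference, the API being deleted paired one 8-byte GPU semaphore buffer with a waiter count: one ring emitted a wait packet that stalls until another ring's signal packet releases it. A sketch of the usage pattern, distilled from the deleted amdgpu_test_ring_sync() further down in this diff (not verbatim kernel source; error paths trimmed, every call appears in the removed code):

static int ring_handoff_sketch(struct amdgpu_device *adev,
                               struct amdgpu_ring *ringA,
                               struct amdgpu_ring *ringB)
{
        struct amdgpu_semaphore *sem = NULL;
        int r;

        r = amdgpu_semaphore_create(adev, &sem);   /* 8-byte, 8-aligned SA bo */
        if (r)
                return r;

        r = amdgpu_ring_lock(ringA, 64);
        if (r)
                goto out;
        amdgpu_semaphore_emit_wait(ringA, sem);    /* ring A stalls here... */
        amdgpu_ring_unlock_commit(ringA);

        r = amdgpu_ring_lock(ringB, 64);
        if (r)
                goto out;
        amdgpu_semaphore_emit_signal(ringB, sem);  /* ...until ring B signals */
        amdgpu_ring_unlock_commit(ringB);

out:
        amdgpu_semaphore_free(adev, &sem, NULL);   /* NULL fence: free immediately */
        return r;
}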
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -81,7 +81,6 @@ int amdgpu_exp_hw_support = 0;
 int amdgpu_enable_scheduler = 1;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_enable_semaphores = 0;
 int amdgpu_powerplay = -1;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
@@ -162,9 +161,6 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
-MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
-module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
-
 #ifdef CONFIG_DRM_AMD_POWERPLAY
 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c (deleted file)
@@ -1,102 +0,0 @@
-/*
- * Copyright 2011 Christian König.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors:
- *    Christian König <deathsimple@vodafone.de>
- */
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "amdgpu_trace.h"
-
-int amdgpu_semaphore_create(struct amdgpu_device *adev,
-			    struct amdgpu_semaphore **semaphore)
-{
-	int r;
-
-	*semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL);
-	if (*semaphore == NULL) {
-		return -ENOMEM;
-	}
-	r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
-			     &(*semaphore)->sa_bo, 8, 8);
-	if (r) {
-		kfree(*semaphore);
-		*semaphore = NULL;
-		return r;
-	}
-	(*semaphore)->waiters = 0;
-	(*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo);
-
-	*((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
-
-	return 0;
-}
-
-bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
-				  struct amdgpu_semaphore *semaphore)
-{
-	trace_amdgpu_semaphore_signale(ring->idx, semaphore);
-
-	if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) {
-		--semaphore->waiters;
-
-		/* for debugging lockup only, used by sysfs debug files */
-		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
-		return true;
-	}
-	return false;
-}
-
-bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
-				struct amdgpu_semaphore *semaphore)
-{
-	trace_amdgpu_semaphore_wait(ring->idx, semaphore);
-
-	if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) {
-		++semaphore->waiters;
-
-		/* for debugging lockup only, used by sysfs debug files */
-		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
-		return true;
-	}
-	return false;
-}
-
-void amdgpu_semaphore_free(struct amdgpu_device *adev,
-			   struct amdgpu_semaphore **semaphore,
-			   struct fence *fence)
-{
-	if (semaphore == NULL || *semaphore == NULL) {
-		return;
-	}
-	if ((*semaphore)->waiters > 0) {
-		dev_err(adev->dev, "semaphore %p has more waiters than signalers,"
-			" hardware lockup imminent!\n", *semaphore);
-	}
-	amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence);
-	kfree(*semaphore);
-	*semaphore = NULL;
-}
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -48,9 +48,6 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 {
 	unsigned i;
 
-	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
-		sync->semaphores[i] = NULL;
-
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		sync->sync_to[i] = NULL;
 
@@ -153,13 +150,13 @@ static void *amdgpu_sync_get_owner(struct fence *f)
 }
 
 /**
- * amdgpu_sync_resv - use the semaphores to sync to a reservation object
+ * amdgpu_sync_resv - sync to a reservation object
  *
  * @sync: sync object to add fences from reservation object to
  * @resv: reservation object with embedded fence
  * @shared: true if we should only sync to the exclusive fence
  *
- * Sync to the fence using the semaphore objects
+ * Sync to the fence
  */
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
@@ -250,9 +247,6 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
 		kfree(e);
 	}
 
-	if (amdgpu_enable_semaphores)
-		return 0;
-
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct fence *fence = sync->sync_to[i];
 		if (!fence)
@@ -279,12 +273,10 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		      struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	unsigned count = 0;
 	int i, r;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *other = adev->rings[i];
-		struct amdgpu_semaphore *semaphore;
 		struct amdgpu_fence *fence;
 
 		if (!sync->sync_to[i])
@@ -292,64 +284,19 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 
 		fence = to_amdgpu_fence(sync->sync_to[i]);
 
 		/* check if we really need to sync */
 		if (!amdgpu_enable_scheduler &&
 		    !amdgpu_fence_need_sync(fence, ring))
 			continue;
 
 		/* prevent GPU deadlocks */
 		if (!other->ready) {
 			dev_err(adev->dev, "Syncing to a disabled ring!");
 			return -EINVAL;
 		}
 
-		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
+		if (amdgpu_enable_scheduler) {
 			r = fence_wait(sync->sync_to[i], true);
 			if (r)
 				return r;
 			continue;
 		}
 
-		if (count >= AMDGPU_NUM_SYNCS) {
-			/* not enough room, wait manually */
-			r = fence_wait(&fence->base, false);
-			if (r)
-				return r;
-			continue;
-		}
-		r = amdgpu_semaphore_create(adev, &semaphore);
-		if (r)
-			return r;
-
-		sync->semaphores[count++] = semaphore;
-
-		/* allocate enough space for sync command */
-		r = amdgpu_ring_alloc(other, 16);
-		if (r)
-			return r;
-
-		/* emit the signal semaphore */
-		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
-			/* signaling wasn't successful wait manually */
-			amdgpu_ring_undo(other);
-			r = fence_wait(&fence->base, false);
-			if (r)
-				return r;
-			continue;
-		}
-
-		/* we assume caller has already allocated space on waiters ring */
-		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
-			/* waiting wasn't successful wait manually */
-			amdgpu_ring_undo(other);
-			r = fence_wait(&fence->base, false);
-			if (r)
-				return r;
-			continue;
-		}
-
-		amdgpu_ring_commit(other);
-		amdgpu_fence_note_sync(fence, ring);
 	}
 
 	return 0;
@@ -362,7 +309,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
  * @sync: sync object to use
  * @fence: fence to use for the free
  *
- * Free the sync object by freeing all semaphores in it.
+ * Free the sync object.
  */
 void amdgpu_sync_free(struct amdgpu_device *adev,
 		      struct amdgpu_sync *sync,
@@ -378,9 +325,6 @@ void amdgpu_sync_free(struct amdgpu_device *adev,
 		kfree(e);
 	}
 
-	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
-		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
-
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		fence_put(sync->sync_to[i]);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -238,144 +238,10 @@ void amdgpu_test_moves(struct amdgpu_device *adev)
 		amdgpu_do_test_moves(adev);
 }
 
-static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
-					     struct amdgpu_ring *ring,
-					     struct fence **fence)
-{
-	uint32_t handle = ring->idx ^ 0xdeafbeef;
-	int r;
-
-	if (ring == &adev->uvd.ring) {
-		r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
-		if (r) {
-			DRM_ERROR("Failed to get dummy create msg\n");
-			return r;
-		}
-
-		r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
-		if (r) {
-			DRM_ERROR("Failed to get dummy destroy msg\n");
-			return r;
-		}
-
-	} else if (ring == &adev->vce.ring[0] ||
-		   ring == &adev->vce.ring[1]) {
-		r = amdgpu_vce_get_create_msg(ring, handle, NULL);
-		if (r) {
-			DRM_ERROR("Failed to get dummy create msg\n");
-			return r;
-		}
-
-		r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
-		if (r) {
-			DRM_ERROR("Failed to get dummy destroy msg\n");
-			return r;
-		}
-	} else {
-		struct amdgpu_fence *a_fence = NULL;
-		r = amdgpu_ring_lock(ring, 64);
-		if (r) {
-			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
-			return r;
-		}
-		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
-		amdgpu_ring_unlock_commit(ring);
-		*fence = &a_fence->base;
-	}
-	return 0;
-}
-
-void amdgpu_test_ring_sync(struct amdgpu_device *adev,
-			   struct amdgpu_ring *ringA,
-			   struct amdgpu_ring *ringB)
-{
-	struct fence *fence1 = NULL, *fence2 = NULL;
-	struct amdgpu_semaphore *semaphore = NULL;
-	int r;
-
-	r = amdgpu_semaphore_create(adev, &semaphore);
-	if (r) {
-		DRM_ERROR("Failed to create semaphore\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringA, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringA, semaphore);
-	amdgpu_ring_unlock_commit(ringA);
-
-	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
-	if (r)
-		goto out_cleanup;
-
-	r = amdgpu_ring_lock(ringA, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringA, semaphore);
-	amdgpu_ring_unlock_commit(ringA);
-
-	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
-	if (r)
-		goto out_cleanup;
-
-	mdelay(1000);
-
-	if (fence_is_signaled(fence1)) {
-		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringB, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringB);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringB, semaphore);
-	amdgpu_ring_unlock_commit(ringB);
-
-	r = fence_wait(fence1, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence 1\n");
-		goto out_cleanup;
-	}
-
-	mdelay(1000);
-
-	if (fence_is_signaled(fence2)) {
-		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringB, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringB);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringB, semaphore);
-	amdgpu_ring_unlock_commit(ringB);
-
-	r = fence_wait(fence2, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence 1\n");
-		goto out_cleanup;
-	}
-
-out_cleanup:
-	amdgpu_semaphore_free(adev, &semaphore, NULL);
-
-	if (fence1)
-		fence_put(fence1);
-
-	if (fence2)
-		fence_put(fence2);
-
-	if (r)
-		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
-}
-
-static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
@@ -383,109 +249,6 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
-			    struct amdgpu_ring *ringB,
-			    struct amdgpu_ring *ringC)
-{
-	struct fence *fenceA = NULL, *fenceB = NULL;
-	struct amdgpu_semaphore *semaphore = NULL;
-	bool sigA, sigB;
-	int i, r;
-
-	r = amdgpu_semaphore_create(adev, &semaphore);
-	if (r) {
-		DRM_ERROR("Failed to create semaphore\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringA, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringA, semaphore);
-	amdgpu_ring_unlock_commit(ringA);
-
-	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
-	if (r)
-		goto out_cleanup;
-
-	r = amdgpu_ring_lock(ringB, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_wait(ringB, semaphore);
-	amdgpu_ring_unlock_commit(ringB);
-	r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
-	if (r)
-		goto out_cleanup;
-
-	mdelay(1000);
-
-	if (fence_is_signaled(fenceA)) {
-		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-	if (fence_is_signaled(fenceB)) {
-		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
-		goto out_cleanup;
-	}
-
-	r = amdgpu_ring_lock(ringC, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringC);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringC, semaphore);
-	amdgpu_ring_unlock_commit(ringC);
-
-	for (i = 0; i < 30; ++i) {
-		mdelay(100);
-		sigA = fence_is_signaled(fenceA);
-		sigB = fence_is_signaled(fenceB);
-		if (sigA || sigB)
-			break;
-	}
-
-	if (!sigA && !sigB) {
-		DRM_ERROR("Neither fence A nor B has been signaled\n");
-		goto out_cleanup;
-	} else if (sigA && sigB) {
-		DRM_ERROR("Both fence A and B has been signaled\n");
-		goto out_cleanup;
-	}
-
-	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
-
-	r = amdgpu_ring_lock(ringC, 64);
-	if (r) {
-		DRM_ERROR("Failed to lock ring B %p\n", ringC);
-		goto out_cleanup;
-	}
-	amdgpu_semaphore_emit_signal(ringC, semaphore);
-	amdgpu_ring_unlock_commit(ringC);
-
-	mdelay(1000);
-
-	r = fence_wait(fenceA, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence A\n");
-		goto out_cleanup;
-	}
-	r = fence_wait(fenceB, false);
-	if (r) {
-		DRM_ERROR("Failed to wait for sync fence B\n");
-		goto out_cleanup;
-	}
-
-out_cleanup:
-	amdgpu_semaphore_free(adev, &semaphore, NULL);
-
-	if (fenceA)
-		fence_put(fenceA);
-
-	if (fenceB)
-		fence_put(fenceB);
-
-	if (r)
-		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
-}
-
 static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -247,42 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
 	    TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
 );
 
-DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
-
-	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
-	    TP_ARGS(ring, sem),
-
-	    TP_STRUCT__entry(
-			     __field(int, ring)
-			     __field(signed, waiters)
-			     __field(uint64_t, gpu_addr)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->ring = ring;
-			   __entry->waiters = sem->waiters;
-			   __entry->gpu_addr = sem->gpu_addr;
-			   ),
-
-	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
-		      __entry->waiters, __entry->gpu_addr)
-);
-
-DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,
-
-	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
-	    TP_ARGS(ring, sem)
-);
-
-DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,
-
-	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
-	    TP_ARGS(ring, sem)
-);
-
 #endif
 
 /* This part must be outside protection */
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -742,30 +742,6 @@ out:
 	return r;
 }
 
-/**
- * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
- *
- * @ring: engine to use
- * @semaphore: address of semaphore
- * @emit_wait: true=emit wait, false=emit signal
- *
- */
-bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
-				    struct amdgpu_semaphore *semaphore,
-				    bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
-	if (!emit_wait)
-		amdgpu_ring_write(ring, VCE_CMD_END);
-
-	return true;
-}
-
 /**
  * amdgpu_vce_ring_emit_ib - execute indirect buffer
  *
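A side note on the address encoding in the VCE emitter above (the UVD emitters below use the same scheme): the semaphore buffer is 8-byte aligned, so bits [2:0] of its GPU address are dropped and the remaining bits travel as two 20-bit command fields, covering a 43-bit address space. A small standalone demo of the split and its round trip (the sample address is hypothetical, chosen only for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x1234567890ull;           /* 8-byte aligned sample */
        uint32_t lo = (addr >> 3) & 0x000FFFFF;    /* bits  3..22, low field  */
        uint32_t hi = (addr >> 23) & 0x000FFFFF;   /* bits 23..42, high field */

        /* reassembly, as the hardware would see it */
        uint64_t back = ((uint64_t)hi << 23) | ((uint64_t)lo << 3);

        printf("lo=0x%05x hi=0x%05x roundtrip=0x%llx\n",
               lo, hi, (unsigned long long)back);  /* roundtrip == addr */
        return 0;
}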
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -34,9 +34,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
-				    struct amdgpu_semaphore *semaphore,
-				    bool emit_wait);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 				unsigned flags);
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -294,30 +294,6 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
 }
 
-/**
- * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (CIK).
- */
-static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	u64 addr = semaphore->gpu_addr;
-	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
-
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
-	amdgpu_ring_write(ring, addr & 0xfffffff8);
-	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-
-	return true;
-}
-
 /**
  * cik_sdma_gfx_stop - stop the gfx async dma engines
  *
@@ -1297,7 +1273,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.parse_cs = NULL,
 	.emit_ib = cik_sdma_ring_emit_ib,
 	.emit_fence = cik_sdma_ring_emit_fence,
-	.emit_semaphore = cik_sdma_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
 	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
 	.test_ring = cik_sdma_ring_test_ring,
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2516,36 +2516,6 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, upper_32_bits(seq));
 }
 
-/**
- * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
- *
- * @ring: amdgpu ring buffer object
- * @semaphore: amdgpu semaphore object
- * @emit_wait: Is this a semaphore wait?
- *
- * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
- * from running ahead of semaphore waits.
- */
-static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
-	amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-	amdgpu_ring_write(ring, addr & 0xffffffff);
-	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
-
-	if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
-		/* Prevent the PFP from running ahead of the semaphore wait */
-		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
-		amdgpu_ring_write(ring, 0x0);
-	}
-
-	return true;
-}
-
 /*
  * IB stuff
  */
@@ -5567,7 +5537,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.parse_cs = NULL,
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
-	.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
@@ -5583,7 +5553,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.parse_cs = NULL,
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
-	.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4762,44 +4762,6 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 
 }
 
-/**
- * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring
- *
- * @ring: amdgpu ring buffer object
- * @semaphore: amdgpu semaphore object
- * @emit_wait: Is this a semaphore wait?
- *
- * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
- * from running ahead of semaphore waits.
- */
-static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
-	if (ring->adev->asic_type == CHIP_TOPAZ ||
-	    ring->adev->asic_type == CHIP_TONGA ||
-	    ring->adev->asic_type == CHIP_FIJI)
-		/* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
-		return false;
-	else {
-		amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2));
-		amdgpu_ring_write(ring, lower_32_bits(addr));
-		amdgpu_ring_write(ring, upper_32_bits(addr));
-		amdgpu_ring_write(ring, sel);
-	}
-
-	if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
-		/* Prevent the PFP from running ahead of the semaphore wait */
-		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
-		amdgpu_ring_write(ring, 0x0);
-	}
-
-	return true;
-}
-
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
@@ -5145,7 +5107,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.parse_cs = NULL,
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
-	.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
@@ -5161,7 +5123,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.parse_cs = NULL,
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
 	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
-	.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -334,31 +334,6 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
-/**
- * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (VI).
- */
-static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
-					  struct amdgpu_semaphore *semaphore,
-					  bool emit_wait)
-{
-	u64 addr = semaphore->gpu_addr;
-	u32 sig = emit_wait ? 0 : 1;
-
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
-			  SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
-	amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
-	amdgpu_ring_write(ring, upper_32_bits(addr));
-
-	return true;
-}
-
 /**
  * sdma_v2_4_gfx_stop - stop the gfx async dma engines
  *
@@ -1302,7 +1277,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.parse_cs = NULL,
 	.emit_ib = sdma_v2_4_ring_emit_ib,
 	.emit_fence = sdma_v2_4_ring_emit_fence,
-	.emit_semaphore = sdma_v2_4_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
 	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
 	.test_ring = sdma_v2_4_ring_test_ring,
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -444,32 +444,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
-
-/**
- * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (VI).
- */
-static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					  struct amdgpu_semaphore *semaphore,
-					  bool emit_wait)
-{
-	u64 addr = semaphore->gpu_addr;
-	u32 sig = emit_wait ? 0 : 1;
-
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
-			  SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
-	amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
-	amdgpu_ring_write(ring, upper_32_bits(addr));
-
-	return true;
-}
-
 /**
  * sdma_v3_0_gfx_stop - stop the gfx async dma engines
  *
@@ -1570,7 +1544,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.parse_cs = NULL,
 	.emit_ib = sdma_v3_0_ring_emit_ib,
 	.emit_fence = sdma_v3_0_ring_emit_fence,
-	.emit_semaphore = sdma_v3_0_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
 	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
 	.test_ring = sdma_v3_0_ring_test_ring,
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -438,33 +438,6 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 	amdgpu_ring_write(ring, 2);
 }
 
-/**
- * uvd_v4_2_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
-	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
-	return true;
-}
-
 /**
  * uvd_v4_2_ring_test_ring - register write test
  *
@@ -882,7 +855,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
 	.emit_ib = uvd_v4_2_ring_emit_ib,
 	.emit_fence = uvd_v4_2_ring_emit_fence,
-	.emit_semaphore = uvd_v4_2_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.test_ring = uvd_v4_2_ring_test_ring,
 	.test_ib = uvd_v4_2_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -482,33 +482,6 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 	amdgpu_ring_write(ring, 2);
 }
 
-/**
- * uvd_v5_0_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
-	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
-	return true;
-}
-
 /**
  * uvd_v5_0_ring_test_ring - register write test
  *
@@ -821,7 +794,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
 	.emit_ib = uvd_v5_0_ring_emit_ib,
 	.emit_fence = uvd_v5_0_ring_emit_fence,
-	.emit_semaphore = uvd_v5_0_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.test_ring = uvd_v5_0_ring_test_ring,
 	.test_ib = uvd_v5_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -721,33 +721,6 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
 	amdgpu_ring_write(ring, 2);
 }
 
-/**
- * uvd_v6_0_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
-					 struct amdgpu_semaphore *semaphore,
-					 bool emit_wait)
-{
-	uint64_t addr = semaphore->gpu_addr;
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
-	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
-	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
-	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
-	return true;
-}
-
 /**
  * uvd_v6_0_ring_test_ring - register write test
  *
@@ -1062,7 +1035,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
 	.parse_cs = amdgpu_uvd_ring_parse_cs,
 	.emit_ib = uvd_v6_0_ring_emit_ib,
 	.emit_fence = uvd_v6_0_ring_emit_fence,
-	.emit_semaphore = uvd_v6_0_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = uvd_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -639,7 +639,7 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.parse_cs = amdgpu_vce_ring_parse_cs,
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
-	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -759,7 +759,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
 	.parse_cs = amdgpu_vce_ring_parse_cs,
 	.emit_ib = amdgpu_vce_ring_emit_ib,
 	.emit_fence = amdgpu_vce_ring_emit_fence,
-	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
+	.emit_semaphore = NULL,
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,