drm/radeon: handle more than 10 UVD sessions

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Arindam Nath <arindam.nath@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

parent 7050c6ef5f
commit 8b2cf4f575
UVD register definitions (file header lost in extraction; likely drivers/gpu/drm/radeon/r600d.h — verify against the original commit):
@@ -2071,6 +2071,7 @@
 #define UVD_UDEC_DBW_ADDR_CONFIG		0xef54

 #define UVD_LMI_EXT40_ADDR			0xf498
+#define UVD_GP_SCRATCH4				0xf4e0
 #define UVD_LMI_ADDR_EXT			0xf594
 #define UVD_VCPU_CACHE_OFFSET0			0xf608
 #define UVD_VCPU_CACHE_SIZE0			0xf60c
drivers/gpu/drm/radeon/radeon.h:
@@ -1674,15 +1674,18 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
 /*
  * UVD
  */
-#define RADEON_MAX_UVD_HANDLES		10
-#define RADEON_UVD_STACK_SIZE		(1024*1024)
-#define RADEON_UVD_HEAP_SIZE		(1024*1024)
+#define RADEON_DEFAULT_UVD_HANDLES	10
+#define RADEON_MAX_UVD_HANDLES		30
+#define RADEON_UVD_STACK_SIZE		(200*1024)
+#define RADEON_UVD_HEAP_SIZE		(256*1024)
+#define RADEON_UVD_SESSION_SIZE		(50*1024)

 struct radeon_uvd {
 	bool			fw_header_present;
 	struct radeon_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	unsigned		max_handles;
 	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
 	unsigned		img_size[RADEON_MAX_UVD_HANDLES];
drivers/gpu/drm/radeon/radeon_uvd.c:
@@ -135,6 +135,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
 	}

 	rdev->uvd.fw_header_present = false;
+	rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
 	if (fw_name) {
 		/* Let's try to load the newer firmware first */
 		r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
@@ -142,11 +143,27 @@ int radeon_uvd_init(struct radeon_device *rdev)
 		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
 			fw_name);
+	} else {
+		struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
+		unsigned version_major, version_minor, family_id;
+
+		r = radeon_ucode_validate(rdev->uvd_fw);
+		if (r)
+			return r;
+
+		rdev->uvd.fw_header_present = true;
+
+		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+		DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+			 version_major, version_minor, family_id);
+
+		/*
+		 * Limit the number of UVD handles depending on
+		 * microcode major and minor versions.
+		 */
+		if ((version_major >= 0x01) && (version_minor >= 0x37))
+			rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
+	}
 }
(NOTE: this hunk's removed lines were lost in extraction; the +/- markers above are reconstructed — verify against the original commit.)
@@ -166,7 +183,7 @@ int radeon_uvd_init(struct radeon_device *rdev)

 	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
 		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
-		  RADEON_GPU_PAGE_SIZE;
+		  RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
 			     NULL, &rdev->uvd.vcpu_bo);
@@ -199,7 +216,7 @@ int radeon_uvd_init(struct radeon_device *rdev)

 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		atomic_set(&rdev->uvd.handles[i], 0);
 		rdev->uvd.filp[i] = NULL;
 		rdev->uvd.img_size[i] = 0;
@@ -236,7 +253,7 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
 	if (rdev->uvd.vcpu_bo == NULL)
 		return 0;

-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
 		if (handle != 0) {
 			struct radeon_fence *fence;
@@ -311,7 +328,7 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
 {
 	int i, r;

-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
 		if (handle != 0 && rdev->uvd.filp[i] == filp) {
 			struct radeon_fence *fence;
@@ -496,7 +513,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 			return r;

 		/* try to alloc a new handle */
-		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
 			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
 				DRM_ERROR("Handle 0x%x already in use!\n", handle);
 				return -EINVAL;
@@ -522,7 +539,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 			return r;

 		/* validate the handle */
-		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
 			if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
 				if (p->rdev->uvd.filp[i] != p->filp) {
 					DRM_ERROR("UVD handle collision detected!\n");
@@ -537,7 +554,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,

 	case 2:
 		/* it's a destroy msg, free the handle */
-		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+		for (i = 0; i < p->rdev->uvd.max_handles; ++i)
 			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
 		radeon_bo_kunmap(bo);
 		return 0;
@@ -836,7 +853,7 @@ static void radeon_uvd_count_handles(struct radeon_device *rdev,
 	*sd = 0;
 	*hd = 0;

-	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < rdev->uvd.max_handles; ++i) {
 		if (!atomic_read(&rdev->uvd.handles[i]))
 			continue;

drivers/gpu/drm/radeon/uvd_v1_0.c:
@@ -124,12 +124,13 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
 	WREG32(UVD_VCPU_CACHE_SIZE0, size);

 	addr += size;
-	size = RADEON_UVD_STACK_SIZE >> 3;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE1, size);

 	addr += size;
-	size = RADEON_UVD_HEAP_SIZE >> 3;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE2, size);
drivers/gpu/drm/radeon/uvd_v2_2.c:
@@ -116,12 +116,13 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
 	WREG32(UVD_VCPU_CACHE_SIZE0, size);

 	addr += size;
-	size = RADEON_UVD_STACK_SIZE >> 3;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE1, size);

 	addr += size;
-	size = RADEON_UVD_HEAP_SIZE >> 3;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE2, size);
drivers/gpu/drm/radeon/uvd_v4_2.c:
@@ -53,12 +53,13 @@ int uvd_v4_2_resume(struct radeon_device *rdev)
 	WREG32(UVD_VCPU_CACHE_SIZE0, size);

 	addr += size;
-	size = RADEON_UVD_STACK_SIZE >> 3;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE1, size);

 	addr += size;
-	size = RADEON_UVD_HEAP_SIZE >> 3;
+	size = (RADEON_UVD_STACK_SIZE +
+	       (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE2, size);
@@ -70,5 +71,8 @@ int uvd_v4_2_resume(struct radeon_device *rdev)
 	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
 	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

+	if (rdev->uvd.fw_header_present)
+		WREG32(UVD_GP_SCRATCH4, rdev->uvd.max_handles);
+
 	return 0;
 }