msm, i915 and amdgpu fixes
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJbdhlUAAoJEAx081l5xIa++08P/jdvasJkTu3eyAczNVW2EPyG
JQcpIj774tyN0Dwy9rjkW5KxlNo5cQlchlAQ/LqPnRJp8c3qSe3obwjFzmXkQgxP
j//1FB1XkxK/YZbXRjudV7xUov/sMyBnXIwvmWP3NDu5rDrWfLZDznvq6r7vDy/o
ImmxZboWqI94oGhrtAuwMpcFjOOuOvJQg9FSHAOMkNhRHs1xl50y5R/WSeAoY1fC
R22SZEcGQkQJuq6kHa2Dgysd1uMULLpgQnbw/9rD72PeQXzIIw48xdjJkTBjPu5A
ulrCaMd+loaCO3xdtIdpLqbKo4XQwGCm1gShDUWZhgVy21Z3M78u6isEtBkYDbZZ
MJECEYzbp8EYkm8QiqSzTTdqvrlH3CjukKhhZeNdpVNxmIvsjZDQGTKYp21mA3S1
I+FVPFH6sykMFxIcpRa87bn4ImrJ2xSDSrWU3HhNQiWpJf+fSaZsKQkUCLdY9rxX
WcwvtP5zspL0rWwtkStkKd0BSkBK+S6uZ17xlvUEK17kih2E2TTpJoGnqNE1HNUP
7Kts/UgXrxobSGhRJLxf+b7gJqWwrLmeCfF4ZWRvMpG727k6Dw87mIfkMGy0v/fJ
rKp2/RYqPGVF2A++2kp5GFPfIFlHtiCCDNYwoBJKqwCFkm+ow+ehFWwLXPXeBDFH
PRfMeYj5freNp2C78TTD
=RsR5
-----END PGP SIGNATURE-----

Merge tag 'drm-next-2018-08-17' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "First round of fixes for -rc1. I'll follow this up with the msm new
  hw support pull request.

  This just has three sets of fixes, some for msm before the new hw, a
  bunch of AMD fixes (including some required firmware changes for new
  hw), and a set of i915 (+gvt) fixes"

* tag 'drm-next-2018-08-17' of git://anongit.freedesktop.org/drm/drm: (30 commits)
  drm/amdgpu: Use kvmalloc for allocating UVD/VCE/VCN BO backup memory
  drm/i915: set DP Main Stream Attribute for color range on DDI platforms
  drm/i915/selftests: Hold rpm for unparking
  drm/i915: Restore user forcewake domains across suspend
  drm/i915: Unmask user interrupts writes into HWSP on snb/ivb/vlv/hsw
  drm/i915/gvt: fix memory leak in intel_vgpu_ioctl()
  drm/i915/gvt: Off by one in intel_vgpu_write_fence()
  drm/i915/kvmgt: Fix potential Spectre v1
  drm/i915/gvt: return error on cmd access
  drm/i915/gvt: initialize dmabuf mutex in vgpu_create
  drm/i915/gvt: fix cleanup sequence in intel_gvt_clean_device
  drm/amd/display: Guard against null crtc in CRC IRQ
  drm/amd/display: Pass connector id when executing VBIOS CT
  drm/amd/display: Check if clock source in use before disabling
  drm/amd/display: Allow clock sharing b/w HDMI and DVI
  drm/amd/display: Fix warning observed in mode change on Vega
  drm/amd/display: fix single link DVI has no display
  drm/amdgpu/vce: VCE entity initialization relies on ring initializtion
  drm/amdgpu/uvd: UVD entity initialization relys on ring initialization
  drm/amdgpu:add VCN booting with firmware loaded by PSP
  ...
commit f80a71b0c4
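The first commit in the shortlog above ("drm/amdgpu: Use kvmalloc for allocating UVD/VCE/VCN BO backup memory") shows up in several hunks below as kmalloc/kfree becoming kvmalloc/kvfree: the firmware BO backups taken at suspend can be large enough that a physically contiguous kmalloc() may fail. A minimal, self-contained sketch of that pattern follows; the helper name and buffer handling are illustrative only, not taken from the patch:

#include <linux/mm.h>      /* kvmalloc(), kvfree() */
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative helper, not from the patch: snapshot a firmware BO. */
static void *fw_bo_backup(const void *cpu_addr, size_t size)
{
        /*
         * kvmalloc() tries kmalloc() first and falls back to vmalloc()
         * when a contiguous allocation of this size is not available;
         * kvfree() releases either kind of buffer.
         */
        void *saved = kvmalloc(size, GFP_KERNEL);

        if (!saved)
                return NULL;

        memcpy(saved, cpu_addr, size);
        return saved;
}

The actual fixes apply this directly to the saved_bo buffers and, as visible in the resume hunks below, pair it with memcpy_toio() when restoring the BO contents.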
@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
 msleep(1);
 }

+if (ucode) {
+ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+}
+
 return ret;
 }


@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
 AMDGPU_UCODE_ID_SMC,
 AMDGPU_UCODE_ID_UVD,
 AMDGPU_UCODE_ID_VCE,
+AMDGPU_UCODE_ID_VCN,
 AMDGPU_UCODE_ID_MAXIMUM,
 };


@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
 void *kaddr;
 /* ucode_size_bytes */
 uint32_t ucode_size;
+/* starting tmr mc address */
+uint32_t tmr_mc_addr_lo;
+uint32_t tmr_mc_addr_hi;
 };

 void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
@@ -122,8 +122,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
-struct amdgpu_ring *ring;
-struct drm_sched_rq *rq;
 unsigned long bo_size;
 const char *fw_name;
 const struct common_firmware_header *hdr;

@@ -266,13 +264,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 }
 }

-ring = &adev->uvd.inst[0].ring;
-rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
-if (r) {
-DRM_ERROR("Failed setting up UVD kernel entity.\n");
-return r;
-}
 for (i = 0; i < adev->uvd.max_handles; ++i) {
 atomic_set(&adev->uvd.handles[i], 0);
 adev->uvd.filp[i] = NULL;

@@ -311,7 +302,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 if (adev->uvd.harvest_config & (1 << j))
 continue;
-kfree(adev->uvd.inst[j].saved_bo);
+kvfree(adev->uvd.inst[j].saved_bo);

 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
 &adev->uvd.inst[j].gpu_addr,

@@ -327,6 +318,29 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 return 0;
 }

+/**
+ * amdgpu_uvd_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+struct amdgpu_ring *ring;
+struct drm_sched_rq *rq;
+int r;
+
+ring = &adev->uvd.inst[0].ring;
+rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+if (r) {
+DRM_ERROR("Failed setting up UVD kernel entity.\n");
+return r;
+}
+
+return 0;
+}
+
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
 unsigned size;

@@ -354,7 +368,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
 ptr = adev->uvd.inst[j].cpu_addr;

-adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
 if (!adev->uvd.inst[j].saved_bo)
 return -ENOMEM;


@@ -380,7 +394,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)

 if (adev->uvd.inst[i].saved_bo != NULL) {
 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
-kfree(adev->uvd.inst[i].saved_bo);
+kvfree(adev->uvd.inst[i].saved_bo);
 adev->uvd.inst[i].saved_bo = NULL;
 } else {
 const struct common_firmware_header *hdr;

@@ -69,6 +69,7 @@ struct amdgpu_uvd {

 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
@@ -90,8 +90,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 */
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 {
-struct amdgpu_ring *ring;
-struct drm_sched_rq *rq;
 const char *fw_name;
 const struct common_firmware_header *hdr;
 unsigned ucode_version, version_major, version_minor, binary_id;

@@ -188,14 +186,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 return r;
 }

-ring = &adev->vce.ring[0];
-rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
-if (r != 0) {
-DRM_ERROR("Failed setting up VCE run queue.\n");
-return r;
-}
-
 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 atomic_set(&adev->vce.handles[i], 0);
 adev->vce.filp[i] = NULL;

@@ -235,6 +225,29 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 return 0;
 }

+/**
+ * amdgpu_vce_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+struct amdgpu_ring *ring;
+struct drm_sched_rq *rq;
+int r;
+
+ring = &adev->vce.ring[0];
+rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+if (r != 0) {
+DRM_ERROR("Failed setting up VCE run queue.\n");
+return r;
+}
+
+return 0;
+}
+
 /**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *

@@ -55,6 +55,7 @@ struct amdgpu_vce {

 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
 int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 version_major, version_minor, family_id);
 }

-bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-+ AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
 + AMDGPU_VCN_SESSION_SIZE * 40;
+if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
 &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);

@@ -129,7 +130,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
 int i;

-kfree(adev->vcn.saved_bo);
+kvfree(adev->vcn.saved_bo);

 amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
 &adev->vcn.gpu_addr,

@@ -160,7 +161,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 size = amdgpu_bo_size(adev->vcn.vcpu_bo);
 ptr = adev->vcn.cpu_addr;

-adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
+adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
 if (!adev->vcn.saved_bo)
 return -ENOMEM;


@@ -182,18 +183,20 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)

 if (adev->vcn.saved_bo != NULL) {
 memcpy_toio(ptr, adev->vcn.saved_bo, size);
-kfree(adev->vcn.saved_bo);
+kvfree(adev->vcn.saved_bo);
 adev->vcn.saved_bo = NULL;
 } else {
 const struct common_firmware_header *hdr;
 unsigned offset;

 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
-le32_to_cpu(hdr->ucode_size_bytes));
-size -= le32_to_cpu(hdr->ucode_size_bytes);
-ptr += le32_to_cpu(hdr->ucode_size_bytes);
+if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+le32_to_cpu(hdr->ucode_size_bytes));
+size -= le32_to_cpu(hdr->ucode_size_bytes);
+ptr += le32_to_cpu(hdr->ucode_size_bytes);
+}
 memset_io(ptr, 0, size);
 }

@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
 case AMDGPU_UCODE_ID_VCE:
 *type = GFX_FW_TYPE_VCE;
 break;
+case AMDGPU_UCODE_ID_VCN:
+*type = GFX_FW_TYPE_VCN;
+break;
 case AMDGPU_UCODE_ID_MAXIMUM:
 default:
 return -EINVAL;
@@ -123,6 +123,10 @@ static int uvd_v4_2_sw_init(void *handle)
 ring = &adev->uvd.inst->ring;
 sprintf(ring->name, "uvd");
 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+if (r)
+return r;
+
+r = amdgpu_uvd_entity_init(adev);

 return r;
 }

@@ -120,6 +120,10 @@ static int uvd_v5_0_sw_init(void *handle)
 ring = &adev->uvd.inst->ring;
 sprintf(ring->name, "uvd");
 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+if (r)
+return r;
+
+r = amdgpu_uvd_entity_init(adev);

 return r;
 }

@@ -440,6 +440,8 @@ static int uvd_v6_0_sw_init(void *handle)
 }
 }

+r = amdgpu_uvd_entity_init(adev);
+
 return r;
 }

@@ -410,6 +410,7 @@ static int uvd_v7_0_early_init(void *handle)
 static int uvd_v7_0_sw_init(void *handle)
 {
 struct amdgpu_ring *ring;
+
 int i, j, r;
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;

@@ -478,6 +479,10 @@ static int uvd_v7_0_sw_init(void *handle)
 }
 }

+r = amdgpu_uvd_entity_init(adev);
+if (r)
+return r;
+
 r = amdgpu_virt_alloc_mm_table(adev);
 if (r)
 return r;
@@ -439,6 +439,8 @@ static int vce_v2_0_sw_init(void *handle)
 return r;
 }

+r = amdgpu_vce_entity_init(adev);
+
 return r;
 }

@@ -448,6 +448,8 @@ static int vce_v3_0_sw_init(void *handle)
 return r;
 }

+r = amdgpu_vce_entity_init(adev);
+
 return r;
 }

@@ -419,6 +419,7 @@ static int vce_v4_0_sw_init(void *handle)
 {
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 struct amdgpu_ring *ring;
+
 unsigned size;
 int r, i;

@@ -438,7 +439,7 @@ static int vce_v4_0_sw_init(void *handle)
 const struct common_firmware_header *hdr;
 unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);

-adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
 if (!adev->vce.saved_bo)
 return -ENOMEM;

@@ -474,6 +475,11 @@ static int vce_v4_0_sw_init(void *handle)
 return r;
 }

+
+r = amdgpu_vce_entity_init(adev);
+if (r)
+return r;
+
 r = amdgpu_virt_alloc_mm_table(adev);
 if (r)
 return r;

@@ -490,7 +496,7 @@ static int vce_v4_0_sw_fini(void *handle)
 amdgpu_virt_free_mm_table(adev);

 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-kfree(adev->vce.saved_bo);
+kvfree(adev->vce.saved_bo);
 adev->vce.saved_bo = NULL;
 }

@@ -100,6 +100,16 @@ static int vcn_v1_0_sw_init(void *handle)
 if (r)
 return r;

+if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+const struct common_firmware_header *hdr;
+hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+adev->firmware.fw_size +=
+ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+DRM_INFO("PSP loading VCN firmware\n");
+}
+
 r = amdgpu_vcn_resume(adev);
 if (r)
 return r;
@ -265,26 +275,38 @@ static int vcn_v1_0_resume(void *handle)
|
|||
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
|
||||
uint32_t offset;
|
||||
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
|
||||
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
|
||||
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
|
||||
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
|
||||
offset = 0;
|
||||
} else {
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
|
||||
lower_32_bits(adev->vcn.gpu_addr));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
|
||||
upper_32_bits(adev->vcn.gpu_addr));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
|
||||
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
|
||||
offset = size;
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
|
||||
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
|
||||
}
|
||||
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
|
||||
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
|
||||
lower_32_bits(adev->vcn.gpu_addr + size));
|
||||
lower_32_bits(adev->vcn.gpu_addr + offset));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
|
||||
upper_32_bits(adev->vcn.gpu_addr + size));
|
||||
upper_32_bits(adev->vcn.gpu_addr + offset));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
|
||||
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
|
||||
lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
|
||||
lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
|
||||
upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
|
||||
upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
|
||||
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
|
||||
AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
|
||||
|
|
|
@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
|
|||
*/
|
||||
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
|
||||
{
|
||||
struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
|
||||
struct dc_stream_state *stream_state = crtc_state->stream;
|
||||
struct dm_crtc_state *crtc_state;
|
||||
struct dc_stream_state *stream_state;
|
||||
uint32_t crcs[3];
|
||||
|
||||
if (crtc == NULL)
|
||||
return;
|
||||
|
||||
crtc_state = to_dm_crtc_state(crtc->state);
|
||||
stream_state = crtc_state->stream;
|
||||
|
||||
/* Early return if CRC capture is not enabled. */
|
||||
if (!crtc_state->crc_enabled)
|
||||
return;
|
||||
|
|
|
@ -1812,6 +1812,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
|
|||
bool is_vga_mode = (stream->timing.h_addressable == 640)
|
||||
&& (stream->timing.v_addressable == 480);
|
||||
|
||||
if (stream->phy_pix_clk == 0)
|
||||
stream->phy_pix_clk = stream->timing.pix_clk_khz;
|
||||
if (stream->phy_pix_clk > 340000)
|
||||
is_over_340mhz = true;
|
||||
|
||||
|
|
|
@ -268,24 +268,30 @@ bool resource_construct(
|
|||
|
||||
return true;
|
||||
}
|
||||
static int find_matching_clock_source(
|
||||
const struct resource_pool *pool,
|
||||
struct clock_source *clock_source)
|
||||
{
|
||||
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pool->clk_src_count; i++) {
|
||||
if (pool->clock_sources[i] == clock_source)
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
void resource_unreference_clock_source(
|
||||
struct resource_context *res_ctx,
|
||||
const struct resource_pool *pool,
|
||||
struct clock_source *clock_source)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pool->clk_src_count; i++) {
|
||||
if (pool->clock_sources[i] != clock_source)
|
||||
continue;
|
||||
int i = find_matching_clock_source(pool, clock_source);
|
||||
|
||||
if (i > -1)
|
||||
res_ctx->clock_source_ref_count[i]--;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (pool->dp_clock_source == clock_source)
|
||||
res_ctx->dp_clock_source_ref_count--;
|
||||
}
|
||||
|
@ -295,19 +301,31 @@ void resource_reference_clock_source(
|
|||
const struct resource_pool *pool,
|
||||
struct clock_source *clock_source)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < pool->clk_src_count; i++) {
|
||||
if (pool->clock_sources[i] != clock_source)
|
||||
continue;
|
||||
int i = find_matching_clock_source(pool, clock_source);
|
||||
|
||||
if (i > -1)
|
||||
res_ctx->clock_source_ref_count[i]++;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pool->dp_clock_source == clock_source)
|
||||
res_ctx->dp_clock_source_ref_count++;
|
||||
}
|
||||
|
||||
int resource_get_clock_source_reference(
|
||||
struct resource_context *res_ctx,
|
||||
const struct resource_pool *pool,
|
||||
struct clock_source *clock_source)
|
||||
{
|
||||
int i = find_matching_clock_source(pool, clock_source);
|
||||
|
||||
if (i > -1)
|
||||
return res_ctx->clock_source_ref_count[i];
|
||||
|
||||
if (pool->dp_clock_source == clock_source)
|
||||
return res_ctx->dp_clock_source_ref_count;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
bool resource_are_streams_timing_synchronizable(
|
||||
struct dc_stream_state *stream1,
|
||||
struct dc_stream_state *stream2)
|
||||
|
@ -372,11 +390,11 @@ static bool is_sharable_clk_src(
|
|||
return false;
|
||||
|
||||
if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
|
||||
&& dc_is_dvi_signal(pipe->stream->signal))
|
||||
&& dc_is_dual_link_signal(pipe->stream->signal))
|
||||
return false;
|
||||
|
||||
if (dc_is_hdmi_signal(pipe->stream->signal)
|
||||
&& dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
|
||||
&& dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
|
||||
return false;
|
||||
|
||||
if (!resource_are_streams_timing_synchronizable(
|
||||
|
|
|
@ -930,7 +930,7 @@ void dce110_link_encoder_enable_tmds_output(
|
|||
enum bp_result result;
|
||||
|
||||
/* Enable the PHY */
|
||||
|
||||
cntl.connector_obj_id = enc110->base.connector;
|
||||
cntl.action = TRANSMITTER_CONTROL_ENABLE;
|
||||
cntl.engine_id = enc->preferred_engine;
|
||||
cntl.transmitter = enc110->base.transmitter;
|
||||
|
@ -972,7 +972,7 @@ void dce110_link_encoder_enable_dp_output(
|
|||
* We need to set number of lanes manually.
|
||||
*/
|
||||
configure_encoder(enc110, link_settings);
|
||||
|
||||
cntl.connector_obj_id = enc110->base.connector;
|
||||
cntl.action = TRANSMITTER_CONTROL_ENABLE;
|
||||
cntl.engine_id = enc->preferred_engine;
|
||||
cntl.transmitter = enc110->base.transmitter;
|
||||
|
|
|
@ -1908,7 +1908,9 @@ static void dce110_reset_hw_ctx_wrap(
|
|||
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
|
||||
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
|
||||
|
||||
if (old_clk)
|
||||
if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
|
||||
dc->res_pool,
|
||||
old_clk))
|
||||
old_clk->funcs->cs_power_down(old_clk);
|
||||
|
||||
dc->hwss.disable_plane(dc, pipe_ctx_old);
|
||||
|
|
|
@ -772,7 +772,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
|
|||
|
||||
CRTC_REG_SET(
|
||||
CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
|
||||
CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
|
||||
CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
|
||||
|
||||
if (enable_blanking)
|
||||
CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
|
||||
|
|
|
@ -103,6 +103,11 @@ void resource_reference_clock_source(
|
|||
const struct resource_pool *pool,
|
||||
struct clock_source *clock_source);
|
||||
|
||||
int resource_get_clock_source_reference(
|
||||
struct resource_context *res_ctx,
|
||||
const struct resource_pool *pool,
|
||||
struct clock_source *clock_source);
|
||||
|
||||
bool resource_are_streams_timing_synchronizable(
|
||||
struct dc_stream_state *stream1,
|
||||
struct dc_stream_state *stream2);
|
||||
|
|
|
@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
|
|||
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
|
||||
if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
|
||||
if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
|
||||
return;
|
||||
|
||||
reg = vgpu->fence.regs[fence];
|
||||
|
|
|
@ -874,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
|
|||
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
|
||||
gvt_vgpu_err("%s access to non-render register (%x)\n",
|
||||
cmd, offset);
|
||||
return 0;
|
||||
return -EBADRQC;
|
||||
}
|
||||
|
||||
if (is_shadowed_mmio(offset)) {
|
||||
|
|
|
@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
|
|||
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
|
||||
.vgpu_create = intel_gvt_create_vgpu,
|
||||
.vgpu_destroy = intel_gvt_destroy_vgpu,
|
||||
.vgpu_release = intel_gvt_release_vgpu,
|
||||
.vgpu_reset = intel_gvt_reset_vgpu,
|
||||
.vgpu_activate = intel_gvt_activate_vgpu,
|
||||
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
|
||||
|
@ -315,6 +316,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
|
|||
if (WARN_ON(!gvt))
|
||||
return;
|
||||
|
||||
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
|
||||
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
|
||||
intel_gvt_cleanup_vgpu_type_groups(gvt);
|
||||
intel_gvt_clean_vgpu_types(gvt);
|
||||
|
||||
intel_gvt_debugfs_clean(gvt);
|
||||
clean_service_thread(gvt);
|
||||
intel_gvt_clean_cmd_parser(gvt);
|
||||
|
@ -322,17 +328,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
|
|||
intel_gvt_clean_workload_scheduler(gvt);
|
||||
intel_gvt_clean_gtt(gvt);
|
||||
intel_gvt_clean_irq(gvt);
|
||||
intel_gvt_clean_mmio_info(gvt);
|
||||
intel_gvt_free_firmware(gvt);
|
||||
|
||||
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
|
||||
intel_gvt_cleanup_vgpu_type_groups(gvt);
|
||||
intel_gvt_clean_vgpu_types(gvt);
|
||||
|
||||
intel_gvt_clean_mmio_info(gvt);
|
||||
idr_destroy(&gvt->vgpu_idr);
|
||||
|
||||
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
|
||||
|
||||
kfree(dev_priv->gvt);
|
||||
dev_priv->gvt = NULL;
|
||||
}
|
||||
|
|
|
@ -486,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
|
|||
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
|
||||
struct intel_vgpu_type *type);
|
||||
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
|
||||
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
|
||||
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
|
||||
unsigned int engine_mask);
|
||||
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
|
||||
|
@ -563,7 +564,8 @@ struct intel_gvt_ops {
|
|||
unsigned int);
|
||||
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
|
||||
struct intel_vgpu_type *);
|
||||
void (*vgpu_destroy)(struct intel_vgpu *);
|
||||
void (*vgpu_destroy)(struct intel_vgpu *vgpu);
|
||||
void (*vgpu_release)(struct intel_vgpu *vgpu);
|
||||
void (*vgpu_reset)(struct intel_vgpu *);
|
||||
void (*vgpu_activate)(struct intel_vgpu *);
|
||||
void (*vgpu_deactivate)(struct intel_vgpu *);
|
||||
|
|
|
@ -43,6 +43,8 @@
|
|||
#include <linux/mdev.h>
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
#include <linux/nospec.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "gvt.h"
|
||||
|
||||
|
@ -187,14 +189,14 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
|
|||
|
||||
/* Setup DMA mapping. */
|
||||
*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
|
||||
ret = dma_mapping_error(dev, *dma_addr);
|
||||
if (ret) {
|
||||
if (dma_mapping_error(dev, *dma_addr)) {
|
||||
gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
|
||||
page_to_pfn(page), ret);
|
||||
gvt_unpin_guest_page(vgpu, gfn, size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
|
||||
|
@ -666,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
|
|||
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
|
||||
return;
|
||||
|
||||
intel_gvt_ops->vgpu_deactivate(vgpu);
|
||||
intel_gvt_ops->vgpu_release(vgpu);
|
||||
|
||||
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
|
||||
&vgpu->vdev.iommu_notifier);
|
||||
|
@ -1139,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
|
|||
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
|
||||
struct vfio_region_info info;
|
||||
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
|
||||
int i, ret;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
|
||||
size_t size;
|
||||
int nr_areas = 1;
|
||||
|
@ -1224,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
|
|||
if (info.index >= VFIO_PCI_NUM_REGIONS +
|
||||
vgpu->vdev.num_regions)
|
||||
return -EINVAL;
|
||||
info.index =
|
||||
array_index_nospec(info.index,
|
||||
VFIO_PCI_NUM_REGIONS +
|
||||
vgpu->vdev.num_regions);
|
||||
|
||||
i = info.index - VFIO_PCI_NUM_REGIONS;
|
||||
|
||||
|
@ -1250,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
|
|||
&sparse->header, sizeof(*sparse) +
|
||||
(sparse->nr_areas *
|
||||
sizeof(*sparse->areas)));
|
||||
kfree(sparse);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
kfree(sparse);
|
||||
return ret;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
kfree(sparse);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
@ -1270,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
|
|||
sizeof(info), caps.buf,
|
||||
caps.size)) {
|
||||
kfree(caps.buf);
|
||||
kfree(sparse);
|
||||
return -EFAULT;
|
||||
}
|
||||
info.cap_offset = sizeof(info);
|
||||
|
@ -1278,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
|
|||
kfree(caps.buf);
|
||||
}
|
||||
|
||||
kfree(sparse);
|
||||
return copy_to_user((void __user *)arg, &info, minsz) ?
|
||||
-EFAULT : 0;
|
||||
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
|
||||
|
@ -1615,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
|
|||
kvmgt_protect_table_init(info);
|
||||
gvt_cache_init(vgpu);
|
||||
|
||||
mutex_init(&vgpu->dmabuf_lock);
|
||||
init_completion(&vgpu->vblank_done);
|
||||
|
||||
info->track_node.track_write = kvmgt_page_track_write;
|
||||
|
|
|
@ -784,7 +784,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
|
|||
kunmap(page);
|
||||
}
|
||||
|
||||
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
|
||||
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
|
||||
unsigned long engine_mask)
|
||||
{
|
||||
struct intel_vgpu_submission *s = &vgpu->submission;
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
@ -879,7 +880,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
|
|||
* cleaned up during the resetting process later, so doing
|
||||
* the workload clean up here doesn't have any impact.
|
||||
**/
|
||||
clean_workloads(vgpu, ENGINE_MASK(ring_id));
|
||||
intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
|
||||
}
|
||||
|
||||
workload->complete(workload);
|
||||
|
@ -1081,7 +1082,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
|
|||
if (!s->active)
|
||||
return;
|
||||
|
||||
clean_workloads(vgpu, engine_mask);
|
||||
intel_vgpu_clean_workloads(vgpu, engine_mask);
|
||||
s->ops->reset(vgpu, engine_mask);
|
||||
}
|
||||
|
||||
|
|
|
@ -158,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
|
|||
|
||||
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
|
||||
|
||||
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
|
||||
unsigned long engine_mask);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -222,7 +222,7 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
|
|||
* @vgpu: virtual GPU
|
||||
*
|
||||
* This function is called when user wants to deactivate a virtual GPU.
|
||||
* All virtual GPU runtime information will be destroyed.
|
||||
* The virtual GPU will be stopped.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
|
||||
|
@ -238,11 +238,29 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
|
|||
}
|
||||
|
||||
intel_vgpu_stop_schedule(vgpu);
|
||||
intel_vgpu_dmabuf_cleanup(vgpu);
|
||||
|
||||
mutex_unlock(&vgpu->vgpu_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_release_vgpu - release a virtual GPU
|
||||
* @vgpu: virtual GPU
|
||||
*
|
||||
* This function is called when user wants to release a virtual GPU.
|
||||
* The virtual GPU will be stopped and all runtime information will be
|
||||
* destroyed.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
intel_gvt_deactivate_vgpu(vgpu);
|
||||
|
||||
mutex_lock(&vgpu->vgpu_lock);
|
||||
intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
|
||||
intel_vgpu_dmabuf_cleanup(vgpu);
|
||||
mutex_unlock(&vgpu->vgpu_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_destroy_vgpu - destroy a virtual GPU
|
||||
* @vgpu: virtual GPU
|
||||
|
@ -361,6 +379,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
|
|||
vgpu->gvt = gvt;
|
||||
vgpu->sched_ctl.weight = param->weight;
|
||||
mutex_init(&vgpu->vgpu_lock);
|
||||
mutex_init(&vgpu->dmabuf_lock);
|
||||
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
|
||||
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
|
||||
idr_init(&vgpu->object_idr);
|
||||
|
|
|
@ -9201,6 +9201,7 @@ enum skl_power_gate {
|
|||
#define TRANS_MSA_10_BPC (2 << 5)
|
||||
#define TRANS_MSA_12_BPC (3 << 5)
|
||||
#define TRANS_MSA_16_BPC (4 << 5)
|
||||
#define TRANS_MSA_CEA_RANGE (1 << 3)
|
||||
|
||||
/* LCPLL Control */
|
||||
#define LCPLL_CTL _MMIO(0x130040)
|
||||
|
|
|
@ -1685,6 +1685,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
|
|||
WARN_ON(transcoder_is_dsi(cpu_transcoder));
|
||||
|
||||
temp = TRANS_MSA_SYNC_CLK;
|
||||
|
||||
if (crtc_state->limited_color_range)
|
||||
temp |= TRANS_MSA_CEA_RANGE;
|
||||
|
||||
switch (crtc_state->pipe_bpp) {
|
||||
case 18:
|
||||
temp |= TRANS_MSA_6_BPC;
|
||||
|
|
|
@ -387,8 +387,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
|
|||
mmio = RING_HWS_PGA(engine->mmio_base);
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 6)
|
||||
I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
|
||||
if (INTEL_GEN(dev_priv) >= 6) {
|
||||
u32 mask = ~0u;
|
||||
|
||||
/*
|
||||
* Keep the render interrupt unmasked as this papers over
|
||||
* lost interrupts following a reset.
|
||||
*/
|
||||
if (engine->id == RCS)
|
||||
mask &= ~BIT(0);
|
||||
|
||||
I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
|
||||
}
|
||||
|
||||
I915_WRITE(mmio, engine->status_page.ggtt_offset);
|
||||
POSTING_READ(mmio);
|
||||
|
|
|
@ -359,8 +359,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
|
|||
}
|
||||
|
||||
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
|
||||
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
|
||||
bool restore)
|
||||
static unsigned int
|
||||
intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
struct intel_uncore_forcewake_domain *domain;
|
||||
|
@ -412,20 +412,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
|
|||
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
|
||||
|
||||
fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
|
||||
|
||||
if (restore) { /* If reset with a user forcewake, try to restore */
|
||||
if (fw)
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
|
||||
|
||||
if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
|
||||
dev_priv->uncore.fifo_count =
|
||||
fifo_free_entries(dev_priv);
|
||||
}
|
||||
|
||||
if (!restore)
|
||||
assert_forcewakes_inactive(dev_priv);
|
||||
assert_forcewakes_inactive(dev_priv);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
return fw; /* track the lost user forcewake domains */
|
||||
}
|
||||
|
||||
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
|
||||
|
@ -534,7 +525,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
|
|||
}
|
||||
|
||||
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
|
||||
bool restore_forcewake)
|
||||
unsigned int restore_forcewake)
|
||||
{
|
||||
/* clear out unclaimed reg detection bit */
|
||||
if (check_for_unclaimed_mmio(dev_priv))
|
||||
|
@ -549,7 +540,17 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
|
|||
}
|
||||
|
||||
iosf_mbi_punit_acquire();
|
||||
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
|
||||
intel_uncore_forcewake_reset(dev_priv);
|
||||
if (restore_forcewake) {
|
||||
spin_lock_irq(&dev_priv->uncore.lock);
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv,
|
||||
restore_forcewake);
|
||||
|
||||
if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
|
||||
dev_priv->uncore.fifo_count =
|
||||
fifo_free_entries(dev_priv);
|
||||
spin_unlock_irq(&dev_priv->uncore.lock);
|
||||
}
|
||||
iosf_mbi_punit_release();
|
||||
}
|
||||
|
||||
|
@ -558,13 +559,18 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv)
|
|||
iosf_mbi_punit_acquire();
|
||||
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
|
||||
&dev_priv->uncore.pmic_bus_access_nb);
|
||||
intel_uncore_forcewake_reset(dev_priv, false);
|
||||
dev_priv->uncore.fw_domains_saved =
|
||||
intel_uncore_forcewake_reset(dev_priv);
|
||||
iosf_mbi_punit_release();
|
||||
}
|
||||
|
||||
void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
__intel_uncore_early_sanitize(dev_priv, true);
|
||||
unsigned int restore_forcewake;
|
||||
|
||||
restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
|
||||
__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
|
||||
|
||||
iosf_mbi_register_pmic_bus_access_notifier(
|
||||
&dev_priv->uncore.pmic_bus_access_nb);
|
||||
i915_check_and_clear_faults(dev_priv);
|
||||
|
@ -1545,7 +1551,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
|
|||
|
||||
intel_uncore_edram_detect(dev_priv);
|
||||
intel_uncore_fw_domains_init(dev_priv);
|
||||
__intel_uncore_early_sanitize(dev_priv, false);
|
||||
__intel_uncore_early_sanitize(dev_priv, 0);
|
||||
|
||||
dev_priv->uncore.unclaimed_mmio_check = 1;
|
||||
dev_priv->uncore.pmic_bus_access_nb.notifier_call =
|
||||
|
@ -1632,7 +1638,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
|
|||
iosf_mbi_punit_acquire();
|
||||
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
|
||||
&dev_priv->uncore.pmic_bus_access_nb);
|
||||
intel_uncore_forcewake_reset(dev_priv, false);
|
||||
intel_uncore_forcewake_reset(dev_priv);
|
||||
iosf_mbi_punit_release();
|
||||
}
|
||||
|
||||
|
|
|
@ -104,6 +104,7 @@ struct intel_uncore {
|
|||
|
||||
enum forcewake_domains fw_domains;
|
||||
enum forcewake_domains fw_domains_active;
|
||||
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
|
||||
|
||||
u32 fw_set;
|
||||
u32 fw_clear;
|
||||
|
|
|
@ -499,6 +499,19 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
|
|||
return err == expected;
|
||||
}
|
||||
|
||||
static void disable_retire_worker(struct drm_i915_private *i915)
|
||||
{
|
||||
mutex_lock(&i915->drm.struct_mutex);
|
||||
if (!i915->gt.active_requests++) {
|
||||
intel_runtime_pm_get(i915);
|
||||
i915_gem_unpark(i915);
|
||||
intel_runtime_pm_put(i915);
|
||||
}
|
||||
mutex_unlock(&i915->drm.struct_mutex);
|
||||
cancel_delayed_work_sync(&i915->gt.retire_work);
|
||||
cancel_delayed_work_sync(&i915->gt.idle_work);
|
||||
}
|
||||
|
||||
static int igt_mmap_offset_exhaustion(void *arg)
|
||||
{
|
||||
struct drm_i915_private *i915 = arg;
|
||||
|
@ -509,12 +522,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
|
|||
int loop, err;
|
||||
|
||||
/* Disable background reaper */
|
||||
mutex_lock(&i915->drm.struct_mutex);
|
||||
if (!i915->gt.active_requests++)
|
||||
i915_gem_unpark(i915);
|
||||
mutex_unlock(&i915->drm.struct_mutex);
|
||||
cancel_delayed_work_sync(&i915->gt.retire_work);
|
||||
cancel_delayed_work_sync(&i915->gt.idle_work);
|
||||
disable_retire_worker(i915);
|
||||
GEM_BUG_ON(!i915->gt.awake);
|
||||
|
||||
/* Trim the device mmap space to only a page */
|
||||
|
|
|
@ -160,7 +160,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
|
|||
i915_reg_t reg = { offset };
|
||||
|
||||
iosf_mbi_punit_acquire();
|
||||
intel_uncore_forcewake_reset(dev_priv, false);
|
||||
intel_uncore_forcewake_reset(dev_priv);
|
||||
iosf_mbi_punit_release();
|
||||
|
||||
check_for_unclaimed_mmio(dev_priv);
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/qcom_scm.h>
|
||||
|
@ -20,6 +21,7 @@
|
|||
#include <linux/pm_opp.h>
|
||||
#include <linux/nvmem-consumer.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/slab.h>
|
||||
#include "msm_gem.h"
|
||||
#include "msm_mmu.h"
|
||||
#include "a5xx_gpu.h"
|
||||
|
@ -92,12 +94,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
|
|||
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
|
||||
mem_region, mem_phys, mem_size, NULL);
|
||||
} else {
|
||||
char newname[strlen("qcom/") + strlen(fwname) + 1];
|
||||
char *newname;
|
||||
|
||||
sprintf(newname, "qcom/%s", fwname);
|
||||
newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
|
||||
|
||||
ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
|
||||
mem_region, mem_phys, mem_size, NULL);
|
||||
kfree(newname);
|
||||
}
|
||||
if (ret)
|
||||
goto out;
|
||||
|
|
|
@ -18,7 +18,9 @@
|
|||
*/
|
||||
|
||||
#include <linux/ascii85.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pm_opp.h>
|
||||
#include <linux/slab.h>
|
||||
#include "adreno_gpu.h"
|
||||
#include "msm_gem.h"
|
||||
#include "msm_mmu.h"
|
||||
|
@ -71,10 +73,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
|
|||
{
|
||||
struct drm_device *drm = adreno_gpu->base.dev;
|
||||
const struct firmware *fw = NULL;
|
||||
char newname[strlen("qcom/") + strlen(fwname) + 1];
|
||||
char *newname;
|
||||
int ret;
|
||||
|
||||
sprintf(newname, "qcom/%s", fwname);
|
||||
newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
|
||||
if (!newname)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/*
|
||||
* Try first to load from qcom/$fwfile using a direct load (to avoid
|
||||
|
@ -88,11 +92,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
|
|||
dev_info(drm->dev, "loaded %s from new location\n",
|
||||
newname);
|
||||
adreno_gpu->fwloc = FW_LOCATION_NEW;
|
||||
return fw;
|
||||
goto out;
|
||||
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
|
||||
dev_err(drm->dev, "failed to load %s: %d\n",
|
||||
newname, ret);
|
||||
return ERR_PTR(ret);
|
||||
fw = ERR_PTR(ret);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -107,11 +112,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
|
|||
dev_info(drm->dev, "loaded %s from legacy location\n",
|
||||
newname);
|
||||
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
|
||||
return fw;
|
||||
goto out;
|
||||
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
|
||||
dev_err(drm->dev, "failed to load %s: %d\n",
|
||||
fwname, ret);
|
||||
return ERR_PTR(ret);
|
||||
fw = ERR_PTR(ret);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -127,16 +133,20 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
|
|||
dev_info(drm->dev, "loaded %s with helper\n",
|
||||
newname);
|
||||
adreno_gpu->fwloc = FW_LOCATION_HELPER;
|
||||
return fw;
|
||||
goto out;
|
||||
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
|
||||
dev_err(drm->dev, "failed to load %s: %d\n",
|
||||
newname, ret);
|
||||
return ERR_PTR(ret);
|
||||
fw = ERR_PTR(ret);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
dev_err(drm->dev, "failed to load %s\n", fwname);
|
||||
return ERR_PTR(-ENOENT);
|
||||
fw = ERR_PTR(-ENOENT);
|
||||
out:
|
||||
kfree(newname);
|
||||
return fw;
|
||||
}
|
||||
|
||||
static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
|
||||
|
|
|
@ -421,7 +421,7 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
|
|||
|
||||
ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
|
||||
if (ret) {
|
||||
DRM_ERROR("diable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
|
||||
DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
|
||||
DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
|
||||
irq->irq_idx, ret);
|
||||
}
|
||||
|
@ -2444,6 +2444,8 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
|
|||
|
||||
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
|
||||
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
|
||||
if (!phys)
|
||||
continue;
|
||||
|
||||
switch (event) {
|
||||
case MSM_ENC_COMMIT_DONE:
|
||||
|
@ -2461,7 +2463,7 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
|
|||
return -EINVAL;
|
||||
};
|
||||
|
||||
if (phys && fn_wait) {
|
||||
if (fn_wait) {
|
||||
DPU_ATRACE_BEGIN("wait_for_completion_event");
|
||||
ret = fn_wait(phys);
|
||||
DPU_ATRACE_END("wait_for_completion_event");
|
||||
|
|
|
@ -121,7 +121,7 @@ void dpu_power_resource_deinit(struct platform_device *pdev,
|
|||
mutex_lock(&phandle->phandle_lock);
|
||||
list_for_each_entry_safe(curr_client, next_client,
|
||||
&phandle->power_client_clist, list) {
|
||||
pr_err("cliend:%s-%d still registered with refcount:%d\n",
|
||||
pr_err("client:%s-%d still registered with refcount:%d\n",
|
||||
curr_client->name, curr_client->id,
|
||||
curr_client->refcount);
|
||||
curr_client->active = false;
|
||||
|
|
|
@ -263,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
|
|||
int msm_gem_mmap_obj(struct drm_gem_object *obj,
|
||||
struct vm_area_struct *vma);
|
||||
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
int msm_gem_fault(struct vm_fault *vmf);
|
||||
vm_fault_t msm_gem_fault(struct vm_fault *vmf);
|
||||
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
|
||||
int msm_gem_get_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova);
|
||||
|
|
|
@ -219,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
return msm_gem_mmap_obj(vma->vm_private_data, vma);
|
||||
}
|
||||
|
||||
int msm_gem_fault(struct vm_fault *vmf)
|
||||
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct drm_gem_object *obj = vma->vm_private_data;
|
||||
|
@ -227,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
|
|||
struct page **pages;
|
||||
unsigned long pfn;
|
||||
pgoff_t pgoff;
|
||||
int ret;
|
||||
int err;
|
||||
vm_fault_t ret;
|
||||
|
||||
/*
|
||||
* vm_ops.open/drm_gem_mmap_obj and close get and put
|
||||
* a reference on obj. So, we dont need to hold one here.
|
||||
*/
|
||||
ret = mutex_lock_interruptible(&msm_obj->lock);
|
||||
if (ret)
|
||||
err = mutex_lock_interruptible(&msm_obj->lock);
|
||||
if (err) {
|
||||
ret = VM_FAULT_NOPAGE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
|
@ -245,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
|
|||
/* make sure we have pages attached now */
|
||||
pages = get_pages(obj);
|
||||
if (IS_ERR(pages)) {
|
||||
ret = PTR_ERR(pages);
|
||||
ret = vmf_error(PTR_ERR(pages));
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
|
@ -257,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
|
|||
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
|
||||
pfn, pfn << PAGE_SHIFT);
|
||||
|
||||
ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
|
||||
|
||||
ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
|
||||
out_unlock:
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
out:
|
||||
switch (ret) {
|
||||
case -EAGAIN:
|
||||
case 0:
|
||||
case -ERESTARTSYS:
|
||||
case -EINTR:
|
||||
case -EBUSY:
|
||||
/*
|
||||
* EBUSY is ok: this just means that another thread
|
||||
* already did the job.
|
||||
*/
|
||||
return VM_FAULT_NOPAGE;
|
||||
case -ENOMEM:
|
||||
return VM_FAULT_OOM;
|
||||
default:
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** get mmap offset */
|
||||
|
|