Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Fixes all over the place:

   - amdkfd: two static checker fixes
   - mst: a bunch of static checker and spec/hw interaction fixes
   - amdgpu: fix Iceland hw properly, and some fiji bugs, along with
     some write-combining fixes.
   - exynos: some regression fixes
   - adv7511: fix some EDID reading issues"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (38 commits)
  drm/dp/mst: deallocate payload on port destruction
  drm/dp/mst: Reverse order of MST enable and clearing VC payload table.
  drm/dp/mst: move GUID storage from mgr, port to only mst branch
  drm/dp/mst: change MST detection scheme
  drm/dp/mst: Calculate MST PBN with 31.32 fixed point
  drm: Add drm_fixp_from_fraction and drm_fixp2int_ceil
  drm/mst: Add range check for max_payloads during init
  drm/mst: Don't ignore the MST PBN self-test result
  drm: fix missing reference counting decrease
  drm/amdgpu: disable uvd and vce clockgating on Fiji
  drm/amdgpu: remove exp hardware support from iceland
  drm/amdgpu: load MEC ucode manually on iceland
  drm/amdgpu: don't load MEC2 on topaz
  drm/amdgpu: drop topaz support from gmc8 module
  drm/amdgpu: pull topaz gmc bits into gmc_v7
  drm/amdgpu: The VI specific EXE bit should only apply to GMC v8.0 above
  drm/amdgpu: iceland use CI based MC IP
  drm/amdgpu: move gmc7 support out of CIK dependency
  drm/amdgpu/gfx7: enable cp inst/reg error interrupts
  drm/amdgpu/gfx8: enable cp inst/reg error interrupts
  ...
commit 9b108828ed
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
     amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
 # add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
     ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
     amdgpu_amdkfd_gfx_v7.o
@@ -34,6 +34,7 @@ amdgpu-y += \
 
 # add GMC block
 amdgpu-y += \
+    gmc_v7_0.o \
     gmc_v8_0.o
 
 # add IH block

@@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
     .get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
 {
     return (struct kfd2kgd_calls *)&kfd2kgd;
 }

@@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
     .get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
 {
     return (struct kfd2kgd_calls *)&kfd2kgd;
 }

@@ -256,11 +256,11 @@ static struct pci_device_id pciidlist[] = {
     {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
     /* topaz */
-    {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-    {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-    {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-    {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-    {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+    {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+    {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+    {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+    {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
     /* tonga */
     {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
     {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},

@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                      AMDGPU_GEM_DOMAIN_OA);
 
     bo->flags = flags;
+
+    /* For architectures that don't support WC memory,
+     * mask out the WC flag from the BO
+     */
+    if (!drm_arch_can_wc_memory())
+        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
     amdgpu_fill_placement_to_bo(bo, placement);
     /* Kernel allocation are uninterruptible */
     r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,

@@ -808,7 +808,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
         flags |= AMDGPU_PTE_SNOOPED;
     }
 
-    if (adev->asic_type >= CHIP_TOPAZ)
+    if (adev->asic_type >= CHIP_TONGA)
         flags |= AMDGPU_PTE_EXECUTABLE;
 
     flags |= AMDGPU_PTE_READABLE;

@@ -4738,6 +4738,22 @@ static int gfx_v7_0_early_init(void *handle)
     return 0;
 }
 
+static int gfx_v7_0_late_init(void *handle)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int r;
+
+    r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+    if (r)
+        return r;
+
+    r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+    if (r)
+        return r;
+
+    return 0;
+}
+
 static int gfx_v7_0_sw_init(void *handle)
 {
     struct amdgpu_ring *ring;
@@ -4890,6 +4906,8 @@ static int gfx_v7_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+    amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
     gfx_v7_0_cp_enable(adev, false);
     gfx_v7_0_rlc_stop(adev);
     gfx_v7_0_fini_pg(adev);
@@ -5527,7 +5545,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
     .early_init = gfx_v7_0_early_init,
-    .late_init = NULL,
+    .late_init = gfx_v7_0_late_init,
     .sw_init = gfx_v7_0_sw_init,
     .sw_fini = gfx_v7_0_sw_fini,
     .hw_init = gfx_v7_0_hw_init,

@@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
     adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
     adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-    if (adev->asic_type != CHIP_STONEY) {
+    if ((adev->asic_type != CHIP_STONEY) &&
+        (adev->asic_type != CHIP_TOPAZ)) {
         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
         err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
         if (!err) {
@@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
         if (r)
             return -EINVAL;
 
-        r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                        AMDGPU_UCODE_ID_CP_MEC1);
-        if (r)
-            return -EINVAL;
+        if (adev->asic_type == CHIP_TOPAZ) {
+            r = gfx_v8_0_cp_compute_load_microcode(adev);
+            if (r)
+                return r;
+        } else {
+            r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                            AMDGPU_UCODE_ID_CP_MEC1);
+            if (r)
+                return -EINVAL;
+        }
     }
 }
 
@@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+    amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
     gfx_v8_0_cp_enable(adev, false);
     gfx_v8_0_rlc_stop(adev);
     gfx_v8_0_cp_compute_fini(adev);
@@ -4329,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle)
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     int r;
 
+    r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+    if (r)
+        return r;
+
+    r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+    if (r)
+        return r;
+
     /* requires IBs so do in late init after IB pool is initialized */
     r = gfx_v8_0_do_edc_gpr_workarounds(adev);
     if (r)

@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 
+static const u32 golden_settings_iceland_a11[] =
+{
+    mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+    mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+    mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+    mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+    mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+    switch (adev->asic_type) {
+    case CHIP_TOPAZ:
+        amdgpu_program_register_sequence(adev,
+                                         iceland_mgcg_cgcg_init,
+                                         (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+        amdgpu_program_register_sequence(adev,
+                                         golden_settings_iceland_a11,
+                                         (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+        break;
+    default:
+        break;
+    }
+}
+
 /**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
  *
  * @adev: amdgpu_device pointer
  *
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
     case CHIP_HAWAII:
         chip_name = "hawaii";
         break;
+    case CHIP_TOPAZ:
+        chip_name = "topaz";
+        break;
     case CHIP_KAVERI:
     case CHIP_KABINI:
         return 0;
     default: BUG();
     }
 
-    snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+    if (adev->asic_type == CHIP_TOPAZ)
+        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+    else
+        snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
     err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
     if (err)
         goto out;
@@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle)
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    gmc_v7_0_init_golden_registers(adev);
+
     gmc_v7_0_mc_program(adev);
 
     if (!(adev->flags & AMD_IS_APU)) {

@@ -42,9 +42,7 @@
 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
     mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_iceland_a11[] =
-{
-    mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-    mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-    mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-    mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
-    mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
 static const u32 cz_mgcg_cgcg_init[] =
 {
     mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
     switch (adev->asic_type) {
-    case CHIP_TOPAZ:
-        amdgpu_program_register_sequence(adev,
-                                         iceland_mgcg_cgcg_init,
-                                         (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
-        amdgpu_program_register_sequence(adev,
-                                         golden_settings_iceland_a11,
-                                         (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
-        break;
     case CHIP_FIJI:
         amdgpu_program_register_sequence(adev,
                                          fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
     DRM_DEBUG("\n");
 
     switch (adev->asic_type) {
-    case CHIP_TOPAZ:
-        chip_name = "topaz";
-        break;
     case CHIP_TONGA:
         chip_name = "tonga";
         break;
     case CHIP_FIJI:
         chip_name = "fiji";
         break;
     case CHIP_CARRIZO:
     case CHIP_STONEY:
         return 0;
@@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle)
 
     gmc_v8_0_mc_program(adev);
 
-    if (!(adev->flags & AMD_IS_APU)) {
+    if (adev->asic_type == CHIP_TONGA) {
         r = gmc_v8_0_mc_load_microcode(adev);
         if (r) {
             DRM_ERROR("Failed to load MC firmware!\n");

@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
     case AMDGPU_UCODE_ID_CP_ME:
         return UCODE_ID_CP_ME_MASK;
     case AMDGPU_UCODE_ID_CP_MEC1:
-        return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+        return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
     case AMDGPU_UCODE_ID_CP_MEC2:
         return UCODE_ID_CP_MEC_MASK;
     case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
         return -EINVAL;
     }
 
-    if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-            &toc->entry[toc->num_entries++])) {
-        DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-        return -EINVAL;
-    }
-
     if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
             &toc->entry[toc->num_entries++])) {
         DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
                 UCODE_ID_CP_ME_MASK |
                 UCODE_ID_CP_PFP_MASK |
                 UCODE_ID_CP_MEC_MASK |
-                UCODE_ID_CP_MEC_JT1_MASK |
-                UCODE_ID_CP_MEC_JT2_MASK;
+                UCODE_ID_CP_MEC_JT1_MASK;
+
 
     if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
         DRM_ERROR("Fail to request SMU load ucode\n");

@@ -61,6 +61,7 @@
 #include "vi.h"
 #include "vi_dpm.h"
 #include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
 #include "gfx_v8_0.h"
 #include "sdma_v2_4.h"
 #include "sdma_v3_0.h"
@@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
     },
     {
         .type = AMD_IP_BLOCK_TYPE_GMC,
-        .major = 8,
-        .minor = 0,
+        .major = 7,
+        .minor = 4,
         .rev = 0,
-        .funcs = &gmc_v8_0_ip_funcs,
+        .funcs = &gmc_v7_0_ip_funcs,
     },
     {
         .type = AMD_IP_BLOCK_TYPE_IH,
@@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle)
         break;
     case CHIP_FIJI:
         adev->has_uvd = true;
-        adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
-                         AMDGPU_CG_SUPPORT_VCE_MGCG;
+        adev->cg_flags = 0;
         adev->pg_flags = 0;
         adev->external_rev_id = adev->rev_id + 0x3c;
         break;

@@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 
     kfree(p);
 
-    kfree((void *)work);
+    kfree(work);
 }
 
 static void kfd_process_destroy_delayed(struct rcu_head *rcu)

@@ -803,12 +803,33 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
     return mstb;
 }
 
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+    struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+    if (mstb->port_parent) {
+        if (list_empty(&mstb->port_parent->next))
+            kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+    }
+    kfree(mstb);
+}
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
     struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
     struct drm_dp_mst_port *port, *tmp;
     bool wake_tx = false;
 
+    /*
+     * init kref again to be used by ports to remove mst branch when it is
+     * not needed anymore
+     */
+    kref_init(kref);
+
+    if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+        kref_get(&mstb->port_parent->kref);
+
     /*
      * destroy all ports - don't need lock
      * as there are no more references to the mst branch
@@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 
     if (wake_tx)
         wake_up(&mstb->mgr->tx_waitq);
-    kfree(mstb);
+
+    kref_put(kref, drm_dp_free_mst_branch_device);
 }
 
 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref)
          * from an EDID retrieval */
 
         mutex_lock(&mgr->destroy_connector_lock);
+        kref_get(&port->parent->kref);
         list_add(&port->next, &mgr->destroy_connector_list);
         mutex_unlock(&mgr->destroy_connector_lock);
         schedule_work(&mgr->destroy_connector_work);
@@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
     return send_link;
 }
 
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
-                                   struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
 {
     int ret;
-    if (port->dpcd_rev >= 0x12) {
-        port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
-        if (!port->guid_valid) {
-            ret = drm_dp_send_dpcd_write(mstb->mgr,
-                                         port,
-                                         DP_GUID,
-                                         16, port->guid);
-            port->guid_valid = true;
+
+    memcpy(mstb->guid, guid, 16);
+
+    if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+        if (mstb->port_parent) {
+            ret = drm_dp_send_dpcd_write(
+                    mstb->mgr,
+                    mstb->port_parent,
+                    DP_GUID,
+                    16,
+                    mstb->guid);
+        } else {
+
+            ret = drm_dp_dpcd_write(
+                    mstb->mgr->aux,
+                    DP_GUID,
+                    mstb->guid,
+                    16);
         }
     }
 }
@@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
     port->dpcd_rev = port_msg->dpcd_revision;
     port->num_sdp_streams = port_msg->num_sdp_streams;
     port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
-    memcpy(port->guid, port_msg->peer_guid, 16);
 
     /* manage mstb port lists with mgr lock - take a reference
        for this list */
@@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 
     if (old_ddps != port->ddps) {
         if (port->ddps) {
-            drm_dp_check_port_guid(mstb, port);
             if (!port->input)
                 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
         } else {
-            port->guid_valid = false;
             port->available_pbn = 0;
         }
     }
@@ -1130,13 +1159,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
             drm_dp_put_port(port);
             goto out;
         }
-        if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
-            port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
-            drm_mode_connector_set_tile_property(port->connector);
-        }
+
+        drm_mode_connector_set_tile_property(port->connector);
+
         (*mstb->mgr->cbs->register_connector)(port->connector);
     }
 
 out:
     /* put reference to this port */
     drm_dp_put_port(port);
@@ -1161,11 +1188,9 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
     port->ddps = conn_stat->displayport_device_plug_status;
 
     if (old_ddps != port->ddps) {
+        dowork = true;
         if (port->ddps) {
-            drm_dp_check_port_guid(mstb, port);
-            dowork = true;
         } else {
-            port->guid_valid = false;
             port->available_pbn = 0;
         }
     }
@@ -1222,13 +1247,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
     struct drm_dp_mst_branch *found_mstb;
     struct drm_dp_mst_port *port;
 
+    if (memcmp(mstb->guid, guid, 16) == 0)
+        return mstb;
+
+
     list_for_each_entry(port, &mstb->ports, next) {
         if (!port->mstb)
             continue;
 
-        if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
-            return port->mstb;
-
         found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
 
         if (found_mstb)
@@ -1247,10 +1273,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
     /* find the port by iterating down */
     mutex_lock(&mgr->lock);
 
-    if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
-        mstb = mgr->mst_primary;
-    else
-        mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+    mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
 
     if (mstb)
         kref_get(&mstb->kref);
@@ -1271,8 +1294,13 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
         if (port->input)
             continue;
 
-        if (!port->ddps)
+        if (!port->ddps) {
+            if (port->cached_edid) {
+                kfree(port->cached_edid);
+                port->cached_edid = NULL;
+            }
             continue;
+        }
 
         if (!port->available_pbn)
             drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1283,6 +1311,12 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                 drm_dp_check_and_send_link_address(mgr, mstb_child);
                 drm_dp_put_mst_branch_device(mstb_child);
             }
-        }
+        } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
+                   port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
+            if (!port->cached_edid) {
+                port->cached_edid =
+                    drm_get_edid(port->connector, &port->aux.ddc);
+            }
+        }
     }
 }
@@ -1302,6 +1336,8 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
         drm_dp_check_and_send_link_address(mgr, mstb);
         drm_dp_put_mst_branch_device(mstb);
     }
+
+    (*mgr->cbs->hotplug)(mgr);
 }
 
 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1555,10 +1591,12 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                        txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
                        txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
             }
+
+            drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
             for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
                 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
             }
             (*mgr->cbs->hotplug)(mgr);
         }
     } else {
         mstb->link_address_sent = false;
@@ -1602,6 +1640,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
     return 0;
 }
 
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+    if (!mstb->port_parent)
+        return NULL;
+
+    if (mstb->port_parent->mstb != mstb)
+        return mstb->port_parent;
+
+    return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+                                                                         struct drm_dp_mst_branch *mstb,
+                                                                         int *port_num)
+{
+    struct drm_dp_mst_branch *rmstb = NULL;
+    struct drm_dp_mst_port *found_port;
+    mutex_lock(&mgr->lock);
+    if (mgr->mst_primary) {
+        found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+        if (found_port) {
+            rmstb = found_port->parent;
+            kref_get(&rmstb->kref);
+            *port_num = found_port->port_num;
+        }
+    }
+    mutex_unlock(&mgr->lock);
+    return rmstb;
+}
+
 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_port *port,
                                    int id,
@@ -1609,13 +1678,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 {
     struct drm_dp_sideband_msg_tx *txmsg;
     struct drm_dp_mst_branch *mstb;
-    int len, ret;
+    int len, ret, port_num;
     u8 sinks[DRM_DP_MAX_SDP_STREAMS];
     int i;
 
+    port_num = port->port_num;
     mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
-    if (!mstb)
-        return -EINVAL;
+    if (!mstb) {
+        mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+        if (!mstb)
+            return -EINVAL;
+    }
 
     txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
     if (!txmsg) {
@@ -1627,7 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
         sinks[i] = i;
 
     txmsg->dst = mstb;
-    len = build_allocate_payload(txmsg, port->port_num,
+    len = build_allocate_payload(txmsg, port_num,
                                  id,
                                  pbn, port->num_sdp_streams, sinks);
 
@@ -1983,6 +2057,12 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
         mgr->mst_primary = mstb;
         kref_get(&mgr->mst_primary->kref);
 
+        ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+                                 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+        if (ret < 0) {
+            goto out_unlock;
+        }
+
         {
             struct drm_dp_payload reset_pay;
             reset_pay.start_slot = 0;
@@ -1990,26 +2070,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
             drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
         }
 
-        ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-                                 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
-        if (ret < 0) {
-            goto out_unlock;
-        }
-
-
-        /* sort out guid */
-        ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
-        if (ret != 16) {
-            DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
-            goto out_unlock;
-        }
-
-        mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
-        if (!mgr->guid_valid) {
-            ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
-            mgr->guid_valid = true;
-        }
-
         queue_work(system_long_wq, &mgr->work);
 
         ret = 0;
@@ -2231,9 +2291,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
         }
 
         drm_dp_update_port(mstb, &msg.u.conn_stat);
-        DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
-        (*mgr->cbs->hotplug)(mgr);
 
+        DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
     } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
         drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
         if (!mstb)
@@ -2320,10 +2379,6 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
 
     case DP_PEER_DEVICE_SST_SINK:
         status = connector_status_connected;
-        /* for logical ports - cache the EDID */
-        if (port->port_num >= 8 && !port->cached_edid) {
-            port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
-        }
         break;
     case DP_PEER_DEVICE_DP_LEGACY_CONV:
         if (port->ldps)
@@ -2378,10 +2433,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 
     if (port->cached_edid)
         edid = drm_edid_duplicate(port->cached_edid);
-    else {
-        edid = drm_get_edid(connector, &port->aux.ddc);
-        drm_mode_connector_set_tile_property(connector);
-    }
+
     port->has_audio = drm_detect_monitor_audio(edid);
     drm_dp_put_port(port);
     return edid;
@@ -2446,6 +2498,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
         DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
         if (pbn == port->vcpi.pbn) {
             *slots = port->vcpi.num_slots;
+            drm_dp_put_port(port);
             return true;
         }
     }
@@ -2605,32 +2658,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
  */
 int drm_dp_calc_pbn_mode(int clock, int bpp)
 {
-    fixed20_12 pix_bw;
-    fixed20_12 fbpp;
-    fixed20_12 result;
-    fixed20_12 margin, tmp;
-    u32 res;
+    u64 kbps;
+    s64 peak_kbps;
+    u32 numerator;
+    u32 denominator;
 
-    pix_bw.full = dfixed_const(clock);
-    fbpp.full = dfixed_const(bpp);
-    tmp.full = dfixed_const(8);
-    fbpp.full = dfixed_div(fbpp, tmp);
+    kbps = clock * bpp;
 
-    result.full = dfixed_mul(pix_bw, fbpp);
-    margin.full = dfixed_const(54);
-    tmp.full = dfixed_const(64);
-    margin.full = dfixed_div(margin, tmp);
-    result.full = dfixed_div(result, margin);
+    /*
+     * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+     * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+     * common multiplier to render an integer PBN for all link rate/lane
+     * counts combinations
+     * calculate
+     * peak_kbps *= (1006/1000)
+     * peak_kbps *= (64/54)
+     * peak_kbps *= 8    convert to bytes
+     */
 
-    margin.full = dfixed_const(1006);
-    tmp.full = dfixed_const(1000);
-    margin.full = dfixed_div(margin, tmp);
-    result.full = dfixed_mul(result, margin);
+    numerator = 64 * 1006;
+    denominator = 54 * 8 * 1000 * 1000;
 
-    result.full = dfixed_div(result, tmp);
-    result.full = dfixed_ceil(result);
-    res = dfixed_trunc(result);
-    return res;
+    kbps *= numerator;
+    peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+    return drm_fixp2int_ceil(peak_kbps);
 }
 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
 
@@ -2638,11 +2690,23 @@ static int test_calc_pbn_mode(void)
 {
     int ret;
     ret = drm_dp_calc_pbn_mode(154000, 30);
-    if (ret != 689)
+    if (ret != 689) {
+        DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                  154000, 30, 689, ret);
         return -EINVAL;
+    }
     ret = drm_dp_calc_pbn_mode(234000, 30);
-    if (ret != 1047)
+    if (ret != 1047) {
+        DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                  234000, 30, 1047, ret);
         return -EINVAL;
+    }
+    ret = drm_dp_calc_pbn_mode(297000, 24);
+    if (ret != 1063) {
+        DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                  297000, 24, 1063, ret);
+        return -EINVAL;
+    }
     return 0;
 }
 
@@ -2783,6 +2847,13 @@ static void drm_dp_tx_work(struct work_struct *work)
         mutex_unlock(&mgr->qlock);
 }
 
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+    struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+    kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+    kfree(port);
+}
+
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
     struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2803,13 +2874,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
         list_del(&port->next);
         mutex_unlock(&mgr->destroy_connector_lock);
 
+        kref_init(&port->kref);
+        INIT_LIST_HEAD(&port->next);
+
         mgr->cbs->destroy_connector(mgr, port->connector);
 
         drm_dp_port_teardown_pdt(port, port->pdt);
 
-        if (!port->input && port->vcpi.vcpi > 0)
-            drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-        kfree(port);
+        if (!port->input && port->vcpi.vcpi > 0) {
+            if (mgr->mst_state) {
+                drm_dp_mst_reset_vcpi_slots(mgr, port);
+                drm_dp_update_payload_part1(mgr);
+                drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+            }
+        }
+
+        kref_put(&port->kref, drm_dp_free_mst_port);
         send_hotplug = true;
     }
     if (send_hotplug)
@@ -2847,6 +2927,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
     mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
     mgr->max_payloads = max_payloads;
     mgr->conn_base_id = conn_base_id;
+    if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
+        max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
+        return -EINVAL;
     mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
     if (!mgr->payloads)
        return -ENOMEM;
@@ -2854,7 +2937,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
     if (!mgr->proposed_vcpis)
         return -ENOMEM;
     set_bit(0, &mgr->payload_mask);
-    test_calc_pbn_mode();
+    if (test_calc_pbn_mode() < 0)
+        DRM_ERROR("MST PBN self-test failed\n");
+
     return 0;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);

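Note (illustrative only, not part of the pulled series): the rewritten drm_dp_calc_pbn_mode() above effectively computes PBN = clock * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000), rounded up, doing the division in 31.32 fixed point. A minimal userspace sketch of the same arithmetic in plain 64-bit integers reproduces the values asserted by the extended self-test:

    /* Illustrative only: integer version of the PBN formula used by the
     * rewritten drm_dp_calc_pbn_mode(); the kernel performs the division in
     * 31.32 fixed point via drm_fixp_from_fraction()/drm_fixp2int_ceil(). */
    #include <stdint.h>
    #include <stdio.h>

    static int calc_pbn(int clock_khz, int bpp)
    {
        uint64_t num = (uint64_t)clock_khz * bpp * 64 * 1006;
        uint64_t den = 54ULL * 8 * 1000 * 1000;

        return (int)((num + den - 1) / den);    /* round up */
    }

    int main(void)
    {
        /* Expected: 689, 1047, 1063 - the values checked by test_calc_pbn_mode() */
        printf("%d %d %d\n", calc_pbn(154000, 30), calc_pbn(234000, 30),
               calc_pbn(297000, 24));
        return 0;
    }
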
@@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = {
 static int exynos_dp_probe(struct platform_device *pdev)
 {
     struct device *dev = &pdev->dev;
-    struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
+    struct device_node *np = NULL, *endpoint = NULL;
     struct exynos_dp_device *dp;
     int ret;
 
@@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev)
     platform_set_drvdata(pdev, dp);
 
     /* This is for the backward compatibility. */
-    panel_node = of_parse_phandle(dev->of_node, "panel", 0);
-    if (panel_node) {
-        dp->panel = of_drm_find_panel(panel_node);
-        of_node_put(panel_node);
+    np = of_parse_phandle(dev->of_node, "panel", 0);
+    if (np) {
+        dp->panel = of_drm_find_panel(np);
+        of_node_put(np);
         if (!dp->panel)
             return -EPROBE_DEFER;
-    } else {
-        endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
-        if (endpoint) {
-            panel_node = of_graph_get_remote_port_parent(endpoint);
-            if (panel_node) {
-                dp->panel = of_drm_find_panel(panel_node);
-                of_node_put(panel_node);
-                if (!dp->panel)
-                    return -EPROBE_DEFER;
-            } else {
-                DRM_ERROR("no port node for panel device.\n");
-                return -EINVAL;
-            }
-        }
-    }
-
-    if (endpoint)
-        goto out;
+        goto out;
+    }
 
     endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
     if (endpoint) {
-        bridge_node = of_graph_get_remote_port_parent(endpoint);
-        if (bridge_node) {
-            dp->ptn_bridge = of_drm_find_bridge(bridge_node);
-            of_node_put(bridge_node);
-            if (!dp->ptn_bridge)
-                return -EPROBE_DEFER;
-        } else
-            return -EPROBE_DEFER;
+        np = of_graph_get_remote_port_parent(endpoint);
+        if (np) {
+            /* The remote port can be either a panel or a bridge */
+            dp->panel = of_drm_find_panel(np);
+            if (!dp->panel) {
+                dp->ptn_bridge = of_drm_find_bridge(np);
+                if (!dp->ptn_bridge) {
+                    of_node_put(np);
+                    return -EPROBE_DEFER;
+                }
+            }
+            of_node_put(np);
+        } else {
+            DRM_ERROR("no remote endpoint device node found.\n");
+            return -EINVAL;
+        }
+    } else {
+        DRM_ERROR("no port endpoint subnode found.\n");
+        return -EINVAL;
     }
 
 out:

@@ -1906,8 +1906,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
     return 0;
 }
 
-#ifdef CONFIG_PM
-static int exynos_dsi_suspend(struct device *dev)
+static int __maybe_unused exynos_dsi_suspend(struct device *dev)
 {
     struct drm_encoder *encoder = dev_get_drvdata(dev);
     struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1938,7 +1937,7 @@ static int exynos_dsi_suspend(struct device *dev)
     return 0;
 }
 
-static int exynos_dsi_resume(struct device *dev)
+static int __maybe_unused exynos_dsi_resume(struct device *dev)
 {
     struct drm_encoder *encoder = dev_get_drvdata(dev);
     struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1972,7 +1971,6 @@ err_clk:
 
     return ret;
 }
-#endif
 
 static const struct dev_pm_ops exynos_dsi_pm_ops = {
     SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)

@@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev)
     return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_mixer_suspend(struct device *dev)
+static int __maybe_unused exynos_mixer_suspend(struct device *dev)
 {
     struct mixer_context *ctx = dev_get_drvdata(dev);
     struct mixer_resources *res = &ctx->mixer_res;
@@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev)
     return 0;
 }
 
-static int exynos_mixer_resume(struct device *dev)
+static int __maybe_unused exynos_mixer_resume(struct device *dev)
 {
     struct mixer_context *ctx = dev_get_drvdata(dev);
     struct mixer_resources *res = &ctx->mixer_res;
@@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev)
 
     return 0;
 }
-#endif
 
 static const struct dev_pm_ops exynos_mixer_pm_ops = {
     SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)

@@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
     case ADV7511_REG_BKSV(3):
     case ADV7511_REG_BKSV(4):
     case ADV7511_REG_DDC_STATUS:
+    case ADV7511_REG_EDID_READ_CTRL:
     case ADV7511_REG_BSTATUS(0):
     case ADV7511_REG_BSTATUS(1):
     case ADV7511_REG_CHIP_ID_HIGH:
@@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511)
 {
     adv7511->current_edid_segment = -1;
 
-    regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                 ADV7511_INT0_EDID_READY);
-    regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-                 ADV7511_INT1_DDC_ERROR);
     regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
                        ADV7511_POWER_POWER_DOWN, 0);
+    if (adv7511->i2c_main->irq) {
+        /*
+         * Documentation says the INT_ENABLE registers are reset in
+         * POWER_DOWN mode. My 7511w preserved the bits, however.
+         * Still, let's be safe and stick to the documentation.
+         */
+        regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                     ADV7511_INT0_EDID_READY);
+        regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                     ADV7511_INT1_DDC_ERROR);
+    }
 
     /*
-     * Per spec it is allowed to pulse the HDP signal to indicate that the
+     * Per spec it is allowed to pulse the HPD signal to indicate that the
      * EDID information has changed. Some monitors do this when they wakeup
-     * from standby or are enabled. When the HDP goes low the adv7511 is
+     * from standby or are enabled. When the HPD goes low the adv7511 is
      * reset and the outputs are disabled which might cause the monitor to
-     * go to standby again. To avoid this we ignore the HDP pin for the
+     * go to standby again. To avoid this we ignore the HPD pin for the
      * first few seconds after enabling the output.
      */
     regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                       ADV7511_REG_POWER2_HDP_SRC_MASK,
-                       ADV7511_REG_POWER2_HDP_SRC_NONE);
+                       ADV7511_REG_POWER2_HPD_SRC_MASK,
+                       ADV7511_REG_POWER2_HPD_SRC_NONE);
 
     /*
      * Most of the registers are reset during power down or when HPD is low.
@@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
     if (ret < 0)
         return false;
 
-    if (irq0 & ADV7511_INT0_HDP) {
+    if (irq0 & ADV7511_INT0_HPD) {
         regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                     ADV7511_INT0_HDP);
+                     ADV7511_INT0_HPD);
         return true;
     }
 
@@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
     regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
     regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
-    if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+    if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
         drm_helper_hpd_irq_event(adv7511->encoder->dev);
 
     if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
 
     /* Reading the EDID only works if the device is powered */
     if (!adv7511->powered) {
-        regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                     ADV7511_INT0_EDID_READY);
-        regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-                     ADV7511_INT1_DDC_ERROR);
         regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
                            ADV7511_POWER_POWER_DOWN, 0);
+        if (adv7511->i2c_main->irq) {
+            regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                         ADV7511_INT0_EDID_READY);
+            regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                         ADV7511_INT1_DDC_ERROR);
+        }
         adv7511->current_edid_segment = -1;
     }
 
@@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
         if (adv7511->status == connector_status_connected)
             status = connector_status_disconnected;
     } else {
-        /* Renable HDP sensing */
+        /* Renable HPD sensing */
         regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                           ADV7511_REG_POWER2_HDP_SRC_MASK,
-                           ADV7511_REG_POWER2_HDP_SRC_BOTH);
+                           ADV7511_REG_POWER2_HPD_SRC_MASK,
+                           ADV7511_REG_POWER2_HPD_SRC_BOTH);
     }
 
     adv7511->status = status;

@@ -90,7 +90,7 @@
 #define ADV7511_CSC_ENABLE              BIT(7)
 #define ADV7511_CSC_UPDATE_MODE         BIT(5)
 
-#define ADV7511_INT0_HDP                BIT(7)
+#define ADV7511_INT0_HPD                BIT(7)
 #define ADV7511_INT0_VSYNC              BIT(5)
 #define ADV7511_INT0_AUDIO_FIFO_FULL    BIT(4)
 #define ADV7511_INT0_EDID_READY         BIT(2)
@@ -157,11 +157,11 @@
 #define ADV7511_PACKET_ENABLE_SPARE2    BIT(1)
 #define ADV7511_PACKET_ENABLE_SPARE1    BIT(0)
 
-#define ADV7511_REG_POWER2_HDP_SRC_MASK     0xc0
-#define ADV7511_REG_POWER2_HDP_SRC_BOTH     0x00
-#define ADV7511_REG_POWER2_HDP_SRC_HDP      0x40
-#define ADV7511_REG_POWER2_HDP_SRC_CEC      0x80
-#define ADV7511_REG_POWER2_HDP_SRC_NONE     0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_MASK     0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH     0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD      0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC      0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE     0xc0
 #define ADV7511_REG_POWER2_TDMS_ENABLE      BIT(4)
 #define ADV7511_REG_POWER2_GATE_INPUT_CLK   BIT(0)
 

@@ -10,7 +10,6 @@ config DRM_I915
     # the shmem_readpage() which depends upon tmpfs
     select SHMEM
     select TMPFS
-    select STOP_MACHINE
     select DRM_KMS_HELPER
     select DRM_PANEL
     select DRM_MIPI_DSI

@@ -501,7 +501,9 @@ void intel_detect_pch(struct drm_device *dev)
                 WARN_ON(!IS_SKYLAKE(dev) &&
                         !IS_KABYLAKE(dev));
             } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-                       (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
+                       ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                        pch->subsystem_vendor == 0x1af4 &&
+                        pch->subsystem_device == 0x1100)) {
                 dev_priv->pch_type = intel_virt_detect_pch(dev);
             } else
                 continue;

@@ -2946,7 +2946,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
     struct i915_vma *vma;
     u64 offset;
 
-    intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
+    intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
                             intel_plane->base.state);
 
     vma = i915_gem_obj_to_ggtt_view(obj, &view);
@@ -12075,11 +12075,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
         pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
     }
 
-    /* Clamp bpp to 8 on screens without EDID 1.4 */
-    if (connector->base.display_info.bpc == 0 && bpp > 24) {
-        DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-                      bpp);
-        pipe_config->pipe_bpp = 24;
+    /* Clamp bpp to default limit on screens without EDID 1.4 */
+    if (connector->base.display_info.bpc == 0) {
+        int type = connector->base.connector_type;
+        int clamp_bpp = 24;
+
+        /* Fall back to 18 bpp when DP sink capability is unknown. */
+        if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+            type == DRM_MODE_CONNECTOR_eDP)
+            clamp_bpp = 18;
+
+        if (bpp > clamp_bpp) {
+            DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+                          bpp, clamp_bpp);
+            pipe_config->pipe_bpp = clamp_bpp;
+        }
     }
 }
 
@@ -13883,11 +13893,12 @@ intel_check_primary_plane(struct drm_plane *plane,
     int max_scale = DRM_PLANE_HELPER_NO_SCALING;
     bool can_position = false;
 
-    /* use scaler when colorkey is not required */
-    if (INTEL_INFO(plane->dev)->gen >= 9 &&
-        state->ckey.flags == I915_SET_COLORKEY_NONE) {
-        min_scale = 1;
-        max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+    if (INTEL_INFO(plane->dev)->gen >= 9) {
+        /* use scaler when colorkey is not required */
+        if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+            min_scale = 1;
+            max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+        }
+        can_position = true;
     }
 

@@ -1707,6 +1707,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
     if (flush_domains) {
         flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
         flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+        flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
         flags |= PIPE_CONTROL_FLUSH_ENABLE;
     }
 

@@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
     if (flush_domains) {
         flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
         flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+        flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
         flags |= PIPE_CONTROL_FLUSH_ENABLE;
     }
     if (invalidate_domains) {
@@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
     if (flush_domains) {
         flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
         flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+        flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
         flags |= PIPE_CONTROL_FLUSH_ENABLE;
     }
     if (invalidate_domains) {

@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
 #include "radeon.h"
 #include "radeon_trace.h"
 
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
     DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                   "better performance thanks to write-combining\n");
     bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+    /* For architectures that don't support WC memory,
+     * mask out the WC flag from the BO
+     */
+    if (!drm_arch_can_wc_memory())
+        bo->flags &= ~RADEON_GEM_GTT_WC;
 #endif
 
     radeon_ttm_placement_from_domain(bo, domain);

@@ -35,4 +35,13 @@
 
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 
+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+    return false;
+#else
+    return true;
+#endif
+}
+
 #endif

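Note (illustrative only): the drm_arch_can_wc_memory() helper added above is what the radeon and amdgpu buffer-creation hunks earlier in this merge call in order to strip write-combining flags on architectures that cannot map memory write-combined. A hedged, userspace-only sketch of that pattern, where the CONFIG_* symbols and the flag value are stand-ins rather than the kernel definitions:

    /* Illustrative only: mirrors how radeon_bo_create() and
     * amdgpu_bo_create_restricted() mask out their WC flag when the
     * architecture cannot support write-combined mappings. */
    #include <stdbool.h>
    #include <stdio.h>

    #define GEM_GTT_WC (1u << 1)            /* stand-in for RADEON_GEM_GTT_WC */

    static inline bool arch_can_wc_memory(void)
    {
    #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
        return false;                       /* cache-coherent PPC: no WC mappings */
    #else
        return true;
    #endif
    }

    int main(void)
    {
        unsigned int bo_flags = GEM_GTT_WC; /* caller asked for a WC buffer */

        if (!arch_can_wc_memory())
            bo_flags &= ~GEM_GTT_WC;        /* quietly fall back to cached */

        printf("write-combining %s\n",
               (bo_flags & GEM_GTT_WC) ? "kept" : "masked out");
        return 0;
    }
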
@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
 /**
  * struct drm_dp_mst_port - MST port
  * @kref: reference count for this port.
- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
- * @guid: guid for DP 1.2 device on this port.
  * @port_num: port number
  * @input: if this port is an input port.
  * @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
 struct drm_dp_mst_port {
     struct kref kref;
 
-    /* if dpcd 1.2 device is on this port - its GUID info */
-    bool guid_valid;
-    u8 guid[16];
-
     u8 port_num;
     bool input;
     bool mcs;
@@ -110,10 +104,12 @@ struct drm_dp_mst_port {
  * @tx_slots: transmission slots for this device.
  * @last_seqno: last sequence number used to talk to this.
  * @link_address_sent: if a link address message has been sent to this device yet.
+ * @guid: guid for DP 1.2 branch device. port under this branch can be
+ * identified by port #.
  *
  * This structure represents an MST branch device, there is one
- * primary branch device at the root, along with any others connected
- * to downstream ports
+ * primary branch device at the root, along with any other branches connected
+ * to downstream port of parent branches.
  */
 struct drm_dp_mst_branch {
     struct kref kref;
@@ -132,6 +128,9 @@ struct drm_dp_mst_branch {
     struct drm_dp_sideband_msg_tx *tx_slots[2];
     int last_seqno;
     bool link_address_sent;
+
+    /* global unique identifier to identify branch devices */
+    u8 guid[16];
 };
 
 
@@ -406,11 +405,9 @@ struct drm_dp_payload {
  * @conn_base_id: DRM connector ID this mgr is connected to.
  * @down_rep_recv: msg receiver state for down replies.
  * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, guid, dpcd.
+ * @lock: protects mst state, primary, dpcd.
  * @mst_state: if this manager is enabled for an MST capable port.
 * @mst_primary: pointer to the primary branch device.
-* @guid_valid: GUID valid for the primary branch device.
-* @guid: GUID for primary port.
 * @dpcd: cache of DPCD for primary port.
 * @pbn_div: PBN to slots divisor.
 *
@@ -432,13 +429,11 @@ struct drm_dp_mst_topology_mgr {
     struct drm_dp_sideband_msg_rx up_req_recv;
 
     /* pointer to info about the initial MST device */
-    struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+    struct mutex lock; /* protects mst_state + primary + dpcd */
 
     bool mst_state;
     struct drm_dp_mst_branch *mst_primary;
-    /* primary MST device GUID */
-    bool guid_valid;
-    u8 guid[16];
 
     u8 dpcd[DP_RECEIVER_CAP_SIZE];
     u8 sink_count;
     int pbn_div;

@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 #define DRM_FIXED_ONE           (1ULL << DRM_FIXED_POINT)
 #define DRM_FIXED_DECIMAL_MASK  (DRM_FIXED_ONE - 1)
 #define DRM_FIXED_DIGITS_MASK   (~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON       1LL
+#define DRM_FIXED_ALMOST_ONE    (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
 
 static inline s64 drm_int2fixp(int a)
 {
     return ((s64)a) << DRM_FIXED_POINT;
 }
 
-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
 {
     return ((s64)a) >> DRM_FIXED_POINT;
 }
 
-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+    if (a > 0)
+        return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+    else
+        return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
 {
     unsigned shift, sign = (a >> 63) & 1;
 
@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
     return result;
 }
 
+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+    s64 res;
+    bool a_neg = a < 0;
+    bool b_neg = b < 0;
+    u64 a_abs = a_neg ? -a : a;
+    u64 b_abs = b_neg ? -b : b;
+    u64 rem;
+
+    /* determine integer part */
+    u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
+
+    /* determine fractional part */
+    {
+        u32 i = DRM_FIXED_POINT;
+
+        do {
+            rem <<= 1;
+            res_abs <<= 1;
+            if (rem >= b_abs) {
+                res_abs |= 1;
+                rem -= b_abs;
+            }
+        } while (--i != 0);
+    }
+
+    /* round up LSB */
+    {
+        u64 summand = (rem << 1) >= b_abs;
+
+        res_abs += summand;
+    }
+
+    res = (s64) res_abs;
+    if (a_neg ^ b_neg)
+        res = -res;
+    return res;
+}
+
 static inline s64 drm_fixp_exp(s64 x)
 {
     s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
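Note (illustrative only, not kernel code): drm_fixp_from_fraction(a, b) added above returns a/b in signed 32.32 fixed point, and drm_fixp2int_ceil() rounds that back up to an integer; this is exactly how the MST code now derives PBN values. A self-contained approximation of the two helpers for positive inputs:

    /* Illustrative only: 32.32 fixed-point division and ceiling, approximating
     * drm_fixp_from_fraction() and drm_fixp2int_ceil() for positive inputs
     * (the kernel version also handles signs and rounds the last bit). */
    #include <stdint.h>
    #include <stdio.h>

    #define FIXED_POINT      32
    #define FIXED_ONE        (1ULL << FIXED_POINT)
    #define FIXED_ALMOST_ONE (FIXED_ONE - 1)

    static int64_t fixp_from_fraction(uint64_t a, uint64_t b)
    {
        uint64_t q = a / b, rem = a % b;
        uint64_t frac = 0;

        for (int i = 0; i < FIXED_POINT; i++) {   /* long division, bit by bit */
            rem <<= 1;
            frac <<= 1;
            if (rem >= b) {
                frac |= 1;
                rem -= b;
            }
        }
        return (int64_t)((q << FIXED_POINT) | frac);
    }

    static int fixp2int_ceil(int64_t a)
    {
        return (int)((a + FIXED_ALMOST_ONE) >> FIXED_POINT);
    }

    int main(void)
    {
        /* 154000 kHz * 30 bpp * 64 * 1006 / (54 * 8 * 1000 * 1000) -> 689 PBN */
        int64_t pbn = fixp_from_fraction(154000ULL * 30 * 64 * 1006,
                                         54ULL * 8 * 1000 * 1000);
        printf("PBN = %d\n", fixp2int_ceil(pbn));
        return 0;
    }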