ast, msm display fixes, amdgpu fixes, lease fix, omap fixes
Merge tag 'drm-fixes-2018-12-07' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "There's a bit more in here than I'd like, and I'm hoping things calm
  down when I'm out.

  msm:
   - a bunch of display fixes for the new DPU
   - a couple of command submission fixes

  omap:
   - some DSI fixes

  ast:
   - driver unload crash fix

  core:
   - fix the lease uevent so userspace can distinguish it

  amd:
   - fix a bpc regression
   - fix lru handling regression
   - fixed firmware support for new GPUs
   - power management fixes for vega20"

* tag 'drm-fixes-2018-12-07' of git://anongit.freedesktop.org/drm/drm: (37 commits)
  drm/ast: Fix connector leak during driver unload
  drm/amdgpu/vcn: Update vcn.cur_state during suspend
  drm/amd/display: Fix overflow/truncation from strncpy.
  drm/amd/powerplay: improve OD code robustness
  drm/amdgpu: enlarge maximum waiting time of KIQ
  drm/fb-helper: Fix typo in parameter description
  drm/amd/powerplay: support SoftMin/Max setting for some specific DPM
  drm/amd/powerplay: issue pre-display settings for display change event
  drm/amd/powerplay: support new pptable upload on Vega20
  drm/amdgpu/gmc8: always load MC firmware in the driver
  drm/amdgpu/gmc8: update MC firmware for polaris
  drm/amdgpu: update mc firmware image for polaris12 variants
  drm/msm: Fix error return checking
  drm/msm/dpu: Ignore alpha for XBGR8888 format
  drm/msm: dpu: Fix "WARNING: invalid free of devm_ allocated data"
  drm/msm/hdmi: Drop pointless static qualifier in msm_hdmi_bind()
  drm/msm: Move fence put to where failure occurs
  drm/msm: dpu: Don't set legacy plane->crtc pointer
  drm/msm/gpu: Don't map command buffers with nr_relocs equal to 0
  drm/msm/hdmi: Enable HPD after HDMI IRQ is set up
  ...
@@ -233,7 +233,7 @@ enum amdgpu_kiq_irq {
 
 #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
 
 int amdgpu_device_ip_set_clockgating_state(void *dev,
                                            enum amd_ip_block_type block_type,
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
     [AMDGPU_HW_IP_UVD_ENC]  = 1,
     [AMDGPU_HW_IP_VCN_DEC]  = 1,
     [AMDGPU_HW_IP_VCN_ENC]  = 1,
+    [AMDGPU_HW_IP_VCN_JPEG] = 1,
 };
 
 static int amdgput_ctx_total_num_entities(void)
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
     if (!info->return_size || !info->return_pointer)
         return -EINVAL;
 
-    /* Ensure IB tests are run on ring */
-    flush_delayed_work(&adev->late_init_work);
-
     switch (info->query) {
     case AMDGPU_INFO_ACCEL_WORKING:
         ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
     struct amdgpu_fpriv *fpriv;
     int r, pasid;
 
+    /* Ensure IB tests are run on ring */
+    flush_delayed_work(&adev->late_init_work);
+
     file_priv->driver_priv = NULL;
 
     r = pm_runtime_get_sync(dev->dev);
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
         chip_name = "tonga";
         break;
     case CHIP_POLARIS11:
-        chip_name = "polaris11";
+        if (((adev->pdev->device == 0x67ef) &&
+             ((adev->pdev->revision == 0xe0) ||
+              (adev->pdev->revision == 0xe5))) ||
+            ((adev->pdev->device == 0x67ff) &&
+             ((adev->pdev->revision == 0xcf) ||
+              (adev->pdev->revision == 0xef) ||
+              (adev->pdev->revision == 0xff))))
+            chip_name = "polaris11_k";
+        else if ((adev->pdev->device == 0x67ef) &&
+                 (adev->pdev->revision == 0xe2))
+            chip_name = "polaris11_k";
+        else
+            chip_name = "polaris11";
         break;
     case CHIP_POLARIS10:
-        chip_name = "polaris10";
+        if ((adev->pdev->device == 0x67df) &&
+            ((adev->pdev->revision == 0xe1) ||
+             (adev->pdev->revision == 0xf7)))
+            chip_name = "polaris10_k";
+        else
+            chip_name = "polaris10";
         break;
     case CHIP_POLARIS12:
-        chip_name = "polaris12";
+        if (((adev->pdev->device == 0x6987) &&
+             ((adev->pdev->revision == 0xc0) ||
+              (adev->pdev->revision == 0xc3))) ||
+            ((adev->pdev->device == 0x6981) &&
+             ((adev->pdev->revision == 0x00) ||
+              (adev->pdev->revision == 0x01) ||
+              (adev->pdev->revision == 0x10))))
+            chip_name = "polaris12_k";
+        else
+            chip_name = "polaris12";
         break;
     case CHIP_FIJI:
     case CHIP_CARRIZO:
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
     const struct mc_firmware_header_v1_0 *hdr;
     const __le32 *fw_data = NULL;
     const __le32 *io_mc_regs = NULL;
-    u32 data, vbios_version;
+    u32 data;
     int i, ucode_size, regs_size;
 
     /* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
     if (amdgpu_sriov_bios(adev))
         return 0;
 
-    WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-    data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
-    vbios_version = data & 0xf;
-
-    if (vbios_version == 0)
-        return 0;
-
     if (!adev->gmc.fw)
         return -EINVAL;
 
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -222,7 +223,7 @@ static int vcn_v1_0_hw_fini(void *handle)
     struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 
     if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
-        vcn_v1_0_stop(adev);
+        vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
     ring->ready = false;
 
@@ -2554,9 +2554,9 @@ static void fill_audio_info(struct audio_info *audio_info,
 
     cea_revision = drm_connector->display_info.cea_rev;
 
-    strncpy(audio_info->display_name,
+    strscpy(audio_info->display_name,
         edid_caps->display_name,
-        AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+        AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
 
     if (cea_revision >= 3) {
         audio_info->mode_count = edid_caps->audio_mode_count;
@@ -3042,6 +3042,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
         state->underscan_enable = false;
         state->underscan_hborder = 0;
         state->underscan_vborder = 0;
+        state->max_bpc = 8;
 
         __drm_atomic_helper_connector_reset(connector, &state->base);
     }
@@ -3063,6 +3064,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 
     new_state->freesync_capable = state->freesync_capable;
     new_state->freesync_enable = state->freesync_enable;
+    new_state->max_bpc = state->max_bpc;
 
     return &new_state->base;
 }
@@ -3650,7 +3652,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
     mode->hdisplay = hdisplay;
     mode->vdisplay = vdisplay;
     mode->type &= ~DRM_MODE_TYPE_PREFERRED;
-    strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+    strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
 
     return mode;
 
@@ -2512,6 +2512,8 @@ static void pplib_apply_display_requirements(
             dc,
             context->bw.dce.sclk_khz);
 
+    pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
     pp_display_cfg->min_engine_clock_deep_sleep_khz
             = context->bw.dce.sclk_deep_sleep_khz;
 
@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
     PHM_FUNC_CHECK(hwmgr);
     adev = hwmgr->adev;
 
-    if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
+    /* Skip for suspend/resume case */
+    if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
+        && adev->in_suspend) {
         pr_info("dpm has been enabled\n");
         return 0;
     }
@@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
 
     switch (task_id) {
     case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+        ret = phm_pre_display_configuration_changed(hwmgr);
+        if (ret)
+            return ret;
         ret = phm_set_cpu_power_state(hwmgr);
         if (ret)
             return ret;
@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
     if (skip)
         return 0;
 
-    phm_pre_display_configuration_changed(hwmgr);
-
     phm_display_configuration_changed(hwmgr);
 
     if (hwmgr->ps)
@@ -3589,8 +3589,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
     }
 
     if (i >= sclk_table->count) {
-        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-        sclk_table->dpm_levels[i-1].value = sclk;
+        if (sclk > sclk_table->dpm_levels[i-1].value) {
+            data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+            sclk_table->dpm_levels[i-1].value = sclk;
+        }
     } else {
     /* TODO: Check SCLK in DAL's minimum clocks
      * in case DeepSleep divider update is required.
@@ -3607,8 +3609,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
     }
 
     if (i >= mclk_table->count) {
-        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-        mclk_table->dpm_levels[i-1].value = mclk;
+        if (mclk > mclk_table->dpm_levels[i-1].value) {
+            data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+            mclk_table->dpm_levels[i-1].value = mclk;
+        }
     }
 
     if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
@@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
     }
 
     if (i >= sclk_table->count) {
-        data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-        sclk_table->dpm_levels[i-1].value = sclk;
+        if (sclk > sclk_table->dpm_levels[i-1].value) {
+            data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+            sclk_table->dpm_levels[i-1].value = sclk;
+        }
     }
 
     for (i = 0; i < mclk_table->count; i++) {
@@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
     }
 
     if (i >= mclk_table->count) {
-        data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-        mclk_table->dpm_levels[i-1].value = mclk;
+        if (mclk > mclk_table->dpm_levels[i-1].value) {
+            data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+            mclk_table->dpm_levels[i-1].value = mclk;
+        }
     }
 
     if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
@@ -1660,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level(
     return i;
 }
 
-static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
 {
     struct vega20_hwmgr *data =
             (struct vega20_hwmgr *)(hwmgr->backend);
     uint32_t min_freq;
     int ret = 0;
 
-    if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+    if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+        (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
         min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1676,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+    if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+        (feature_mask & FEATURE_DPM_UCLK_MASK)) {
         min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1692,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_UVD].enabled) {
+    if (data->smu_features[GNLD_DPM_UVD].enabled &&
+        (feature_mask & FEATURE_DPM_UVD_MASK)) {
         min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_VCE].enabled) {
+    if (data->smu_features[GNLD_DPM_VCE].enabled &&
+        (feature_mask & FEATURE_DPM_VCE_MASK)) {
         min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+    if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+        (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
         min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
     return ret;
 }
 
-static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
 {
     struct vega20_hwmgr *data =
             (struct vega20_hwmgr *)(hwmgr->backend);
     uint32_t max_freq;
     int ret = 0;
 
-    if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+    if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+        (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
         max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+    if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+        (feature_mask & FEATURE_DPM_UCLK_MASK)) {
         max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_UVD].enabled) {
+    if (data->smu_features[GNLD_DPM_UVD].enabled &&
+        (feature_mask & FEATURE_DPM_UVD_MASK)) {
         max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_VCE].enabled) {
+    if (data->smu_features[GNLD_DPM_VCE].enabled &&
+        (feature_mask & FEATURE_DPM_VCE_MASK)) {
         max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                 return ret);
     }
 
-    if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+    if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+        (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
         max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
 
         PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
     data->dpm_table.mem_table.dpm_state.soft_max_level =
         data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
-    ret = vega20_upload_dpm_min_level(hwmgr);
+    ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
     PP_ASSERT_WITH_CODE(!ret,
             "Failed to upload boot level to highest!",
             return ret);
 
-    ret = vega20_upload_dpm_max_level(hwmgr);
+    ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
     PP_ASSERT_WITH_CODE(!ret,
             "Failed to upload dpm max level to highest!",
            return ret);
@@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
     data->dpm_table.mem_table.dpm_state.soft_max_level =
         data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
-    ret = vega20_upload_dpm_min_level(hwmgr);
+    ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
     PP_ASSERT_WITH_CODE(!ret,
            "Failed to upload boot level to highest!",
            return ret);
 
-    ret = vega20_upload_dpm_max_level(hwmgr);
+    ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
     PP_ASSERT_WITH_CODE(!ret,
            "Failed to upload dpm max level to highest!",
            return ret);
@@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
     int ret = 0;
 
-    ret = vega20_upload_dpm_min_level(hwmgr);
+    ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
     PP_ASSERT_WITH_CODE(!ret,
           "Failed to upload DPM Bootup Levels!",
           return ret);
 
-    ret = vega20_upload_dpm_max_level(hwmgr);
+    ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
     PP_ASSERT_WITH_CODE(!ret,
           "Failed to upload DPM Max Levels!",
           return ret);
@@ -2239,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
     data->dpm_table.gfx_table.dpm_state.soft_max_level =
         data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
 
-    ret = vega20_upload_dpm_min_level(hwmgr);
+    ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
     PP_ASSERT_WITH_CODE(!ret,
           "Failed to upload boot level to lowest!",
          return ret);
 
-    ret = vega20_upload_dpm_max_level(hwmgr);
+    ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
     PP_ASSERT_WITH_CODE(!ret,
           "Failed to upload dpm max level to highest!",
          return ret);
@@ -2259,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
     data->dpm_table.mem_table.dpm_state.soft_max_level =
         data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
 
-    ret = vega20_upload_dpm_min_level(hwmgr);
+    ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
     PP_ASSERT_WITH_CODE(!ret,
           "Failed to upload boot level to lowest!",
          return ret);
 
-    ret = vega20_upload_dpm_max_level(hwmgr);
+    ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
     PP_ASSERT_WITH_CODE(!ret,
           "Failed to upload dpm max level to highest!",
          return ret);
@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
 {
     struct ast_framebuffer *afb = &afbdev->afb;
 
+    drm_crtc_force_disable_all(dev);
     drm_fb_helper_unregister_fbi(&afbdev->helper);
 
     if (afb->obj) {
@@ -54,7 +54,7 @@
 #define SN_AUX_ADDR_7_0_REG 0x76
 #define SN_AUX_LENGTH_REG 0x77
 #define SN_AUX_CMD_REG 0x78
-#define AUX_CMD_SEND BIT(1)
+#define AUX_CMD_SEND BIT(0)
 #define AUX_CMD_REQ(x) ((x) << 4)
 #define SN_AUX_RDATA_REG(x) (0x79 + (x))
 #define SN_SSC_CONFIG_REG 0x93
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
 static bool drm_leak_fbdev_smem = false;
 module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
-MODULE_PARM_DESC(fbdev_emulation,
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
     "Allow unsafe leaking fbdev physical smem address [default=false]");
 #endif
 
@@ -104,6 +104,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
 int drm_sysfs_connector_add(struct drm_connector *connector);
 void drm_sysfs_connector_remove(struct drm_connector *connector);
 
+void drm_sysfs_lease_event(struct drm_device *dev);
+
 /* drm_gem.c */
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
@@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master)
 
     if (master->lessor) {
         /* Tell the master to check the lessee list */
-        drm_sysfs_hotplug_event(dev);
+        drm_sysfs_lease_event(dev);
         drm_master_put(&master->lessor);
     }
 
@@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
     connector->kdev = NULL;
 }
 
+void drm_sysfs_lease_event(struct drm_device *dev)
+{
+    char *event_string = "LEASE=1";
+    char *envp[] = { event_string, NULL };
+
+    DRM_DEBUG("generating lease event\n");
+
+    kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+
 /**
  * drm_sysfs_hotplug_event - generate a DRM uevent
  * @dev: DRM device
@@ -1594,7 +1594,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
                 NULL);
 
     drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
-    plane->crtc = crtc;
 
     /* save user friendly CRTC name for later */
     snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
@@ -488,8 +488,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 
     drm_encoder_cleanup(drm_enc);
     mutex_destroy(&dpu_enc->enc_lock);
-
-    kfree(dpu_enc);
 }
 
 void dpu_encoder_helper_split_config(
@@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
     INTERLEAVED_RGB_FMT(XBGR8888,
         COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
         C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-        true, 4, 0,
+        false, 4, 0,
        DPU_FETCH_LINEAR, 1),
 
     INTERLEAVED_RGB_FMT(RGBA8888,
@@ -39,6 +39,8 @@
 #define DSI_PIXEL_PLL_CLK 1
 #define NUM_PROVIDED_CLKS 2
 
+#define VCO_REF_CLK_RATE 19200000
+
 struct dsi_pll_regs {
     u32 pll_prop_gain_rate;
     u32 pll_lockdet_rate;
@@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
             parent_rate);
 
     pll_10nm->vco_current_rate = rate;
-    pll_10nm->vco_ref_clk_rate = parent_rate;
+    pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
 
     dsi_pll_setup_config(pll_10nm);
 
@@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
         goto fail;
     }
 
+    ret = msm_hdmi_hpd_enable(hdmi->connector);
+    if (ret < 0) {
+        DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+        goto fail;
+    }
+
     encoder->bridge = hdmi->bridge;
 
     priv->bridges[priv->num_bridges++] = hdmi->bridge;
@@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
     struct drm_device *drm = dev_get_drvdata(master);
     struct msm_drm_private *priv = drm->dev_private;
-    static struct hdmi_platform_config *hdmi_cfg;
+    struct hdmi_platform_config *hdmi_cfg;
     struct hdmi *hdmi;
     struct device_node *of_node = dev->of_node;
     int i, err;
@@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
 
 void msm_hdmi_connector_irq(struct drm_connector *connector);
 struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
+int msm_hdmi_hpd_enable(struct drm_connector *connector);
 
 /*
  * i2c adapter for ddc:
@@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
     }
 }
 
-static int hpd_enable(struct hdmi_connector *hdmi_connector)
+int msm_hdmi_hpd_enable(struct drm_connector *connector)
 {
+    struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
     struct hdmi *hdmi = hdmi_connector->hdmi;
     const struct hdmi_platform_config *config = hdmi->config;
     struct device *dev = &hdmi->pdev->dev;
@@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
 {
     struct drm_connector *connector = NULL;
     struct hdmi_connector *hdmi_connector;
-    int ret;
 
     hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
     if (!hdmi_connector)
@@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
     connector->interlace_allowed = 0;
     connector->doublescan_allowed = 0;
 
-    ret = hpd_enable(hdmi_connector);
-    if (ret) {
-        dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
-        return ERR_PTR(ret);
-    }
-
     drm_connector_attach_encoder(connector, hdmi->encoder);
 
     return connector;
@@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
         if (!new_crtc_state->active)
             continue;
 
+        if (drm_crtc_vblank_get(crtc))
+            continue;
+
         kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+        drm_crtc_vblank_put(crtc);
     }
 }
 
@@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 
     ret = mutex_lock_interruptible(&dev->struct_mutex);
     if (ret)
-        return ret;
+        goto free_priv;
 
     pm_runtime_get_sync(&gpu->pdev->dev);
     show_priv->state = gpu->funcs->gpu_state_get(gpu);
@@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 
     if (IS_ERR(show_priv->state)) {
         ret = PTR_ERR(show_priv->state);
-        kfree(show_priv);
-        return ret;
+        goto free_priv;
     }
 
     show_priv->dev = dev;
 
-    return single_open(file, msm_gpu_show, show_priv);
+    ret = single_open(file, msm_gpu_show, show_priv);
+    if (ret)
+        goto free_priv;
+
+    return 0;
+
+free_priv:
+    kfree(show_priv);
+    return ret;
 }
 
 static const struct file_operations msm_gpu_fops = {
@@ -553,17 +553,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
             kthread_run(kthread_worker_fn,
                 &priv->disp_thread[i].worker,
                 "crtc_commit:%d", priv->disp_thread[i].crtc_id);
-        ret = sched_setscheduler(priv->disp_thread[i].thread,
-                        SCHED_FIFO, &param);
-        if (ret)
-            pr_warn("display thread priority update failed: %d\n",
-                    ret);
-
         if (IS_ERR(priv->disp_thread[i].thread)) {
             dev_err(dev, "failed to create crtc_commit kthread\n");
             priv->disp_thread[i].thread = NULL;
+            goto err_msm_uninit;
         }
 
+        ret = sched_setscheduler(priv->disp_thread[i].thread,
+                SCHED_FIFO, &param);
+        if (ret)
+            dev_warn(dev, "disp_thread set priority failed: %d\n",
+                    ret);
+
         /* initialize event thread */
         priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
         kthread_init_worker(&priv->event_thread[i].worker);
@@ -572,6 +573,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
             kthread_run(kthread_worker_fn,
                 &priv->event_thread[i].worker,
                 "crtc_event:%d", priv->event_thread[i].crtc_id);
+        if (IS_ERR(priv->event_thread[i].thread)) {
+            dev_err(dev, "failed to create crtc_event kthread\n");
+            priv->event_thread[i].thread = NULL;
+            goto err_msm_uninit;
+        }
+
         /**
          * event thread should also run at same priority as disp_thread
          * because it is handling frame_done events. A lower priority
@@ -580,34 +587,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
          * failure at crtc commit level.
          */
         ret = sched_setscheduler(priv->event_thread[i].thread,
-                        SCHED_FIFO, &param);
+                SCHED_FIFO, &param);
         if (ret)
-            pr_warn("display event thread priority update failed: %d\n",
-                    ret);
-
-        if (IS_ERR(priv->event_thread[i].thread)) {
-            dev_err(dev, "failed to create crtc_event kthread\n");
-            priv->event_thread[i].thread = NULL;
-        }
-
-        if ((!priv->disp_thread[i].thread) ||
-                !priv->event_thread[i].thread) {
-            /* clean up previously created threads if any */
-            for ( ; i >= 0; i--) {
-                if (priv->disp_thread[i].thread) {
-                    kthread_stop(
-                        priv->disp_thread[i].thread);
-                    priv->disp_thread[i].thread = NULL;
-                }
-
-                if (priv->event_thread[i].thread) {
-                    kthread_stop(
-                        priv->event_thread[i].thread);
-                    priv->event_thread[i].thread = NULL;
-                }
-            }
-            goto err_msm_uninit;
-        }
+            dev_warn(dev, "event_thread set priority failed:%d\n",
+                    ret);
     }
 
     ret = drm_vblank_init(ddev, priv->num_crtcs);
@@ -317,6 +317,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
     uint32_t *ptr;
     int ret = 0;
 
+    if (!nr_relocs)
+        return 0;
+
     if (offset % 4) {
         DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
         return -EINVAL;
@@ -410,7 +413,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
     struct msm_file_private *ctx = file->driver_priv;
     struct msm_gem_submit *submit;
     struct msm_gpu *gpu = priv->gpu;
-    struct dma_fence *in_fence = NULL;
     struct sync_file *sync_file = NULL;
     struct msm_gpu_submitqueue *queue;
     struct msm_ringbuffer *ring;
@@ -443,6 +445,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
     ring = gpu->rb[queue->prio];
 
     if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+        struct dma_fence *in_fence;
+
         in_fence = sync_file_get_fence(args->fence_fd);
 
         if (!in_fence)
@@ -452,11 +456,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
          * Wait if the fence is from a foreign context, or if the fence
          * array contains any fence from a foreign context.
          */
-        if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
+        ret = 0;
+        if (!dma_fence_match_context(in_fence, ring->fctx->context))
             ret = dma_fence_wait(in_fence, true);
-            if (ret)
-                return ret;
-        }
+
+        dma_fence_put(in_fence);
+        if (ret)
+            return ret;
     }
 
     ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -582,8 +588,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
     }
 
 out:
-    if (in_fence)
-        dma_fence_put(in_fence);
     submit_cleanup(submit);
     if (ret)
         msm_gem_submit_free(submit);
@@ -345,6 +345,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 {
     struct msm_gpu_state *state;
 
+    /* Check if the target supports capturing crash state */
+    if (!gpu->funcs->gpu_state_get)
+        return;
+
     /* Only save one crash state at a time */
     if (gpu->crashstate)
         return;
@@ -434,10 +438,9 @@ static void recover_worker(struct work_struct *work)
     if (submit) {
         struct task_struct *task;
 
-        rcu_read_lock();
-        task = pid_task(submit->pid, PIDTYPE_PID);
+        task = get_pid_task(submit->pid, PIDTYPE_PID);
         if (task) {
-            comm = kstrdup(task->comm, GFP_ATOMIC);
+            comm = kstrdup(task->comm, GFP_KERNEL);
 
             /*
              * So slightly annoying, in other paths like
@@ -450,10 +453,10 @@ static void recover_worker(struct work_struct *work)
              * about the submit going away.
              */
             mutex_unlock(&dev->struct_mutex);
-            cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
+            cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+            put_task_struct(task);
             mutex_lock(&dev->struct_mutex);
         }
-        rcu_read_unlock();
 
         if (comm && cmd) {
             dev_err(dev->dev, "%s: offending task: %s (%s)\n",
@@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
     // pm_runtime_get_sync(mmu->dev);
     ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
     // pm_runtime_put_sync(mmu->dev);
-    WARN_ON(ret < 0);
+    WARN_ON(!ret);
 
     return (ret == len) ? 0 : -EINVAL;
 }
@@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
         uint64_t iova, uint32_t size)
 {
     struct msm_gem_object *obj = submit->bos[idx].obj;
+    unsigned offset = 0;
     const char *buf;
 
     if (iova) {
-        buf += iova - submit->bos[idx].iova;
+        offset = iova - submit->bos[idx].iova;
     } else {
         iova = submit->bos[idx].iova;
         size = obj->base.size;
@@ -340,6 +341,8 @@ static void snapshot_buf(struct msm_rd_state *rd,
     if (IS_ERR(buf))
         return;
 
+    buf += offset;
+
     rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
 
     msm_gem_put_vaddr(&obj->base);
@@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
     dssdev->type = OMAP_DISPLAY_TYPE_DPI;
     dssdev->owner = THIS_MODULE;
     dssdev->of_ports = BIT(0);
+    drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
 
     omapdss_display_init(dssdev);
     omapdss_device_register(dssdev);
@@ -5418,9 +5418,15 @@ static int dsi_probe(struct platform_device *pdev)
         dsi->num_lanes_supported = 3;
     }
 
+    r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+    if (r) {
+        DSSERR("Failed to populate DSI child devices: %d\n", r);
+        goto err_pm_disable;
+    }
+
     r = dsi_init_output(dsi);
     if (r)
-        goto err_pm_disable;
+        goto err_of_depopulate;
 
     r = dsi_probe_of(dsi);
     if (r) {
@@ -5428,22 +5434,16 @@ static int dsi_probe(struct platform_device *pdev)
         goto err_uninit_output;
     }
 
-    r = of_platform_populate(dev->of_node, NULL, NULL, dev);
-    if (r) {
-        DSSERR("Failed to populate DSI child devices: %d\n", r);
-        goto err_uninit_output;
-    }
-
     r = component_add(&pdev->dev, &dsi_component_ops);
     if (r)
-        goto err_of_depopulate;
+        goto err_uninit_output;
 
     return 0;
 
-err_of_depopulate:
-    of_platform_depopulate(dev);
 err_uninit_output:
     dsi_uninit_output(dsi);
+err_of_depopulate:
+    of_platform_depopulate(dev);
 err_pm_disable:
     pm_runtime_disable(dev);
     return r;
@@ -432,7 +432,7 @@ struct omap_dss_device {
     const struct omap_dss_driver *driver;
     const struct omap_dss_device_ops *ops;
     unsigned long ops_flags;
-    unsigned long bus_flags;
+    u32 bus_flags;
 
     /* helper variable for driver suspend/resume */
     bool activate_after_resume;
@@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
     .destroy = omap_encoder_destroy,
 };
 
+static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+                                       struct drm_display_mode *adjusted_mode)
+{
+    struct drm_device *dev = encoder->dev;
+    struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+    struct omap_dss_device *dssdev = omap_encoder->output;
+    struct drm_connector *connector;
+    bool hdmi_mode;
+
+    hdmi_mode = false;
+    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+        if (connector->encoder == encoder) {
+            hdmi_mode = omap_connector_get_hdmi_mode(connector);
+            break;
+        }
+    }
+
+    if (dssdev->ops->hdmi.set_hdmi_mode)
+        dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
+
+    if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
+        struct hdmi_avi_infoframe avi;
+        int r;
+
+        r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
+                                                     false);
+        if (r == 0)
+            dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
+    }
+}
+
 static void omap_encoder_mode_set(struct drm_encoder *encoder,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
 {
-    struct drm_device *dev = encoder->dev;
     struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-    struct drm_connector *connector;
     struct omap_dss_device *dssdev;
     struct videomode vm = { 0 };
-    bool hdmi_mode;
-    int r;
 
     drm_display_mode_to_videomode(adjusted_mode, &vm);
 
@@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
     }
 
     /* Set the HDMI mode and HDMI infoframe if applicable. */
-    hdmi_mode = false;
-    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-        if (connector->encoder == encoder) {
-            hdmi_mode = omap_connector_get_hdmi_mode(connector);
-            break;
-        }
-    }
-
-    dssdev = omap_encoder->output;
-
-    if (dssdev->ops->hdmi.set_hdmi_mode)
-        dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
-    if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
-        struct hdmi_avi_infoframe avi;
-
-        r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
-                                                     false);
-        if (r == 0)
-            dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
-    }
+    if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
+        omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
 }
 
 static void omap_encoder_disable(struct drm_encoder *encoder)
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
     if (!fbo)
         return -ENOMEM;
 
+    ttm_bo_get(bo);
     fbo->base = *bo;
-    ttm_bo_get(bo);
+    fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
     fbo->bo = bo;
 
     /**