drm fixes for 5.17-rc4
fbdev:
- MAINTAINERS: add Daniel as fbdev core module maintainer
- build warning fix
- implicit type cast fix

panel:
- simple: Fix assignments from panel_dpi_probe()

privacy-screen:
- fix docs warning

i915:
- non-x86 build fix
- ttm error propagation fix
- drrs on hsw/ivb disabled
- BIOS readout fixes
- missing stackdepot oops fix

amd:
- DCN 3.1 display fixes
- GC 10.3.1 harvest fix
- Page flip irq fix
- hwmon label fix
- DCN 2.0 display fix

rockchip:
- fix HDMI error cleanup
- fix RK3399 VOP register fields

vc4:
- HDMI fixes
- remove redundant code.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmIF24QACgkQDHTzWXnE
hr5pwBAAmj3Ds8OveLc58LFymWun9fXMEpYyqWrTRKP9oJZ8sJm0H4ginnIspnUZ
VGzhz+BaxdlktLQlrVMfqH29SZoeQIXW+NoF1eCBGoiWIYhY+5BU4YUSVUg7lNvP
ojbgy5jFiuxwOw1JeVbHhQp1aLeUWqzAb9qv+qe4sCMJIoikGSKO111BY1n+5fzg
FwRLegzG6+HNhVppcbEz+vPDV/PgxXg0usSPLUigdduvulw6VxfsfJlPUC3wc0pZ
erqsYeQawYJg4+arK1R6yHXfzrpti2IwubL+jSoqbqdoS0L3Di2c+IrrI/MBGoTz
wyHNmMQhVvEPHlGxkb+jHZwMZHc7+jgXtvjo8JGoIcrqAN/hc+fu6GrkukTFW5vL
9P9yCt3u/Tphs3XAVRAXbvMH1uW3Bq/xx1XbpSQYQkEyFGwwW0AaRJsayT9paG1b
iDKYWng//w9cJSWriG3OwpRTbQCLnYaE/4xpQE3shUpZh+SoKUSsc2itudz0FR2Y
Nw/nAG0FbGgl+pX7c9qcUaTdy2XYY0sgVlqu7BBYx4JQNvHP9KPBGIRhc5bweS99
B4sBWXpzFkADPX+AXcgxDrSVMY5HEWDqhyqwNKh2Ay28qQqynq9WJxZLbIF4qyfm
OdRs0mj/q71nRJGLBYhf99wXSBpSz5f0mapld3rTaGWMkFN12Bo=
=YcCw
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2022-02-11' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular fixes pull, mostly i915 and amd fixes, along with a
  maintainers update for fbdev core. Otherwise just some build fixes
  and vc4 HDMI fixes.

  fbdev:
   - MAINTAINERS: add Daniel as fbdev core module maintainer
   - build warning fix
   - implicit type cast fix

  panel:
   - simple: Fix assignments from panel_dpi_probe()

  privacy-screen:
   - fix docs warning

  i915:
   - non-x86 build fix
   - ttm error propagation fix
   - drrs on hsw/ivb disabled
   - BIOS readout fixes
   - missing stackdepot oops fix

  amd:
   - DCN 3.1 display fixes
   - GC 10.3.1 harvest fix
   - Page flip irq fix
   - hwmon label fix
   - DCN 2.0 display fix

  rockchip:
   - fix HDMI error cleanup
   - fix RK3399 VOP register fields

  vc4:
   - HDMI fixes
   - remove redundant code"

* tag 'drm-fixes-2022-02-11' of git://anongit.freedesktop.org/drm/drm: (25 commits)
  drm/amdgpu/display: change pipe policy for DCN 2.0
  drm/amd/pm: fix hwmon node of power1_label create issue
  drm/amd/display: keep eDP Vdd on when eDP stream is already enabled
  drm/amd/display: fix yellow carp wm clamping
  drm/amd/display: Cap pflip irqs per max otg number
  drm/amdgpu: add utcl2_harvest to gc 10.3.1
  display/amd: decrease message verbosity about watermarks table failure
  drm/rockchip: vop: Correct RK3399 VOP register fields
  drm/rockchip: dw_hdmi: Do not leave clock enabled in error case
  MAINTAINERS: Add entry for fbdev core
  fbcon: Avoid 'cap' set but not used warning
  drm/privacy-screen: Fix sphinx warning
  drm/i915: Workaround broken BIOS DBUF configuration on TGL/RKL
  drm/i915: Populate pipe dbuf slices more accurately during readout
  drm/i915: Allow !join_mbus cases for adlp+ dbuf configuration
  drm/i915: Fix header test for !CONFIG_X86
  drm/i915/ttm: Return some errors instead of trying memcpy move
  drm/i915: Disable DRRS on IVB/HSW port != A
  drm/i915: Fix oops due to missing stack depot
  drm/vc4: crtc: Fix redundant variable assignment
  ...
commit c3ee3a9e4f
@@ -7575,6 +7575,12 @@ S: Maintained
 W: http://floatingpoint.sourceforge.net/emulator/index.html
 F: arch/x86/math-emu/
 
+FRAMEBUFFER CORE
+M: Daniel Vetter <daniel@ffwll.ch>
+F: drivers/video/fbdev/core/
+S: Odd Fixes
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
 FRAMEBUFFER LAYER
 M: Helge Deller <deller@gmx.de>
 L: linux-fbdev@vger.kernel.org

@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
 				adev->gfx.config.max_sh_per_se *
 				adev->gfx.config.max_shader_engines);
 
-	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(10, 3, 1):
+	case IP_VERSION(10, 3, 3):
 		/* Get SA disabled bitmap from eFuse setting */
 		efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
 		efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
 		disabled_sa = tmp;
 
 		WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+		break;
+	default:
+		break;
 	}
 }
 
@@ -3653,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
 	/* Use GRPH_PFLIP interrupt */
 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
-			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
 			i++) {
 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
 		if (r) {

@@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
 	result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
 
 	if (result == VBIOSSMC_Result_Failed) {
-		ASSERT(0);
+		if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+		    param == TABLE_WATERMARKS)
+			DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+		else
+			ASSERT(0);
 		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
 		return -1;
 	}

@@ -1220,6 +1220,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
 	dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
 
+	dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
 	if (dc->res_pool->dmcu != NULL)
 		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
 }

@@ -202,6 +202,7 @@ struct dc_caps {
 	bool edp_dsc_support;
 	bool vbios_lttpr_aware;
 	bool vbios_lttpr_enable;
+	uint32_t max_otg_num;
 };
 
 struct dc_bug_wa {

@@ -1834,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 				break;
 			}
 		}
-		// We are trying to enable eDP, don't power down VDD
-		if (can_apply_edp_fast_boot)
+
+		/*
+		 * TO-DO: So far the code logic below only addresses single eDP case.
+		 * For dual eDP case, there are a few things that need to be
+		 * implemented first:
+		 *
+		 * 1. Change the fastboot logic above, so eDP link[0 or 1]'s
+		 * stream[0 or 1] will all be checked.
+		 *
+		 * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
+		 * for each eDP.
+		 *
+		 * Once above 2 things are completed, we can then change the logic below
+		 * correspondingly, so dual eDP case will be fully covered.
+		 */
+
+		// We are trying to enable eDP, don't power down VDD if eDP stream is existing
+		if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
 			keep_edp_vdd_on = true;
+			DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+		} else {
+			DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+		}
 	}
 
 	// Check seamless boot support

@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.timing_trace = false,
 	.clock_trace = true,
 	.disable_pplib_clock_request = true,
-	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
+	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
 	.force_single_disp_pipe_split = false,
 	.disable_dcc = DCC_ENABLE,
 	.vsr_support = true,

@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
 	ret_val = wm_ns * refclk_mhz;
 	ret_val /= 1000;
 
-	if (ret_val > clamp_value)
+	if (ret_val > clamp_value) {
+		/* clamping WMs is abnormal, unexpected and may lead to underflow*/
+		ASSERT(0);
 		ret_val = clamp_value;
+	}
 
 	return ret_val;
 }
@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
 		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
 
@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
 		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
 	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
 		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
 
@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
 		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
 	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
 		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
 
@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
 		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
 	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
 		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
 		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
 				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
 
@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
 	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
 		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0x3fff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
 	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->a.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->b.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->c.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_exit_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_exit_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
 				watermarks->d.cstate_pstate.cstate_exit_z8_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.cstate_exit_z8_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
 				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->a.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->a.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->b.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->b.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->c.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->c.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
 				watermarks->d.cstate_pstate.pstate_change_ns;
 		prog_wm_value = convert_and_clamp(
 				watermarks->d.cstate_pstate.pstate_change_ns,
-				refclk_mhz, 0x1fffff);
+				refclk_mhz, 0xffff);
 		REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
 				DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"

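For reference: convert_and_clamp() in the hunks above turns a watermark given in nanoseconds into display-refclk cycles (wm_ns * refclk_mhz / 1000) and clamps it to the destination register field; the fix narrows the clamp from 0x1fffff to the real field widths (0x3fff and 0xffff) and asserts when clamping occurs. Below is a minimal standalone C sketch of the same arithmetic, not the kernel code itself; the refclk and watermark numbers in main() are hypothetical, only the clamp values come from the diff.

	/*
	 * Standalone sketch of the watermark conversion shown above:
	 * nanoseconds -> refclk cycles, clamped to the register field width.
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz,
					  uint32_t clamp_value)
	{
		uint32_t ret_val;

		ret_val = wm_ns * refclk_mhz;   /* ns times cycles-per-microsecond */
		ret_val /= 1000;                /* -> refclk cycles */

		if (ret_val > clamp_value) {
			/* Clamping is abnormal: the programmed value no longer
			 * represents the requested watermark. */
			assert(0);
			ret_val = clamp_value;
		}

		return ret_val;
	}

	int main(void)
	{
		/* Hypothetical 600 MHz refclk and a 20 us urgent watermark. */
		uint32_t prog = convert_and_clamp(20000, 600, 0x3fff);

		printf("programmed watermark: %u refclk cycles (max %u)\n",
		       prog, 0x3fff);
		return 0;
	}

With these numbers the result is 12000 cycles, comfortably below the 0x3fff field limit, so no clamping (and no assert) is triggered.
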
@@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	      attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
 	      attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
 	      attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
-	      attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
-	      attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+	      attr == &sensor_dev_attr_power2_label.dev_attr.attr))
 		return 0;
 
 	return effective_mode;

@@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state);
  *
  * The notifier is called with no locks held. The new hw_state and sw_state
  * can be retrieved using the drm_privacy_screen_get_state() function.
- * A pointer to the drm_privacy_screen's struct is passed as the void *data
+ * A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
  * argument of the notifier_block's notifier_call.
  *
  * The notifier will NOT be called when changes are made through

@@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		vlv_wm_sanitize(dev_priv);
 	} else if (DISPLAY_VER(dev_priv) >= 9) {
 		skl_wm_get_hw_state(dev_priv);
+		skl_wm_sanitize(dev_priv);
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		ilk_wm_get_hw_state(dev_priv);
 	}

@@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector,
 		struct drm_display_mode *fixed_mode)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_encoder *encoder = connector->encoder;
 	struct drm_display_mode *downclock_mode = NULL;
 
 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
@@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector,
 		return NULL;
 	}
 
+	if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
+	    encoder->port != PORT_A) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "DRRS only supported on eDP port A\n");
+		return NULL;
+	}
+
 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
 		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
 		return NULL;

@@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
 
 		if (!IS_ERR(fence))
 			goto out;
-	} else if (move_deps) {
-		int err = i915_deps_sync(move_deps, ctx);
+	} else {
+		int err = PTR_ERR(fence);
 
-		if (err)
-			return ERR_PTR(err);
+		if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+			return fence;
+
+		if (move_deps) {
+			err = i915_deps_sync(move_deps, ctx);
+			if (err)
+				return ERR_PTR(err);
+		}
 	}
 
 	/* Error intercept failed or no accelerated migration to start with */

@@ -6,6 +6,7 @@
 #ifndef __I915_MM_H__
 #define __I915_MM_H__
 
+#include <linux/bug.h>
 #include <linux/types.h>
 
 struct vm_area_struct;

@@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
 };
 
 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+	/*
+	 * Keep the join_mbus cases first so check_mbus_joined()
+	 * will prefer them over the !join_mbus cases.
+	 */
 	{
 		.active_pipes = BIT(PIPE_A),
 		.dbuf_mask = {
@@ -4731,6 +4735,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
 		},
 		.join_mbus = true,
 	},
+	{
+		.active_pipes = BIT(PIPE_A),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+		},
+		.join_mbus = false,
+	},
+	{
+		.active_pipes = BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+		},
+		.join_mbus = false,
+	},
 	{
 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
 		.dbuf_mask = {
@@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes)
 	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
 }
 
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
 			      const struct dbuf_slice_conf_entry *dbuf_slices)
 {
 	int i;
 
 	for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
-		if (dbuf_slices[i].active_pipes == active_pipes)
+		if (dbuf_slices[i].active_pipes == active_pipes &&
+		    dbuf_slices[i].join_mbus == join_mbus)
 			return dbuf_slices[i].dbuf_mask[pipe];
 	}
 	return 0;
@@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
  * returns correspondent DBuf slice mask as stated in BSpec for particular
  * platform.
  */
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
 	/*
 	 * FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
 	 * still here - we will need it once those additional constraints
 	 * pop up.
 	 */
-	return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   icl_allowed_dbufs);
 }
 
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-	return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   tgl_allowed_dbufs);
 }
 
-static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-	return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   adlp_allowed_dbufs);
 }
 
-static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-	return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   dg2_allowed_dbufs);
 }
 
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
 	if (IS_DG2(dev_priv))
-		return dg2_compute_dbuf_slices(pipe, active_pipes);
+		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	else if (IS_ALDERLAKE_P(dev_priv))
-		return adlp_compute_dbuf_slices(pipe, active_pipes);
+		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	else if (DISPLAY_VER(dev_priv) == 12)
-		return tgl_compute_dbuf_slices(pipe, active_pipes);
+		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	else if (DISPLAY_VER(dev_priv) == 11)
-		return icl_compute_dbuf_slices(pipe, active_pipes);
+		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	/*
 	 * For anything else just return one slice yet.
 	 * Should be extended for other platforms.
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state)
 			return ret;
 	}
 
+	if (IS_ALDERLAKE_P(dev_priv))
+		new_dbuf_state->joined_mbus =
+			adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		enum pipe pipe = crtc->pipe;
 
 		new_dbuf_state->slices[pipe] =
-			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+						new_dbuf_state->joined_mbus);
 
 		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
 			continue;
@@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
 
 	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
 
-	if (IS_ALDERLAKE_P(dev_priv))
-		new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
 	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
 	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
 		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
@@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		enum pipe pipe = crtc->pipe;
 		unsigned int mbus_offset;
 		enum plane_id plane_id;
+		u8 slices;
 
 		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
@@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
 		}
 
-		dbuf_state->slices[pipe] =
-			skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
-
 		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
 
 		/*
 		 * Used for checking overlaps, so we need absolute
 		 * offsets instead of MBUS relative offsets.
 		 */
-		mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+						 dbuf_state->joined_mbus);
+		mbus_offset = mbus_ddb_offset(dev_priv, slices);
 		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
 		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
 
+		/* The slices actually used by the planes on the pipe */
+		dbuf_state->slices[pipe] =
+			skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
+
 		drm_dbg_kms(&dev_priv->drm,
 			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
 			    crtc->base.base.id, crtc->base.name,
@@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 	dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
 }
 
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+	const struct intel_dbuf_state *dbuf_state =
+		to_intel_dbuf_state(i915->dbuf.obj.state);
+	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&i915->drm, crtc) {
+		const struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+
+		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+	}
+
+	for_each_intel_crtc(&i915->drm, crtc) {
+		const struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		u8 slices;
+
+		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+						 dbuf_state->joined_mbus);
+		if (dbuf_state->slices[crtc->pipe] & ~slices)
+			return true;
+
+		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+						I915_MAX_PIPES, crtc->pipe))
+			return true;
+	}
+
+	return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+	struct intel_crtc *crtc;
+
+	/*
+	 * On TGL/RKL (at least) the BIOS likes to assign the planes
+	 * to the wrong DBUF slices. This will cause an infinite loop
+	 * in skl_commit_modeset_enables() as it can't find a way to
+	 * transition between the old bogus DBUF layout to the new
+	 * proper DBUF layout without DBUF allocation overlaps between
+	 * the planes (which cannot be allowed or else the hardware
+	 * may hang). If we detect a bogus DBUF layout just turn off
+	 * all the planes so that skl_commit_modeset_enables() can
+	 * simply ignore them.
+	 */
+	if (!skl_dbuf_is_misconfigured(i915))
+		return;
+
+	drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+	for_each_intel_crtc(&i915->drm, crtc) {
+		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+		const struct intel_plane_state *plane_state =
+			to_intel_plane_state(plane->base.state);
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+
+		if (plane_state->uapi.visible)
+			intel_plane_disable_noatomic(crtc, plane);
+
+		drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+	}
+}
+
 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;

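For reference: after this change compute_dbuf_slices() matches a table entry on both active_pipes and join_mbus, so the same pipe bitmask can resolve to different DBuf slice masks depending on whether MBUS joining is in use. Below is a small standalone C sketch of that lookup shape, not the driver code; the table entries and masks are hypothetical, and the loop is terminated by an explicit empty sentinel rather than the driver's condition.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	enum pipe { PIPE_A, PIPE_B, I915_MAX_PIPES };
	#define BIT(x) (1u << (x))

	struct dbuf_slice_conf_entry {
		uint8_t active_pipes;
		bool join_mbus;
		uint8_t dbuf_mask[I915_MAX_PIPES];
	};

	/* Hypothetical masks, for illustration only. */
	static const struct dbuf_slice_conf_entry example_dbufs[] = {
		{ .active_pipes = BIT(PIPE_A), .join_mbus = true,
		  .dbuf_mask = { [PIPE_A] = 0xf } },
		{ .active_pipes = BIT(PIPE_A), .join_mbus = false,
		  .dbuf_mask = { [PIPE_A] = 0x3 } },
		{} /* sentinel: active_pipes == 0 ends the walk */
	};

	static uint8_t compute_dbuf_slices(enum pipe pipe, uint8_t active_pipes,
					   bool join_mbus,
					   const struct dbuf_slice_conf_entry *dbuf_slices)
	{
		int i;

		/* Match on both keys, as in the hunk above. */
		for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
			if (dbuf_slices[i].active_pipes == active_pipes &&
			    dbuf_slices[i].join_mbus == join_mbus)
				return dbuf_slices[i].dbuf_mask[pipe];
		}
		return 0;
	}

	int main(void)
	{
		printf("pipe A, mbus joined: 0x%x\n",
		       compute_dbuf_slices(PIPE_A, BIT(PIPE_A), true, example_dbufs));
		printf("pipe A, mbus split:  0x%x\n",
		       compute_dbuf_slices(PIPE_A, BIT(PIPE_A), false, example_dbufs));
		return 0;
	}
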
@@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 			      struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void skl_wm_sanitize(struct drm_i915_private *dev_priv);
 bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
 			   const struct intel_bw_state *bw_state);
 void intel_sagv_pre_plane_update(struct intel_atomic_state *state);

@@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
 static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
 	spin_lock_init(&rpm->debug.lock);
-
-	if (rpm->available)
-		stack_depot_init();
+	stack_depot_init();
 }
 
 static noinline depot_stack_handle_t

@@ -588,6 +588,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 		err = panel_dpi_probe(dev, panel);
 		if (err)
 			goto free_ddc;
+		desc = panel->desc;
 	} else {
 		if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
 			panel_simple_parse_panel_timing_node(dev, panel, &dt);

@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
-	ret = clk_prepare_enable(hdmi->vpll_clk);
-	if (ret) {
-		DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
-			      ret);
-		return ret;
-	}
-
 	hdmi->phy = devm_phy_optional_get(dev, "hdmi");
 	if (IS_ERR(hdmi->phy)) {
 		ret = PTR_ERR(hdmi->phy);
@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
+	ret = clk_prepare_enable(hdmi->vpll_clk);
+	if (ret) {
+		DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+			      ret);
+		return ret;
+	}
+
 	drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
@@ -902,6 +902,7 @@ static const struct vop_win_phy rk3399_win01_data = {
 	.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
 	.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
 	.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+	.x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
 	.y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
 	.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
 	.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
@@ -912,6 +913,7 @@ static const struct vop_win_phy rk3399_win01_data = {
 	.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
 	.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
 	.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+	.channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
 };
 
 /*
@@ -922,11 +924,11 @@ static const struct vop_win_phy rk3399_win01_data = {
 static const struct vop_win_data rk3399_vop_win_data[] = {
 	{ .base = 0x00, .phy = &rk3399_win01_data,
 	  .type = DRM_PLANE_TYPE_PRIMARY },
-	{ .base = 0x40, .phy = &rk3288_win01_data,
+	{ .base = 0x40, .phy = &rk3368_win01_data,
 	  .type = DRM_PLANE_TYPE_OVERLAY },
-	{ .base = 0x00, .phy = &rk3288_win23_data,
+	{ .base = 0x00, .phy = &rk3368_win23_data,
 	  .type = DRM_PLANE_TYPE_OVERLAY },
-	{ .base = 0x50, .phy = &rk3288_win23_data,
+	{ .base = 0x50, .phy = &rk3368_win23_data,
 	  .type = DRM_PLANE_TYPE_CURSOR },
 };
 
@@ -671,7 +671,6 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
 
-	mode = &crtc_state->adjusted_mode;
 	if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
 		vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
 					  mode->clock * 9 / 10) * 1000;

@@ -196,14 +196,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
 		if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
 			connected = true;
 	} else {
-		unsigned long flags;
-		u32 hotplug;
-
-		spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
-		hotplug = HDMI_READ(HDMI_HOTPLUG);
-		spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
-		if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED)
+		if (vc4_hdmi->variant->hp_detect &&
+		    vc4_hdmi->variant->hp_detect(vc4_hdmi))
 			connected = true;
 	}
 
@@ -1251,6 +1245,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 	unsigned long long tmds_rate;
 
 	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+	    !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
 	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
 	     (mode->hsync_end % 2) || (mode->htotal % 2)))
 		return -EINVAL;
@@ -1298,6 +1293,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
 	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+	    !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
 	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
 	     (mode->hsync_end % 2) || (mode->htotal % 2)))
 		return MODE_H_ILLEGAL;
@@ -1343,6 +1339,18 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
 	return channel_map;
 }
 
+static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
+{
+	unsigned long flags;
+	u32 hotplug;
+
+	spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+	hotplug = HDMI_READ(HDMI_HOTPLUG);
+	spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+	return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
+}
+
 /* HDMI audio codec callbacks */
 static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
 					 unsigned int samplerate)
@@ -2504,7 +2512,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 	 * vc4_hdmi_disable_scrambling() will thus run at boot, make
 	 * sure it's disabled, and avoid any inconsistency.
 	 */
-	vc4_hdmi->scdc_enabled = true;
+	if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
+		vc4_hdmi->scdc_enabled = true;
 
 	ret = variant->init_resources(vc4_hdmi);
 	if (ret)
@@ -2723,6 +2732,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
 	.phy_rng_disable = vc5_hdmi_phy_rng_disable,
 	.channel_map = vc5_hdmi_channel_map,
 	.supports_hdr = true,
+	.hp_detect = vc5_hdmi_hp_detect,
 };
 
 static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
@@ -2751,6 +2761,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
 	.phy_rng_disable = vc5_hdmi_phy_rng_disable,
 	.channel_map = vc5_hdmi_channel_map,
 	.supports_hdr = true,
+	.hp_detect = vc5_hdmi_hp_detect,
 };
 
 static const struct of_device_id vc4_hdmi_dt_match[] = {

@@ -102,6 +102,9 @@ struct vc4_hdmi_variant {
 
 	/* Enables HDR metadata */
 	bool supports_hdr;
+
+	/* Callback for hardware specific hotplug detect */
+	bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi);
 };
 
 /* HDMI audio information */

@@ -1025,7 +1025,7 @@ static void fbcon_init(struct vc_data *vc, int init)
 	struct vc_data *svc = *default_mode;
 	struct fbcon_display *t, *p = &fb_display[vc->vc_num];
 	int logo = 1, new_rows, new_cols, rows, cols;
-	int cap, ret;
+	int ret;
 
 	if (WARN_ON(info_idx == -1))
 		return;
@@ -1034,7 +1034,6 @@ static void fbcon_init(struct vc_data *vc, int init)
 	con2fb_map[vc->vc_num] = info_idx;
 
 	info = registered_fb[con2fb_map[vc->vc_num]];
-	cap = info->flags;
 
 	if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
 		logo_shown = FBCON_LOGO_DONTSHOW;
@@ -1137,8 +1136,8 @@ static void fbcon_init(struct vc_data *vc, int init)
 	ops->graphics = 0;
 
 #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
-	if ((cap & FBINFO_HWACCEL_COPYAREA) &&
-	    !(cap & FBINFO_HWACCEL_DISABLED))
+	if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
+	    !(info->flags & FBINFO_HWACCEL_DISABLED))
 		p->scrollmode = SCROLL_MOVE;
 	else /* default to something safe */
 		p->scrollmode = SCROLL_REDRAW;

@@ -1160,6 +1160,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
 		ret = fbcon_set_con2fb_map_ioctl(argp);
 		break;
 	case FBIOBLANK:
+		if (arg > FB_BLANK_POWERDOWN)
+			return -EINVAL;
 		console_lock();
 		lock_fb_info(info);
 		ret = fb_blank(info, arg);