drm/i915: constify display wm vtable
Use a nop table for the cases where CxSR doesn't init properly.

v2: use a nop table (Jani)

Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/bb0658d14afd02cca692cd58223800f68f4ff4ce.1632869550.git.jani.nikula@intel.com
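The conversion swaps the embedded, writable bundle of watermark function pointers in struct drm_i915_private for a pointer to a per-platform const vtable, and platforms whose watermark setup fails are simply pointed at an empty "nop" table, so every call site stays a plain NULL check on the hook. Below is a minimal, self-contained sketch of that vtable-plus-nop-table pattern in plain C; the wm_funcs, toy_device and fancy_* names are made up for illustration and are not the i915 structures:

#include <stdio.h>

/* Const vtable of optional hooks; members left out of an initializer are NULL. */
struct wm_funcs {
        void (*update_wm)(void *dev);
};

static void fancy_update_wm(void *dev)
{
        (void)dev;
        printf("platform-specific watermark update\n");
}

/* Per-platform table, defined once and never written at runtime. */
static const struct wm_funcs fancy_wm_funcs = {
        .update_wm = fancy_update_wm,
};

/* Empty "nop" table for configurations whose watermark setup failed. */
static const struct wm_funcs nop_funcs = { 0 };

struct toy_device {
        const struct wm_funcs *wm_disp; /* pointer to const table, not an embedded struct */
};

/* Call-site wrapper: a single NULL check, no knowledge of which table is bound. */
static void update_watermarks(struct toy_device *dev)
{
        if (dev->wm_disp->update_wm)
                dev->wm_disp->update_wm(dev);
}

int main(void)
{
        struct toy_device good = { .wm_disp = &fancy_wm_funcs };
        struct toy_device degraded = { .wm_disp = &nop_funcs }; /* e.g. latency read failed */

        update_watermarks(&good);     /* runs the platform hook */
        update_watermarks(&degraded); /* harmless no-op */
        return 0;
}

Beyond letting the tables live in read-only memory, binding the nop table replaces the old habit of clearing individual hooks at runtime (e.g. dev_priv->wm_disp.update_wm = NULL) when CxSR setup fails, while the callers keep a single if (funcs->hook) test.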
@@ -161,16 +161,16 @@ static void intel_modeset_setup_hw_state(struct drm_device *dev,
  */
 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
 {
-        if (dev_priv->wm_disp.update_wm)
-                dev_priv->wm_disp.update_wm(dev_priv);
+        if (dev_priv->wm_disp->update_wm)
+                dev_priv->wm_disp->update_wm(dev_priv);
 }
 
 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
                                  struct intel_crtc *crtc)
 {
         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-        if (dev_priv->wm_disp.compute_pipe_wm)
-                return dev_priv->wm_disp.compute_pipe_wm(state, crtc);
+        if (dev_priv->wm_disp->compute_pipe_wm)
+                return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
         return 0;
 }
 
@@ -178,20 +178,20 @@ static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
                                          struct intel_crtc *crtc)
 {
         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-        if (!dev_priv->wm_disp.compute_intermediate_wm)
+        if (!dev_priv->wm_disp->compute_intermediate_wm)
                 return 0;
         if (drm_WARN_ON(&dev_priv->drm,
-                        !dev_priv->wm_disp.compute_pipe_wm))
+                        !dev_priv->wm_disp->compute_pipe_wm))
                 return 0;
-        return dev_priv->wm_disp.compute_intermediate_wm(state, crtc);
+        return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
 }
 
 static bool intel_initial_watermarks(struct intel_atomic_state *state,
                                      struct intel_crtc *crtc)
 {
         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-        if (dev_priv->wm_disp.initial_watermarks) {
-                dev_priv->wm_disp.initial_watermarks(state, crtc);
+        if (dev_priv->wm_disp->initial_watermarks) {
+                dev_priv->wm_disp->initial_watermarks(state, crtc);
                 return true;
         }
         return false;
@@ -201,23 +201,23 @@ static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
                                            struct intel_crtc *crtc)
 {
         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-        if (dev_priv->wm_disp.atomic_update_watermarks)
-                dev_priv->wm_disp.atomic_update_watermarks(state, crtc);
+        if (dev_priv->wm_disp->atomic_update_watermarks)
+                dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
 }
 
 static void intel_optimize_watermarks(struct intel_atomic_state *state,
                                       struct intel_crtc *crtc)
 {
         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-        if (dev_priv->wm_disp.optimize_watermarks)
-                dev_priv->wm_disp.optimize_watermarks(state, crtc);
+        if (dev_priv->wm_disp->optimize_watermarks)
+                dev_priv->wm_disp->optimize_watermarks(state, crtc);
 }
 
 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
 {
         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-        if (dev_priv->wm_disp.compute_global_watermarks)
-                return dev_priv->wm_disp.compute_global_watermarks(state);
+        if (dev_priv->wm_disp->compute_global_watermarks)
+                return dev_priv->wm_disp->compute_global_watermarks(state);
         return 0;
 }
 
@@ -3743,7 +3743,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
         if (DISPLAY_VER(dev_priv) != 2)
                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
-        if (!dev_priv->wm_disp.initial_watermarks)
+        if (!dev_priv->wm_disp->initial_watermarks)
                 intel_update_watermarks(dev_priv);
 
         /* clock the pipe down to 640x480@60 to potentially save power */
@@ -11425,7 +11425,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
         int i;
 
         /* Only supported on platforms that use atomic watermark design */
-        if (!dev_priv->wm_disp.optimize_watermarks)
+        if (!dev_priv->wm_disp->optimize_watermarks)
                 return;
 
         state = drm_atomic_state_alloc(&dev_priv->drm);
@@ -978,7 +978,7 @@ struct drm_i915_private {
         const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
 
         /* pm display functions */
-        struct drm_i915_wm_disp_funcs wm_disp;
+        const struct drm_i915_wm_disp_funcs *wm_disp;
 
         /* irq display functions */
         const struct intel_hotplug_funcs *hotplug_funcs;
@@ -7975,6 +7975,51 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
         }
 }
 
+static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
+        .compute_global_watermarks = skl_compute_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
+        .compute_pipe_wm = ilk_compute_pipe_wm,
+        .compute_intermediate_wm = ilk_compute_intermediate_wm,
+        .initial_watermarks = ilk_initial_watermarks,
+        .optimize_watermarks = ilk_optimize_watermarks,
+};
+
+static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
+        .compute_pipe_wm = vlv_compute_pipe_wm,
+        .compute_intermediate_wm = vlv_compute_intermediate_wm,
+        .initial_watermarks = vlv_initial_watermarks,
+        .optimize_watermarks = vlv_optimize_watermarks,
+        .atomic_update_watermarks = vlv_atomic_update_fifo,
+};
+
+static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
+        .compute_pipe_wm = g4x_compute_pipe_wm,
+        .compute_intermediate_wm = g4x_compute_intermediate_wm,
+        .initial_watermarks = g4x_initial_watermarks,
+        .optimize_watermarks = g4x_optimize_watermarks,
+};
+
+static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
+        .update_wm = pnv_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
+        .update_wm = i965_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
+        .update_wm = i9xx_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
+        .update_wm = i845_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs nop_funcs = {
+};
+
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_i915_private *dev_priv)
 {
@@ -7990,7 +8035,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
         /* For FIFO watermark updates */
         if (DISPLAY_VER(dev_priv) >= 9) {
                 skl_setup_wm_latency(dev_priv);
-                dev_priv->wm_disp.compute_global_watermarks = skl_compute_wm;
+                dev_priv->wm_disp = &skl_wm_funcs;
         } else if (HAS_PCH_SPLIT(dev_priv)) {
                 ilk_setup_wm_latency(dev_priv);
 
@@ -7998,31 +8043,19 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
                      dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
                     (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
                      dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
-                        dev_priv->wm_disp.compute_pipe_wm = ilk_compute_pipe_wm;
-                        dev_priv->wm_disp.compute_intermediate_wm =
-                                ilk_compute_intermediate_wm;
-                        dev_priv->wm_disp.initial_watermarks =
-                                ilk_initial_watermarks;
-                        dev_priv->wm_disp.optimize_watermarks =
-                                ilk_optimize_watermarks;
+                        dev_priv->wm_disp = &ilk_wm_funcs;
                 } else {
                         drm_dbg_kms(&dev_priv->drm,
                                     "Failed to read display plane latency. "
                                     "Disable CxSR\n");
+                        dev_priv->wm_disp = &nop_funcs;
                 }
         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                 vlv_setup_wm_latency(dev_priv);
-                dev_priv->wm_disp.compute_pipe_wm = vlv_compute_pipe_wm;
-                dev_priv->wm_disp.compute_intermediate_wm = vlv_compute_intermediate_wm;
-                dev_priv->wm_disp.initial_watermarks = vlv_initial_watermarks;
-                dev_priv->wm_disp.optimize_watermarks = vlv_optimize_watermarks;
-                dev_priv->wm_disp.atomic_update_watermarks = vlv_atomic_update_fifo;
+                dev_priv->wm_disp = &vlv_wm_funcs;
         } else if (IS_G4X(dev_priv)) {
                 g4x_setup_wm_latency(dev_priv);
-                dev_priv->wm_disp.compute_pipe_wm = g4x_compute_pipe_wm;
-                dev_priv->wm_disp.compute_intermediate_wm = g4x_compute_intermediate_wm;
-                dev_priv->wm_disp.initial_watermarks = g4x_initial_watermarks;
-                dev_priv->wm_disp.optimize_watermarks = g4x_optimize_watermarks;
+                dev_priv->wm_disp = &g4x_wm_funcs;
         } else if (IS_PINEVIEW(dev_priv)) {
                 if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
                                             dev_priv->is_ddr3,
@@ -8036,21 +8069,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
                                  dev_priv->fsb_freq, dev_priv->mem_freq);
                         /* Disable CxSR and never update its watermark again */
                         intel_set_memory_cxsr(dev_priv, false);
-                        dev_priv->wm_disp.update_wm = NULL;
+                        dev_priv->wm_disp = &nop_funcs;
                 } else
-                        dev_priv->wm_disp.update_wm = pnv_update_wm;
+                        dev_priv->wm_disp = &pnv_wm_funcs;
         } else if (DISPLAY_VER(dev_priv) == 4) {
-                dev_priv->wm_disp.update_wm = i965_update_wm;
+                dev_priv->wm_disp = &i965_wm_funcs;
         } else if (DISPLAY_VER(dev_priv) == 3) {
-                dev_priv->wm_disp.update_wm = i9xx_update_wm;
+                dev_priv->wm_disp = &i9xx_wm_funcs;
         } else if (DISPLAY_VER(dev_priv) == 2) {
                 if (INTEL_NUM_PIPES(dev_priv) == 1)
-                        dev_priv->wm_disp.update_wm = i845_update_wm;
+                        dev_priv->wm_disp = &i845_wm_funcs;
                 else
-                        dev_priv->wm_disp.update_wm = i9xx_update_wm;
+                        dev_priv->wm_disp = &i9xx_wm_funcs;
         } else {
                 drm_err(&dev_priv->drm,
                         "unexpected fall-through in %s\n", __func__);
+                dev_priv->wm_disp = &nop_funcs;
         }
 }
 