Merge tag 'drm-intel-next-2021-11-30' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

drm/i915 feature pull for v5.17:

Features and functionality:
- Implement per-lane DP drive settings for ICL+ (Ville)
- Enable runtime pm autosuspend by default (Tilak Tangudu)
- ADL-P DSI support (Vandita)
- Add support for pipe C and D DMC firmware (Anusha)
- Implement (near)atomic gamma LUT updates via vblank workers (Ville)
- Split plane updates to noarm+arm phases (Ville)
- Remove the CCS FB stride restrictions on ADL-P (Imre)
- Add PSR selective fetch support for biplanar formats (Jouni)
- Add support for display audio codec keepalive (Kai)
- VRR platform support for display 11 (Manasi)

Refactoring and cleanups:
- FBC refactoring and cleanups preparing for multiple FBC instances (Ville)
- PCH modeset refactoring, move to its own file (Ville)
- Refactor and simplify handling of modifiers (Imre)
- PXP cleanups (Ville)
- Display header and include refactoring (Jani)
- Some register macro cleanups (Ville)
- Refactor DP HDMI DFP limit code (Ville)

Fixes:
- Disable DSB usage for now due to incorrect gamma LUT updates (Ville)
- Check async flip state of every crtc and plane only once (José)
- Fix DPT FB suspend/resume (Imre)
- Fix black screen on reboot due to disabled DP++ TMDS output buffers (Ville)
- Don't request GMBUS to generate irqs when called while irqs are off (Ville)
- Fix type1 DVI DP dual mode adapter heuristics for modern platforms (Ville)
- Fix integer overflow in 128b/132b data rate calculation (Jani)
- Fix bigjoiner state readout (Ville)
- Build fix for non-x86 (Siva)
- PSR fixes (José, Jouni, Ville)
- Disable ADL-P underrun recovery (José)
- Fix DP link parameter usage before valid DPCD (Imre)
- VRR vblank and frame counter fixes (Ville)
- Fix fastsets on TypeC ports following a non-blocking modeset (Imre)
- Compiler warning fixes (Nathan Chancellor)
- Fix DSI HS mode commands (William Tseng)
- Error return fixes (Dan Carpenter)
- Update memory bandwidth calculations (Radhakrishna)
- Implement WM0 cursor WA for DG2 (Stan)
- Fix DSI double pixel clock on read-back for dual-link panels (Hans de Goede)
- HDMI 2.1 PCON FRL configuration fixes (Ankit)

Merges:
- DP link training delay helpers, via topic branch (Jani)
- Backmerge drm-next (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87v909it0t.fsf@intel.com
Committed by Dave Airlie on 2021-12-02 10:28:17 +10:00 as commit c305ae99df.
91 changed files with 5390 additions and 3885 deletions.


@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/intel-iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"


@ -21,7 +21,7 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
select IOSF_MBI
select IOSF_MBI if X86
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER


@ -30,7 +30,7 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
i915-y += i915_drv.o \
i915-y += i915_driver.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
@ -226,6 +226,8 @@ i915-y += \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
display/intel_overlay.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
display/intel_plane_initial.o \
display/intel_psr.o \
display/intel_quirks.o \


@ -60,22 +60,11 @@ static const u32 vlv_primary_formats[] = {
DRM_FORMAT_XBGR16161616F,
};
static const u64 i9xx_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
}
switch (format) {
case DRM_FORMAT_C8:
@ -92,13 +81,8 @@ static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
}
switch (format) {
case DRM_FORMAT_C8:
@ -272,7 +256,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
u32 alignment = intel_surf_alignment(fb, 0);
int cpp = fb->format->cpp[0];
while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].stride) {
while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
@ -418,22 +402,49 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
return DIV_ROUND_UP(pixel_rate * num, den);
}
static void i9xx_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
static void i9xx_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
plane_state->view.color_plane[0].mapping_stride);
if (DISPLAY_VER(dev_priv) < 4) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int crtc_h = drm_rect_height(&plane_state->uapi.dst);
/*
* PLANE_A doesn't actually have a full window
* generator but let's assume we still need to
* program whatever is there.
*/
intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
((crtc_h - 1) << 16) | (crtc_w - 1));
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_plane_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
int x = plane_state->view.color_plane[0].x;
int y = plane_state->view.color_plane[0].y;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 dspcntr, dspaddr_offset, linear_offset;
unsigned long irqflags;
u32 dspaddr_offset;
u32 dspcntr;
dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
@ -446,20 +457,12 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
plane_state->view.color_plane[0].stride);
if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int crtc_h = drm_rect_height(&plane_state->uapi.dst);
if (DISPLAY_VER(dev_priv) < 4) {
/*
* PLANE_A doesn't actually have a full window
* generator but let's assume we still need to
* program whatever is there.
*/
intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
((crtc_h - 1) << 16) | (crtc_w - 1));
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
@ -493,8 +496,22 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
static void i830_plane_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
/*
* On i830/i845 all registers are self-arming [ALM040].
*
* Additional breakage on i830 causes register reads to return
* the last latched value instead of the last written value [ALM026].
*/
i9xx_plane_update_noarm(plane, crtc_state, plane_state);
i9xx_plane_update_arm(plane, crtc_state, plane_state);
}
static void i9xx_plane_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@ -768,6 +785,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
const u64 *modifiers;
const u32 *formats;
int num_formats;
int ret, zpos;
@ -789,12 +807,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->id = PLANE_PRIMARY;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
if (plane->has_fbc) {
struct intel_fbc *fbc = &dev_priv->fbc;
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
if (i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane))
plane->fbc = &dev_priv->fbc;
if (plane->fbc)
plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
formats = vlv_primary_formats;
@ -851,8 +867,13 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->max_stride = ilk_primary_max_stride;
}
plane->update_plane = i9xx_update_plane;
plane->disable_plane = i9xx_disable_plane;
if (IS_I830(dev_priv) || IS_I845G(dev_priv)) {
plane->update_arm = i830_plane_update_arm;
} else {
plane->update_noarm = i9xx_plane_update_noarm;
plane->update_arm = i9xx_plane_update_arm;
}
plane->disable_arm = i9xx_plane_disable_arm;
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
@ -875,21 +896,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->disable_flip_done = ilk_primary_disable_flip_done;
}
modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
plane_name(plane->i9xx_plane));
kfree(modifiers);
if (ret)
goto fail;


@ -28,6 +28,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_combo_phy.h"
@ -36,6 +37,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
#include "skl_scaler.h"
@ -183,6 +185,8 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
if (enable_lpdt)
tmp |= LP_DATA_TRANSFER;
else
tmp &= ~LP_DATA_TRANSFER;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
@ -1226,7 +1230,9 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
/* step5: program and powerup panel */
gen11_dsi_powerup_panel(encoder);
intel_dsc_enable(encoder, pipe_config);
intel_dsc_dsi_pps_write(encoder, pipe_config);
intel_dsc_enable(pipe_config);
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
@ -1623,7 +1629,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
ret = intel_dsc_compute_params(encoder, crtc_state);
ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;


@ -0,0 +1,15 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __ICL_DSI_H__
#define __ICL_DSI_H__
struct drm_i915_private;
struct intel_crtc_state;
void icl_dsi_init(struct drm_i915_private *i915);
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
#endif /* __ICL_DSI_H__ */


@ -39,6 +39,7 @@
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
@ -469,31 +470,72 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
return NULL;
}
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
void intel_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_update_plane(&plane->base, crtc);
trace_intel_plane_update_noarm(&plane->base, crtc);
if (plane->update_noarm)
plane->update_noarm(plane, crtc_state, plane_state);
}
void intel_plane_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_plane_update_arm(&plane->base, crtc);
if (crtc_state->uapi.async_flip && plane->async_flip)
plane->async_flip(plane, crtc_state, plane_state, true);
else
plane->update_plane(plane, crtc_state, plane_state);
plane->update_arm(plane, crtc_state, plane_state);
}
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
void intel_plane_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, crtc_state);
trace_intel_plane_disable_arm(&plane->base, crtc);
plane->disable_arm(plane, crtc_state);
}
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
void intel_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
u32 update_mask = new_crtc_state->update_planes;
struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
int i;
if (new_crtc_state->uapi.async_flip)
return;
/*
* Since we only write non-arming registers here,
* the order does not matter even for skl+.
*/
for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
if (crtc->pipe != plane->pipe ||
!(update_mask & BIT(plane->id)))
continue;
/* TODO: for mailbox updates this should be skipped */
if (new_plane_state->uapi.visible ||
new_plane_state->planar_slave)
intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
}
}
void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@ -515,17 +557,20 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
/*
* TODO: for mailbox updates intel_plane_update_noarm()
* would have to be called here as well.
*/
if (new_plane_state->uapi.visible ||
new_plane_state->planar_slave) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
} else {
intel_disable_plane(plane, new_crtc_state);
}
new_plane_state->planar_slave)
intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
else
intel_plane_disable_arm(plane, new_crtc_state);
}
}
void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@ -539,10 +584,14 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
/*
* TODO: for mailbox updates intel_plane_update_noarm()
* would have to be called here as well.
*/
if (new_plane_state->uapi.visible)
intel_update_plane(plane, new_crtc_state, new_plane_state);
intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
else
intel_disable_plane(plane, new_crtc_state);
intel_plane_disable_arm(plane, new_crtc_state);
}
}
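
The idea behind the noarm/arm split shown in the hunks above: a plane's "non-arming" double-buffered registers (stride, position, size) can be programmed well before the vblank-evasion critical section, while the single write that actually arms the update for the next vblank stays inside it, keeping the critical section as short as possible. Below is a minimal standalone sketch of that two-phase pattern; the struct and function names (fake_plane, demo_*, commit_planes) are invented for illustration and are not the i915 API.

/* Illustrative model of the noarm/arm split, not driver code. */
#include <stdio.h>

struct fake_plane {
	const char *name;
	void (*update_noarm)(struct fake_plane *plane);	/* optional */
	void (*update_arm)(struct fake_plane *plane);	/* mandatory */
};

static void demo_update_noarm(struct fake_plane *plane)
{
	printf("%s: program stride/pos/size, nothing latches yet\n", plane->name);
}

static void demo_update_arm(struct fake_plane *plane)
{
	printf("%s: write the arming register, update latches at next vblank\n", plane->name);
}

static void commit_planes(struct fake_plane *planes, int n)
{
	int i;

	/* Phase 1: outside vblank evasion; ordering does not matter here. */
	for (i = 0; i < n; i++) {
		if (planes[i].update_noarm)
			planes[i].update_noarm(&planes[i]);
	}

	/* Phase 2: inside vblank evasion; only the cheap arming writes remain. */
	for (i = 0; i < n; i++)
		planes[i].update_arm(&planes[i]);
}

int main(void)
{
	struct fake_plane planes[] = {
		{ "primary", demo_update_noarm, demo_update_arm },
		/* a plane without a separate noarm phase, e.g. self-arming registers */
		{ "sprite", NULL, demo_update_arm },
	};

	commit_planes(planes, 2);
	return 0;
}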


@ -30,20 +30,25 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
struct intel_crtc *crtc);
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state);
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
void intel_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_plane_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_plane_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,


@ -62,6 +62,15 @@
* struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
struct intel_audio_funcs {
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void (*audio_codec_disable)(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
};
/* DP N/M table */
#define LC_810M 810000
#define LC_540M 540000
@ -388,7 +397,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct i915_audio_component *acomp = dev_priv->audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
const struct dp_aud_n_m *nm;
@ -436,7 +445,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct i915_audio_component *acomp = dev_priv->audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@ -494,7 +503,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n",
transcoder_name(cpu_transcoder));
mutex_lock(&dev_priv->av_mutex);
mutex_lock(&dev_priv->audio.mutex);
/* Disable timestamps */
tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
@ -512,7 +521,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
mutex_unlock(&dev_priv->av_mutex);
mutex_unlock(&dev_priv->audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
@ -641,7 +650,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
"Enable audio codec on transcoder %s, %u bytes ELD\n",
transcoder_name(cpu_transcoder), drm_eld_size(eld));
mutex_lock(&dev_priv->av_mutex);
mutex_lock(&dev_priv->audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
@ -679,7 +688,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
mutex_unlock(&dev_priv->av_mutex);
mutex_unlock(&dev_priv->audio.mutex);
}
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
@ -826,7 +835,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct i915_audio_component *acomp = dev_priv->audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
@ -848,17 +857,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->audio_funcs)
dev_priv->audio_funcs->audio_codec_enable(encoder,
if (dev_priv->audio.funcs)
dev_priv->audio.funcs->audio_codec_enable(encoder,
crtc_state,
conn_state);
mutex_lock(&dev_priv->av_mutex);
mutex_lock(&dev_priv->audio.mutex);
encoder->audio_connector = connector;
/* referred in audio callbacks */
dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
dev_priv->audio.encoder_map[pipe] = encoder;
mutex_unlock(&dev_priv->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@ -888,20 +897,20 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct i915_audio_component *acomp = dev_priv->audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
if (dev_priv->audio_funcs)
dev_priv->audio_funcs->audio_codec_disable(encoder,
if (dev_priv->audio.funcs)
dev_priv->audio.funcs->audio_codec_disable(encoder,
old_crtc_state,
old_conn_state);
mutex_lock(&dev_priv->av_mutex);
mutex_lock(&dev_priv->audio.mutex);
encoder->audio_connector = NULL;
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
dev_priv->audio.encoder_map[pipe] = NULL;
mutex_unlock(&dev_priv->audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@ -931,19 +940,53 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
};
/**
* intel_init_audio_hooks - Set up chip specific audio hooks
* intel_audio_hooks_init - Set up chip specific audio hooks
* @dev_priv: device private
*/
void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
void intel_audio_hooks_init(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv)) {
dev_priv->audio_funcs = &g4x_audio_funcs;
dev_priv->audio.funcs = &g4x_audio_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->audio_funcs = &ilk_audio_funcs;
dev_priv->audio.funcs = &ilk_audio_funcs;
} else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
dev_priv->audio_funcs = &hsw_audio_funcs;
dev_priv->audio.funcs = &hsw_audio_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->audio_funcs = &ilk_audio_funcs;
dev_priv->audio.funcs = &ilk_audio_funcs;
}
}
struct aud_ts_cdclk_m_n {
u8 m;
u16 n;
};
void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
{
if (DISPLAY_VER(i915) >= 13)
intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
}
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
{
if (refclk == 24000)
aud_ts->m = 12;
else
aud_ts->m = 15;
aud_ts->n = cdclk * aud_ts->m / 24000;
}
void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
{
struct aud_ts_cdclk_m_n aud_ts;
if (DISPLAY_VER(i915) >= 13) {
get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts);
intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n);
}
}
@ -1014,13 +1057,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
if (dev_priv->audio_power_refcount++ == 0) {
if (dev_priv->audio.power_refcount++ == 0) {
if (DISPLAY_VER(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
dev_priv->audio_freq_cntrl);
dev_priv->audio.freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
dev_priv->audio_freq_cntrl);
dev_priv->audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@ -1041,7 +1084,7 @@ static void i915_audio_component_put_power(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
if (--dev_priv->audio.power_refcount == 0)
if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
@ -1093,7 +1136,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
/*
* get the intel_encoder according to the parameter port and pipe
* intel_encoder is saved by the index of pipe
* MST & (pipe >= 0): return the av_enc_map[pipe],
* MST & (pipe >= 0): return the audio.encoder_map[pipe],
* when port is matched
* MST & (pipe < 0): this is invalid
* Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
@ -1108,10 +1151,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
if (drm_WARN_ON(&dev_priv->drm,
pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map)))
return NULL;
encoder = dev_priv->av_enc_map[pipe];
encoder = dev_priv->audio.encoder_map[pipe];
/*
* when bootup, audio driver may not know it is
* MST or not. So it will poll all the port & pipe
@ -1127,7 +1170,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
return NULL;
for_each_pipe(dev_priv, pipe) {
encoder = dev_priv->av_enc_map[pipe];
encoder = dev_priv->audio.encoder_map[pipe];
if (encoder == NULL)
continue;
@ -1145,7 +1188,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct i915_audio_component *acomp = dev_priv->audio.component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@ -1155,7 +1198,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
return 0;
cookie = i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
mutex_lock(&dev_priv->audio.mutex);
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
@ -1174,7 +1217,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
mutex_unlock(&dev_priv->av_mutex);
mutex_unlock(&dev_priv->audio.mutex);
i915_audio_component_put_power(kdev, cookie);
return err;
}
@ -1188,13 +1231,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
const u8 *eld;
int ret = -EINVAL;
mutex_lock(&dev_priv->av_mutex);
mutex_lock(&dev_priv->audio.mutex);
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
port_name(port));
mutex_unlock(&dev_priv->av_mutex);
mutex_unlock(&dev_priv->audio.mutex);
return ret;
}
@ -1206,7 +1249,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
mutex_unlock(&dev_priv->av_mutex);
mutex_unlock(&dev_priv->audio.mutex);
return ret;
}
@ -1241,7 +1284,7 @@ static int i915_audio_component_bind(struct device *i915_kdev,
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
dev_priv->audio_component = acomp;
dev_priv->audio.component = acomp;
drm_modeset_unlock_all(&dev_priv->drm);
return 0;
@ -1256,14 +1299,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
dev_priv->audio.component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
if (dev_priv->audio_power_refcount)
if (dev_priv->audio.power_refcount)
drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
dev_priv->audio_power_refcount);
dev_priv->audio.power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@ -1327,10 +1370,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
dev_priv->audio_freq_cntrl = aud_freq;
dev_priv->audio.freq_cntrl = aud_freq;
}
dev_priv->audio_component_registered = true;
/* init with current cdclk */
intel_audio_cdclk_change_post(dev_priv);
dev_priv->audio.component_registered = true;
}
/**
@ -1342,11 +1388,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
*/
static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
if (!dev_priv->audio_component_registered)
if (!dev_priv->audio.component_registered)
return;
component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
dev_priv->audio_component_registered = false;
dev_priv->audio.component_registered = false;
}
/**
@ -1368,7 +1414,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv)
*/
void intel_audio_deinit(struct drm_i915_private *dev_priv)
{
if ((dev_priv)->lpe_audio.platdev != NULL)
if ((dev_priv)->audio.lpe.platdev != NULL)
intel_lpe_audio_teardown(dev_priv);
else
i915_audio_component_cleanup(dev_priv);
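
A note on the new AUD_TS_CDCLK M/N programming above: the ratio is derived from the current CDCLK relative to 24 MHz, with M fixed per reference clock and N computed from CDCLK. The standalone sketch below mirrors the arithmetic of get_aud_ts_cdclk_m_n() from the hunk above; the helper name and the example clock values here are made up for illustration.

#include <stdio.h>

/* Clocks are in kHz, as in the driver code above. */
static void aud_ts_cdclk_m_n(unsigned int refclk, unsigned int cdclk,
			     unsigned int *m, unsigned int *n)
{
	*m = (refclk == 24000) ? 12 : 15;	/* per the hunk above */
	*n = cdclk * *m / 24000;
}

int main(void)
{
	unsigned int m, n;

	/* Example only: 24 MHz reference, 648 MHz CDCLK -> M=12, N=324 */
	aud_ts_cdclk_m_n(24000, 648000, &m, &n);
	printf("AUD_TS_CDCLK M=%u N=%u\n", m, n);
	return 0;
}

The pre/post hooks wired into intel_cdclk.c further down first clear AUD_TS_CDCLK_M_EN before the CDCLK switch and then rewrite N and M with the enable bit afterwards.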


@ -11,13 +11,15 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);


@ -27,6 +27,9 @@ struct intel_qgv_info {
u8 num_points;
u8 num_psf_points;
u8 t_bl;
u8 max_numchannels;
u8 channel_width;
u8 deinterleave;
};
static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
@ -42,7 +45,7 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
else
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
sp->dclk = dclk_ratio * dclk_reference;
sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
@ -69,6 +72,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
int point)
{
u32 val = 0, val2 = 0;
u16 dclk;
int ret;
ret = sandybridge_pcode_read(dev_priv,
@ -78,7 +82,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
if (ret)
return ret;
sp->dclk = val & 0xffff;
dclk = val & 0xffff;
sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@ -133,7 +138,8 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
}
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
struct intel_qgv_info *qi,
bool is_y_tile)
{
const struct dram_info *dram_info = &dev_priv->dram_info;
int i, ret;
@ -141,20 +147,44 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
if (DISPLAY_VER(dev_priv) == 12)
if (DISPLAY_VER(dev_priv) >= 12)
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
qi->t_bl = is_y_tile ? 8 : 4;
qi->max_numchannels = 2;
qi->channel_width = 64;
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_DDR5:
qi->t_bl = 8;
qi->t_bl = is_y_tile ? 16 : 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_LPDDR4:
if (IS_ROCKETLAKE(dev_priv)) {
qi->t_bl = 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
qi->deinterleave = 2;
break;
}
fallthrough;
case INTEL_DRAM_LPDDR5:
qi->t_bl = 16;
qi->max_numchannels = 8;
qi->channel_width = 16;
qi->deinterleave = is_y_tile ? 2 : 4;
break;
default:
qi->t_bl = 16;
qi->max_numchannels = 1;
break;
}
else if (DISPLAY_VER(dev_priv) == 11)
else if (DISPLAY_VER(dev_priv) == 11) {
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
if (drm_WARN_ON(&dev_priv->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
@ -193,12 +223,6 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
return 0;
}
static int icl_calc_bw(int dclk, int num, int den)
{
/* multiples of 16.666MHz (100/6) */
return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}
static int adl_calc_psf_bw(int clk)
{
/*
@ -240,7 +264,7 @@ static const struct intel_sa_info tgl_sa_info = {
};
static const struct intel_sa_info rkl_sa_info = {
.deburst = 16,
.deburst = 8,
.deprogbwlimit = 20, /* GB/s */
.displayrtids = 128,
.derating = 10,
@ -265,34 +289,30 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
int deinterleave;
int ipqdepth, ipqdepthpch;
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
int num_groups = ARRAY_SIZE(dev_priv->max_bw);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi);
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
dclk_max = icl_sagv_max_dclk(&qi);
ipqdepthpch = 16;
maxdebw = min(sa->deprogbwlimit * 1000,
icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
for (i = 0; i < num_groups; i++) {
struct intel_bw_info *bi = &dev_priv->max_bw[i];
int clpchgroup;
int j;
clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
bi->num_qgv_points = qi.num_points;
@ -310,7 +330,106 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
*/
ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
(clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
drm_dbg_kms(&dev_priv->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
}
/*
* In case if SAGV is disabled in BIOS, we always get 1
* SAGV point, but we can't send PCode commands to restrict it
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
else
dev_priv->sagv_status = I915_SAGV_ENABLED;
return 0;
}
static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
const struct dram_info *dram_info = &dev_priv->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
int num_groups = ARRAY_SIZE(dev_priv->max_bw);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
if (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5)
num_channels *= 2;
qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels)
drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
dclk_max = icl_sagv_max_dclk(&qi);
peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */
ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
/*
* clperchgroup = 4kpagespermempage * clperchperblock,
* clperchperblock = 8 / num_channels * interleave
*/
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
struct intel_bw_info *bi = &dev_priv->max_bw[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
if (i < num_groups - 1)
bi_next = &dev_priv->max_bw[i + 1];
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
if (i < num_groups - 1 && clpchgroup < clperchgroup)
bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
else
bi_next->num_planes = 0;
bi->num_qgv_points = qi.num_points;
bi->num_psf_gv_points = qi.num_psf_points;
for (j = 0; j < qi.num_points; j++) {
const struct intel_qgv_point *sp = &qi.points[j];
int ct, bw;
/*
* Max row cycle time
*
* FIXME what is the logic behind the
* assumed burst length?
*/
ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
(clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
@ -329,9 +448,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
if (bi->num_planes == 1)
break;
}
/*
@ -395,6 +511,34 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
return 0;
}
static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
int num_planes, int qgv_point)
{
int i;
/*
* Let's return max bw for 0 planes
*/
num_planes = max(1, num_planes);
for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
&dev_priv->max_bw[i];
/*
* Pcode will not expose all QGV points when
* SAGV is forced to off/min/med/max.
*/
if (qgv_point >= bi->num_qgv_points)
return UINT_MAX;
if (num_planes <= bi->num_planes)
return bi->deratedbw[qgv_point];
}
return dev_priv->max_bw[0].deratedbw[qgv_point];
}
static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
int psf_gv_point)
{
@ -412,13 +556,13 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
else if (IS_ALDERLAKE_P(dev_priv))
icl_get_bw_info(dev_priv, &adlp_sa_info);
tgl_get_bw_info(dev_priv, &adlp_sa_info);
else if (IS_ALDERLAKE_S(dev_priv))
icl_get_bw_info(dev_priv, &adls_sa_info);
tgl_get_bw_info(dev_priv, &adls_sa_info);
else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
tgl_get_bw_info(dev_priv, &rkl_sa_info);
else if (DISPLAY_VER(dev_priv) == 12)
icl_get_bw_info(dev_priv, &tgl_sa_info);
tgl_get_bw_info(dev_priv, &tgl_sa_info);
else if (DISPLAY_VER(dev_priv) == 11)
icl_get_bw_info(dev_priv, &icl_sa_info);
}
@ -746,7 +890,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate;
max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
if (DISPLAY_VER(dev_priv) > 11)
max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
else
max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
/*
* We need to know which qgv point gives us
* maximum bandwidth in order to disable SAGV
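
To make the bandwidth changes above easier to follow: the QGV point's DCLK, which pcode reports in multiples of 16.667 MHz, is now converted to MHz up front, and the per-QGV derated bandwidth becomes DCLK * clpchgroup * 32 * num_channels / ct, reduced by the SA derating percentage and clamped to maxdebw. The standalone sketch below uses invented input numbers purely to show the arithmetic.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* All numbers below are made up for illustration. */
	unsigned int dclk_units = 12;		/* pcode value, 16.667 MHz units */
	unsigned int dclk = DIV_ROUND_UP(16667 * dclk_units + 500, 1000); /* -> 201 MHz */
	unsigned int clpchgroup = 8;
	unsigned int num_channels = 2;
	unsigned int ct = 50;			/* max row cycle time */
	unsigned int derating = 10;		/* percent */
	unsigned int maxdebw = 20 * 1000;	/* deprogbwlimit in GB/s -> MB/s */
	unsigned int bw, deratedbw;

	bw = DIV_ROUND_UP(dclk * clpchgroup * 32 * num_channels, ct);
	deratedbw = bw * (100 - derating) / 100;
	if (deratedbw > maxdebw)
		deratedbw = maxdebw;

	printf("dclk=%u MHz, raw bw=%u, derated bw=%u MB/s\n", dclk, bw, deratedbw);
	return 0;
}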


@ -24,6 +24,7 @@
#include <linux/time.h>
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
@ -1975,6 +1976,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_psr_pause(intel_dp);
}
intel_audio_cdclk_change_pre(dev_priv);
/*
* Lock aux/gmbus while we change cdclk in case those
* functions use cdclk. Not all platforms/ports do,
@ -2003,6 +2006,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_psr_resume(intel_dp);
}
intel_audio_cdclk_change_post(dev_priv);
if (drm_WARN(&dev_priv->drm,
intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {


@ -26,7 +26,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_dsi.h"
#include "vlv_dsi_pll.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@ -552,8 +552,8 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
intel_de_write(dev_priv, PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
intel_de_write_fw(dev_priv, PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
}
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
@ -576,15 +576,15 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0),
i965_lut_10p6_ldw(&lut[i]));
intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1),
i965_lut_10p6_udw(&lut[i]));
intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
i965_lut_10p6_ldw(&lut[i]));
intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
i965_lut_10p6_udw(&lut[i]));
}
intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
}
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
@ -618,8 +618,8 @@ static void ilk_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
intel_de_write_fw(dev_priv, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
}
static void ilk_load_lut_10(struct intel_crtc *crtc,
@ -631,8 +631,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++)
intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
ilk_lut_10(&lut[i]));
intel_de_write_fw(dev_priv, PREC_PALETTE(pipe, i),
ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@ -681,16 +681,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc,
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
ilk_lut_10(entry));
intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe),
ilk_lut_10(entry));
}
/*
* Reset the index, otherwise it prevents the legacy palette to be
* written properly.
*/
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
/* On BDW+ the index auto increment mode actually works */
@ -704,23 +704,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
prec_index | PAL_PREC_AUTO_INCREMENT);
intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
/* We discard half the user entries in split gamma mode */
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
ilk_lut_10(entry));
intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe),
ilk_lut_10(entry));
}
/*
* Reset the index, otherwise it prevents the legacy palette to be
* written properly.
*/
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
@ -821,9 +821,9 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
PRE_CSC_GAMC_AUTO_INCREMENT);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
/*
@ -839,15 +839,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe),
lut[i].green);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe),
lut[i].green);
}
/* Clamp values > 1.0. */
while (i++ < 35)
intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
}
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
@ -862,21 +862,21 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
PRE_CSC_GAMC_AUTO_INCREMENT);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
u32 v = (i << 16) / (lut_size - 1);
intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
}
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
@ -1071,10 +1071,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
chv_cgm_degamma_ldw(&lut[i]));
intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
chv_cgm_degamma_udw(&lut[i]));
intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
chv_cgm_degamma_ldw(&lut[i]));
intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
chv_cgm_degamma_udw(&lut[i]));
}
}
@ -1105,10 +1105,10 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
chv_cgm_gamma_ldw(&lut[i]));
intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
chv_cgm_gamma_udw(&lut[i]));
intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
chv_cgm_gamma_ldw(&lut[i]));
intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
chv_cgm_gamma_udw(&lut[i]));
}
}
@ -1131,8 +1131,8 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
else
i965_load_luts(crtc_state);
intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
crtc_state->cgm_mode);
intel_de_write_fw(dev_priv, CGM_PIPE_MODE(crtc->pipe),
crtc_state->cgm_mode);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@ -1808,7 +1808,7 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@ -1843,15 +1843,15 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
i965_lut_10p6_pack(&lut[i], ldw, udw);
}
lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0)));
lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1)));
lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2)));
return blob;
}
@ -1886,8 +1886,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size; i++) {
u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
@ -1922,7 +1922,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
u32 val = intel_de_read_fw(dev_priv, LGC_PALETTE(pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@ -1947,7 +1947,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
u32 val = intel_de_read_fw(dev_priv, PREC_PALETTE(pipe, i));
ilk_lut_10_pack(&lut[i], val);
}
@ -1999,16 +1999,16 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
lut = blob->data;
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
prec_index | PAL_PREC_AUTO_INCREMENT);
intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
u32 val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe));
ilk_lut_10_pack(&lut[i], val);
}
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
return blob;
}
@ -2050,17 +2050,17 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
lut = blob->data;
intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT);
intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
u32 ldw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
u32 udw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
icl_lut_multi_seg_pack(&lut[i], ldw, udw);
}
intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
/*
* FIXME readouts from PAL_PREC_DATA register aren't giving
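
The switch from intel_de_write()/intel_de_read() to the _fw variants throughout this file appears to go hand in hand with the "(near)atomic gamma LUT updates via vblank workers" item from the changelog: the LUT load can now run from a vblank worker right at the start of vblank, so the register accesses need to be as cheap as possible, and the _fw accessors skip the forcewake handling done by the full versions. Below is a rough standalone model of the scheduling side only (the real code is in the intel_crtc.c hunks further down); every fake_ name is invented for the example.

#include <stdio.h>

struct fake_vblank_work {
	unsigned long long target;	/* vblank count at which to run */
	void (*func)(void *ctx);
	void *ctx;
};

/* Called from the (pretend) vblank interrupt with the current frame count. */
static void fake_handle_vblank(struct fake_vblank_work *work,
			       unsigned long long count)
{
	if (work->func && count >= work->target) {
		work->func(work->ctx);	/* e.g. load the gamma LUTs with raw writes */
		work->func = NULL;
	}
}

static void fake_load_luts(void *ctx)
{
	printf("vblank worker: loading LUTs for pipe %s\n", (const char *)ctx);
}

int main(void)
{
	struct fake_vblank_work work = { 0 };
	unsigned long long count;

	/* At the end of the pipe update: arm the work for the *next* vblank,
	 * mirroring drm_vblank_work_schedule(..., accurate_vblank_count + 1, false). */
	work.target = 101;
	work.func = fake_load_luts;
	work.ctx = "A";

	for (count = 100; count <= 102; count++)
		fake_handle_vblank(&work, count);

	return 0;
}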


@ -301,7 +301,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
val |= lane_mask;
intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
}


@ -45,6 +45,7 @@
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_pch_display.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@ -143,7 +144,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
lpt_pch_get_config(pipe_config);
hsw_ddi_get_config(encoder, pipe_config);
@ -152,8 +153,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@ -247,6 +246,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_crtc_vblank_off(old_crtc_state);
@ -261,10 +261,9 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
lpt_disable_pch_transcoder(dev_priv);
lpt_disable_iclkip(dev_priv);
lpt_pch_disable(state, crtc);
intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state);
hsw_fdi_disable(encoder);
drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
@ -316,7 +315,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
intel_enable_transcoder(crtc_state);
lpt_pch_enable(crtc_state);
lpt_pch_enable(state, crtc);
intel_crtc_vblank_on(crtc_state);


@ -3,16 +3,18 @@
* Copyright © 2020 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank_work.h>
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
@ -167,6 +169,8 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);
drm_crtc_cleanup(&crtc->base);
kfree(crtc);
}
@ -344,6 +348,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc_crc_init(crtc);
cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@ -354,6 +360,65 @@ fail:
return ret;
}
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
return crtc_state->hw.active &&
!intel_crtc_needs_modeset(crtc_state) &&
!crtc_state->preload_luts &&
(crtc_state->uapi.color_mgmt_changed ||
crtc_state->update_pipe);
}
static void intel_crtc_vblank_work(struct kthread_work *base)
{
struct drm_vblank_work *work = to_drm_vblank_work(base);
struct intel_crtc_state *crtc_state =
container_of(work, typeof(*crtc_state), vblank_work);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_crtc_vblank_work_start(crtc);
intel_color_load_luts(crtc_state);
if (crtc_state->uapi.event) {
spin_lock_irq(&crtc->base.dev->event_lock);
drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
crtc_state->uapi.event = NULL;
spin_unlock_irq(&crtc->base.dev->event_lock);
}
trace_intel_crtc_vblank_work_end(crtc);
}
static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
intel_crtc_vblank_work);
/*
* Interrupt latency is critical for getting the vblank
* work executed as early as possible during the vblank.
*/
cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
}
void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
{
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!intel_crtc_needs_vblank_work(crtc_state))
continue;
drm_vblank_work_flush(&crtc_state->vblank_work);
cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
PM_QOS_DEFAULT_VALUE);
}
}
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@ -387,7 +452,7 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays.
*/
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@ -402,10 +467,17 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (new_crtc_state->uapi.async_flip)
return;
if (new_crtc_state->vrr.enable)
vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
else
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
if (new_crtc_state->vrr.enable) {
if (intel_vrr_is_push_sent(new_crtc_state))
vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
else
vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
} else {
vblank_start = intel_mode_vblank_start(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@ -554,7 +626,11 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
if (new_crtc_state->uapi.event) {
if (intel_crtc_needs_vblank_work(new_crtc_state)) {
drm_vblank_work_schedule(&new_crtc_state->vblank_work,
drm_crtc_accurate_vblank_count(&crtc->base) + 1,
false);
} else if (new_crtc_state->uapi.event) {
drm_WARN_ON(&dev_priv->drm,
drm_crtc_vblank_get(&crtc->base) != 0);
@ -566,11 +642,24 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
new_crtc_state->uapi.event = NULL;
}
local_irq_enable();
/* Send VRR Push to terminate Vblank */
/*
* Send VRR Push to terminate Vblank. If we are already in vblank
* this has to be done _after_ sampling the frame counter, as
* otherwise the push would immediately terminate the vblank and
* the sampled frame counter would correspond to the next frame
* instead of the current frame.
*
* There is a tiny race here (iff vblank evasion failed us) where
* we might sample the frame counter just before vmax vblank start
* but the push would be sent just after it. That would cause the
* push to affect the next frame instead of the current frame,
* which would cause the next frame to terminate already at vmin
* vblank start instead of vmax vblank start.
*/
intel_vrr_send_push(new_crtc_state);
local_irq_enable();
if (intel_vgpu_active(dev_priv))
return;
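
The intel_crtc.c changes above move single-buffered register loads (the gamma LUTs) into a vblank worker: a drm_vblank_work is set up per commit, scheduled for the vblank following the currently sampled frame counter, and flushed before the next commit touches the crtc, with a cpu_latency_qos request dropped to 0 while the work is pending so the vblank interrupt is serviced with minimal latency. Below is a minimal, illustrative sketch of that generic pattern; it is not part of the commit and every example_* name is hypothetical.

/*
 * Illustrative sketch only, not part of the commit: the generic
 * drm_vblank_work + cpu_latency_qos pattern the new vblank-worker
 * code above is built on.  All example_* names are hypothetical.
 */
#include <linux/pm_qos.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

struct example_crtc {
	struct drm_crtc base;
	struct pm_qos_request vblank_pm_qos;	/* added once at crtc init */
	struct drm_vblank_work vblank_work;
};

static void example_vblank_work(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);
	struct example_crtc *ec =
		container_of(work, struct example_crtc, vblank_work);

	/*
	 * Runs on the vblank worker kthread right after vblank start;
	 * single-buffered register writes (e.g. LUTs) would go here.
	 */
	(void)ec;
}

static void example_arm(struct example_crtc *ec)
{
	/* Ask for low interrupt latency while the work is pending. */
	cpu_latency_qos_update_request(&ec->vblank_pm_qos, 0);

	drm_vblank_work_init(&ec->vblank_work, &ec->base, example_vblank_work);
	/* Execute on the vblank after the currently sampled frame. */
	drm_vblank_work_schedule(&ec->vblank_work,
				 drm_crtc_accurate_vblank_count(&ec->base) + 1,
				 false);
}

static void example_wait(struct example_crtc *ec)
{
	drm_vblank_work_flush(&ec->vblank_work);
	cpu_latency_qos_update_request(&ec->vblank_pm_qos,
				       PM_QOS_DEFAULT_VALUE);
}

The pm_qos request itself is assumed to be registered with cpu_latency_qos_add_request() at init time and removed at teardown, mirroring the intel_crtc_init()/intel_crtc_destroy() hunks above.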

View File

@ -9,10 +9,14 @@
#include <linux/types.h>
enum pipe;
struct drm_display_mode;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
@ -21,5 +25,8 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
#endif

View File

@ -28,11 +28,6 @@ static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
static const u64 cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
@ -195,7 +190,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
{
return CURSOR_ENABLE |
CURSOR_FORMAT_ARGB |
CURSOR_STRIDE(plane_state->view.color_plane[0].stride);
CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride);
}
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
@ -234,7 +229,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
}
drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
plane_state->view.color_plane[0].stride != fb->pitches[0]);
plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
switch (fb->pitches[0]) {
case 256:
@ -253,9 +248,10 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
static void i845_update_cursor(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
/* TODO: split into noarm+arm pair */
static void i845_cursor_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
u32 cntl = 0, base = 0, pos = 0, size = 0;
@ -298,10 +294,10 @@ static void i845_update_cursor(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i845_disable_cursor(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
static void i845_cursor_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
i845_update_cursor(plane, crtc_state, NULL);
i845_cursor_update_arm(plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@ -455,7 +451,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
}
drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
plane_state->view.color_plane[0].stride != fb->pitches[0]);
plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
@ -488,9 +484,10 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
static void i9xx_update_cursor(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
/* TODO: split into noarm+arm pair */
static void i9xx_cursor_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@ -562,10 +559,10 @@ static void i9xx_update_cursor(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i9xx_disable_cursor(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
static void i9xx_cursor_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
i9xx_update_cursor(plane, crtc_state, NULL);
i9xx_cursor_update_arm(plane, crtc_state, NULL);
}
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@ -605,8 +602,10 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
return modifier == DRM_FORMAT_MOD_LINEAR &&
format == DRM_FORMAT_ARGB8888;
if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
return format == DRM_FORMAT_ARGB8888;
}
static int
@ -717,10 +716,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
if (new_plane_state->uapi.visible)
intel_update_plane(plane, crtc_state, new_plane_state);
else
intel_disable_plane(plane, crtc_state);
if (new_plane_state->uapi.visible) {
intel_plane_update_noarm(plane, crtc_state, new_plane_state);
intel_plane_update_arm(plane, crtc_state, new_plane_state);
} else {
intel_plane_disable_arm(plane, crtc_state);
}
intel_plane_unpin_fb(old_plane_state);
@ -754,6 +755,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *cursor;
int ret, zpos;
u64 *modifiers;
cursor = intel_plane_alloc();
if (IS_ERR(cursor))
@ -766,14 +768,14 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
cursor->max_stride = i845_cursor_max_stride;
cursor->update_plane = i845_update_cursor;
cursor->disable_plane = i845_disable_cursor;
cursor->update_arm = i845_cursor_update_arm;
cursor->disable_arm = i845_cursor_disable_arm;
cursor->get_hw_state = i845_cursor_get_hw_state;
cursor->check_plane = i845_check_cursor;
} else {
cursor->max_stride = i9xx_cursor_max_stride;
cursor->update_plane = i9xx_update_cursor;
cursor->disable_plane = i9xx_disable_cursor;
cursor->update_arm = i9xx_cursor_update_arm;
cursor->disable_arm = i9xx_cursor_disable_arm;
cursor->get_hw_state = i9xx_cursor_get_hw_state;
cursor->check_plane = i9xx_check_cursor;
}
@ -784,13 +786,18 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE);
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
modifiers,
DRM_PLANE_TYPE_CURSOR,
"cursor %c", pipe_name(pipe));
kfree(modifiers);
if (ret)
goto fail;

View File

@ -321,10 +321,11 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
/* CRT dotclock is determined via other means */
if (pipe_config->has_pch_encoder)
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
else if (intel_crtc_has_dp_encoder(pipe_config))
return;
if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
@ -1039,7 +1040,6 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int n_entries, ln;
@ -1068,32 +1068,36 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel);
val |= SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
RCOMP_SCALAR(0x98));
}
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1);
val |= POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2);
val |= CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
int level = intel_ddi_level(encoder, crtc_state, ln);
intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff));
}
/* Program PORT_TX_DW7 */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar);
intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
N_SCALAR_MASK,
N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
}
}
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
@ -1124,16 +1128,14 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
val |= icl_combo_phy_loadgen_select(crtc_state, ln);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
LOADGEN_SELECT,
icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
@ -1154,10 +1156,8 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
u32 val;
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
@ -1166,53 +1166,51 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val);
val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val);
intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
trans->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
int level;
val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
trans->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
int level;
val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_EN);
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_EN);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@ -1223,50 +1221,34 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port));
if (crtc_state->port_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val);
intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
CFG_LOW_RATE_LKREN_EN,
crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (crtc_state->port_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
} else {
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val);
intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (crtc_state->port_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
} else {
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val);
intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
val = intel_de_read(dev_priv,
MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
val);
val = intel_de_read(dev_priv,
MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
val);
intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
}
}
@ -1275,9 +1257,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
int level = intel_ddi_level(encoder, crtc_state, 0);
const struct intel_ddi_buf_trans *trans;
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
@ -1287,33 +1267,36 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK);
dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing);
dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis);
dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot);
for (ln = 0; ln < 2; ln++) {
int level;
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
HIP_INDEX_VAL(tc_port, ln));
intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
/* All the registers are RMW */
val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val);
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val);
intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port),
DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK,
DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port),
DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK,
DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
DKL_TX_DP20BITMODE, 0);
}
}
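
The vswing programming hunks above fold each open-coded read/modify/write sequence into intel_de_rmw(), which is what lets a per-lane drive level be picked inside the loop without bloating the code. As a rough sketch (illustrative only; example_rmw() is hypothetical and the real helper may additionally skip writes that would not change the register), the call intel_de_rmw(i915, reg, clear, set) behaves like:

/*
 * Illustrative sketch only: roughly what intel_de_rmw(i915, reg, clear, set)
 * does.  example_rmw() is hypothetical and not part of the commit; it
 * assumes the usual i915 display register helpers are in scope.
 */
#include "intel_de.h"

static u32 example_rmw(struct drm_i915_private *i915, i915_reg_t reg,
		       u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(i915, reg);		/* read current value */
	val = (old & ~clear) | set;		/* drop 'clear' bits, OR in 'set' */
	intel_de_write(i915, reg, val);		/* write it back */

	return old;				/* caller may inspect the old value */
}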
@ -1938,7 +1921,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder,
encoder->enable_clock(encoder, crtc_state);
}
static void intel_ddi_disable_clock(struct intel_encoder *encoder)
void intel_ddi_disable_clock(struct intel_encoder *encoder)
{
if (encoder->disable_clock)
encoder->disable_clock(encoder);
@ -2385,7 +2368,10 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5.k Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
intel_dsc_dp_pps_write(encoder, crtc_state);
intel_dsc_enable(crtc_state);
}
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
@ -2519,8 +2505,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
intel_dsc_dp_pps_write(encoder, crtc_state);
if (!crtc_state->bigjoiner)
intel_dsc_enable(encoder, crtc_state);
intel_dsc_enable(crtc_state);
}
static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
@ -2585,8 +2574,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dsc_dp_pps_write(encoder, crtc_state);
if (!crtc_state->bigjoiner)
intel_dsc_enable(encoder, crtc_state);
intel_dsc_enable(crtc_state);
}
static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
@ -2824,12 +2815,10 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
}
if (old_crtc_state->bigjoiner_linked_crtc) {
struct intel_atomic_state *state =
to_intel_atomic_state(old_crtc_state->uapi.state);
struct intel_crtc *slave =
struct intel_crtc *slave_crtc =
old_crtc_state->bigjoiner_linked_crtc;
const struct intel_crtc_state *old_slave_crtc_state =
intel_atomic_get_old_crtc_state(state, slave);
intel_atomic_get_old_crtc_state(state, slave_crtc);
intel_crtc_vblank_off(old_slave_crtc_state);
@ -2866,41 +2855,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
intel_tc_port_put_link(dig_port);
}
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
* and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_disable_clock(encoder);
val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_PCDCLK;
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_PLL_ENABLE;
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
@ -3095,6 +3049,12 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
intel_dp->link_trained = false;
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
intel_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
@ -3112,6 +3072,10 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
drm_dbg_kms(&i915->drm,
@ -3119,25 +3083,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
connector->base.id, connector->name);
}
static void intel_pre_disable_ddi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp;
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder, old_crtc_state,
old_conn_state);
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
return;
intel_dp = enc_to_intel_dp(encoder);
intel_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
}
static void intel_disable_ddi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@ -3195,8 +3140,14 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
intel_tc_port_get_link(enc_to_dig_port(encoder),
required_lanes);
if (crtc_state && crtc_state->hw.active)
if (crtc_state && crtc_state->hw.active) {
struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc;
intel_update_active_dpll(state, crtc, encoder);
if (slave_crtc)
intel_update_active_dpll(state, slave_crtc, encoder);
}
}
static void
@ -3552,18 +3503,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
if (pipe_config->bigjoiner_slave) {
/* read out pipe settings from master */
enum transcoder save = pipe_config->cpu_transcoder;
/* Our own transcoder needs to be disabled when reading it in intel_ddi_read_func_ctl() */
WARN_ON(pipe_config->output_types);
pipe_config->cpu_transcoder = (enum transcoder)pipe_config->bigjoiner_linked_crtc->pipe;
intel_ddi_read_func_ctl(encoder, pipe_config);
pipe_config->cpu_transcoder = save;
} else {
intel_ddi_read_func_ctl(encoder, pipe_config);
}
intel_ddi_read_func_ctl(encoder, pipe_config);
intel_ddi_mso_get_config(encoder, pipe_config);
@ -3591,8 +3531,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
if (!pipe_config->bigjoiner_slave)
ddi_dotclock_get(pipe_config);
ddi_dotclock_get(pipe_config);
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
pipe_config->lane_lat_optim_mask =
@ -4472,7 +4411,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable = intel_enable_ddi;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
encoder->pre_disable = intel_pre_disable_ddi;
encoder->disable = intel_disable_ddi;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;

View File

@ -6,11 +6,11 @@
#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__
#include "intel_display.h"
#include "i915_reg.h"
struct drm_connector_state;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
@ -18,6 +18,8 @@ struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
struct intel_shared_dpll;
enum pipe;
enum port;
enum transcoder;
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
@ -30,6 +32,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state);
void intel_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_clock(struct intel_encoder *encoder);
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll);

File diff suppressed because it is too large

View File

@ -521,7 +521,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n, bool fec_enable);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
@ -542,9 +541,6 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
void lpt_pch_enable(const struct intel_crtc_state *crtc_state);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
@ -580,10 +576,6 @@ struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);
void intel_display_prepare_reset(struct drm_i915_private *dev_priv);
@ -592,8 +584,11 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
@ -610,9 +605,6 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
@ -632,7 +624,6 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
int intel_modeset_all_pipes(struct intel_atomic_state *state);
/* modesetting asserts */

View File

@ -52,27 +52,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
if (intel_fbc_is_active(fbc)) {
seq_puts(m, "FBC enabled\n");
else
seq_printf(m, "Compressing: %s\n",
yesno(intel_fbc_is_compressing(fbc)));
} else {
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
if (DISPLAY_VER(dev_priv) >= 8)
mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
else if (DISPLAY_VER(dev_priv) >= 7)
mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
else if (DISPLAY_VER(dev_priv) >= 5)
mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
else if (IS_G4X(dev_priv))
mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
else
mask = intel_de_read(dev_priv, FBC_STATUS) &
(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
seq_printf(m, "Compressing: %s\n", yesno(mask));
}
mutex_unlock(&fbc->lock);
@ -85,9 +70,6 @@ static int i915_fbc_false_color_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
*val = dev_priv->fbc.false_color;
return 0;
@ -96,21 +78,8 @@ static int i915_fbc_false_color_get(void *data, u64 *val)
static int i915_fbc_false_color_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
u32 reg;
if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
mutex_lock(&dev_priv->fbc.lock);
reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
dev_priv->fbc.false_color = val;
intel_de_write(dev_priv, ILK_DPFC_CONTROL,
val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
mutex_unlock(&dev_priv->fbc.lock);
return 0;
return intel_fbc_set_false_color(&dev_priv->fbc, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
@ -303,8 +272,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
};
val = intel_de_read(dev_priv,
EDP_PSR2_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
} else {
@ -503,28 +471,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int i;
struct drm_i915_private *i915 = node_to_i915(m->private);
mutex_lock(&power_domains->lock);
seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
enum intel_display_power_domain power_domain;
power_well = &power_domains->power_wells[i];
seq_printf(m, "%-25s %d\n", power_well->desc->name,
power_well->count);
for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
mutex_unlock(&power_domains->lock);
intel_display_power_debug(i915, m);
return 0;
}
@ -2095,7 +2044,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
}
ret = intel_fbc_reset_underrun(dev_priv);
ret = intel_fbc_reset_underrun(&dev_priv->fbc);
if (ret)
return ret;

View File

@ -15,6 +15,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
@ -23,6 +24,98 @@
#include "intel_vga.h"
#include "vlv_sideband.h"
struct i915_power_well_ops {
/*
* Synchronize the well's hw state to match the current sw state, for
* example enable/disable it based on the current refcount. Called
* during driver init and resume time, possibly after first calling
* the enable/disable handlers.
*/
void (*sync_hw)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Enable the well and resources that depend on it (for example
* interrupts located on the well). Called after the 0->1 refcount
* transition.
*/
void (*enable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Disable the well and resources that depend on it. Called after
* the 1->0 refcount transition.
*/
void (*disable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/* Returns the hw enabled state. */
bool (*is_enabled)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
};
struct i915_power_well_regs {
i915_reg_t bios;
i915_reg_t driver;
i915_reg_t kvmr;
i915_reg_t debug;
};
/* Power well structure for haswell */
struct i915_power_well_desc {
const char *name;
bool always_on;
u64 domains;
/* unique identifier for this power well */
enum i915_power_well_id id;
/*
* Arbitrary data associated with this power well. Platform and power
* well specific.
*/
union {
struct {
/*
* request/status flag index in the PUNIT power well
* control/status registers.
*/
u8 idx;
} vlv;
struct {
enum dpio_phy phy;
} bxt;
struct {
const struct i915_power_well_regs *regs;
/*
* request/status flag index in the power well
* constrol/status registers.
*/
u8 idx;
/* Mask of pipes whose IRQ logic is backed by the pw */
u8 irq_pipe_mask;
/*
* Instead of waiting for the status bit to ack enables,
* just wait a specific amount of time and then consider
* the well enabled.
*/
u16 fixed_enable_delay;
/* The pw is backing the VGA functionality */
bool has_vga:1;
bool has_fuses:1;
/*
* The pw is for an ICL+ TypeC PHY port in
* Thunderbolt mode.
*/
bool is_tc_tbt:1;
} hsw;
};
const struct i915_power_well_ops *ops;
};
struct i915_power_well {
const struct i915_power_well_desc *desc;
/* power well enable/disable usage count */
int count;
/* cached hw enabled state */
bool hw_enabled;
};
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@ -154,8 +247,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "MODESET";
case POWER_DOMAIN_GT_IRQ:
return "GT_IRQ";
case POWER_DOMAIN_DPLL_DC_OFF:
return "DPLL_DC_OFF";
case POWER_DOMAIN_DC_OFF:
return "DC_OFF";
case POWER_DOMAIN_TC_COLD_OFF:
return "TC_COLD_OFF";
default:
@ -434,6 +527,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/* Wa_16013190616:adlp */
if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
* before enabling the power well and PW1/PG1's own fuse
@ -894,7 +992,7 @@ static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
u32 target_dc_state)
{
u32 states[] = {
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
DC_STATE_EN_DC3CO,
@ -2802,7 +2900,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
ICL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
BIT_ULL(POWER_DOMAIN_DC_OFF) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define ICL_DDI_IO_A_POWER_DOMAINS ( \
@ -3105,6 +3203,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
@ -6390,3 +6489,28 @@ void intel_display_power_resume(struct drm_i915_private *i915)
hsw_disable_pc8(i915);
}
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
struct i915_power_domains *power_domains = &i915->power_domains;
int i;
mutex_lock(&power_domains->lock);
seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
enum intel_display_power_domain power_domain;
power_well = &power_domains->power_wells[i];
seq_printf(m, "%-25s %d\n", power_well->desc->name,
power_well->count);
for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
mutex_unlock(&power_domains->lock);
}

View File

@ -6,11 +6,13 @@
#ifndef __INTEL_DISPLAY_POWER_H__
#define __INTEL_DISPLAY_POWER_H__
#include "intel_display.h"
#include "intel_runtime_pm.h"
#include "i915_reg.h"
enum dpio_channel;
enum dpio_phy;
struct drm_i915_private;
struct i915_power_well;
struct intel_encoder;
enum intel_display_power_domain {
@ -117,7 +119,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
POWER_DOMAIN_DPLL_DC_OFF,
POWER_DOMAIN_DC_OFF,
POWER_DOMAIN_TC_COLD_OFF,
POWER_DOMAIN_INIT,
@ -155,100 +157,6 @@ enum i915_power_well_id {
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
struct i915_power_well;
struct i915_power_well_ops {
/*
* Synchronize the well's hw state to match the current sw state, for
* example enable/disable it based on the current refcount. Called
* during driver init and resume time, possibly after first calling
* the enable/disable handlers.
*/
void (*sync_hw)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Enable the well and resources that depend on it (for example
* interrupts located on the well). Called after the 0->1 refcount
* transition.
*/
void (*enable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Disable the well and resources that depend on it. Called after
* the 1->0 refcount transition.
*/
void (*disable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/* Returns the hw enabled state. */
bool (*is_enabled)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
};
struct i915_power_well_regs {
i915_reg_t bios;
i915_reg_t driver;
i915_reg_t kvmr;
i915_reg_t debug;
};
/* Power well structure for haswell */
struct i915_power_well_desc {
const char *name;
bool always_on;
u64 domains;
/* unique identifier for this power well */
enum i915_power_well_id id;
/*
* Arbitrary data associated with this power well. Platform and power
* well specific.
*/
union {
struct {
/*
* request/status flag index in the PUNIT power well
* control/status registers.
*/
u8 idx;
} vlv;
struct {
enum dpio_phy phy;
} bxt;
struct {
const struct i915_power_well_regs *regs;
/*
* request/status flag index in the power well
* control/status registers.
*/
u8 idx;
/* Mask of pipes whose IRQ logic is backed by the pw */
u8 irq_pipe_mask;
/*
* Instead of waiting for the status bit to ack enables,
* just wait a specific amount of time and then consider
* the well enabled.
*/
u16 fixed_enable_delay;
/* The pw is backing the VGA functionality */
bool has_vga:1;
bool has_fuses:1;
/*
* The pw is for an ICL+ TypeC PHY port in
* Thunderbolt mode.
*/
bool is_tc_tbt:1;
} hsw;
};
const struct i915_power_well_ops *ops;
};
struct i915_power_well {
const struct i915_power_well_desc *desc;
/* power well enable/disable usage count */
int count;
/* cached hw enabled state */
bool hw_enabled;
};
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
@ -391,6 +299,8 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915,
intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);
/*
* FIXME: We should probably switch this to a 0-based scheme to be consistent
* with how we now name/number DBUF_CTL instances.

View File

@ -28,6 +28,7 @@
#include <linux/async.h>
#include <linux/i2c.h>
#include <linux/pm_qos.h>
#include <linux/pwm.h>
#include <linux/sched/clock.h>
@ -41,6 +42,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
@ -49,6 +51,7 @@
struct drm_printer;
struct __intel_global_objs_state;
struct intel_ddi_buf_trans;
struct intel_fbc;
/*
* Display related stuff
@ -115,7 +118,8 @@ struct intel_fb_view {
* bytes for 0/180 degree rotation
* pixels for 90/270 degree rotation
*/
unsigned int stride;
unsigned int mapping_stride;
unsigned int scanout_stride;
} color_plane[4];
};
@ -194,10 +198,6 @@ struct intel_encoder {
void (*update_complete)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
void (*pre_disable)(struct intel_atomic_state *,
struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*disable)(struct intel_atomic_state *,
struct intel_encoder *,
const struct intel_crtc_state *,
@ -949,7 +949,6 @@ struct intel_crtc_state {
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
#define PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE (1<<1) /* bigjoiner slave, partial readout */
unsigned long quirks;
unsigned fb_bits; /* framebuffers to flip */
@ -1241,6 +1240,9 @@ struct intel_crtc_state {
u8 link_count;
u8 pixel_overlap;
} splitter;
/* for loading single buffered registers during vblank */
struct drm_vblank_work vblank_work;
};
enum intel_pipe_crc_source {
@ -1325,6 +1327,9 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
/* for loading single buffered registers during vblank */
struct pm_qos_request vblank_pm_qos;
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc;
#endif
@ -1335,8 +1340,6 @@ struct intel_plane {
enum i9xx_plane_id i9xx_plane;
enum plane_id id;
enum pipe pipe;
bool has_fbc;
bool has_ccs;
bool need_async_flip_disable_wa;
u32 frontbuffer_bit;
@ -1344,6 +1347,8 @@ struct intel_plane {
u32 base, cntl, size;
} cursor;
struct intel_fbc *fbc;
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
@ -1362,11 +1367,17 @@ struct intel_plane {
unsigned int (*max_stride)(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
void (*update_plane)(struct intel_plane *plane,
/* Write all non-self arming plane registers */
void (*update_noarm)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
/* Write all self-arming plane registers */
void (*update_arm)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
/* Disable the plane, must arm */
void (*disable_arm)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
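
With the plane hooks above split into a noarm and an arm phase, callers are expected to write all non-self-arming registers for every plane first and only then do the arming writes that latch the double-buffered update, as the intel_legacy_cursor_update() hunk earlier does via intel_plane_update_noarm()/intel_plane_update_arm(). A hedged sketch of that calling convention follows; example_commit_plane() is hypothetical, and update_noarm is assumed to be optional for planes that have not been split yet.

/*
 * Illustrative sketch only: the calling convention implied by the
 * update_noarm/update_arm/disable_arm split.  example_commit_plane()
 * is hypothetical; the real wrappers live in intel_atomic_plane.c.
 */
static void example_commit_plane(struct intel_plane *plane,
				 const struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 bool visible)
{
	if (visible) {
		/* Phase 1: registers that do not latch the update yet. */
		if (plane->update_noarm)
			plane->update_noarm(plane, crtc_state, plane_state);

		/* Phase 2: the self-arming writes that make it take effect. */
		plane->update_arm(plane, crtc_state, plane_state);
	} else {
		/* Disabling arms on its own. */
		plane->disable_arm(plane, crtc_state);
	}
}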
@ -1563,6 +1574,8 @@ struct intel_dp {
int num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
bool use_rate_select;
/* Max sink lane count as reported by DP_MAX_LANE_COUNT */
int max_sink_lane_count;
/* intersection of source and sink rates */
int num_common_rates;
int common_rates[DP_MAX_SUPPORTED_RATES];
@ -2041,20 +2054,4 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
static inline bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
static inline bool is_gen12_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
}
#endif /* __INTEL_DISPLAY_TYPES_H__ */

View File

@ -20,6 +20,8 @@ enum {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
DMC_FW_PIPEB,
DMC_FW_PIPEC,
DMC_FW_PIPED,
DMC_FW_MAX
};

View File

@ -127,7 +127,7 @@ static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
}
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
static const int dp_rates[] = {
162000, 270000, 540000, 810000
@ -197,6 +197,54 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = i;
}
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
intel_dp_set_dpcd_sink_rates(intel_dp);
if (intel_dp->num_sink_rates)
return;
drm_err(&dp_to_i915(intel_dp)->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
intel_dp_set_default_sink_rates(intel_dp);
}
static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
intel_dp->max_sink_lane_count = 1;
}
static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
switch (intel_dp->max_sink_lane_count) {
case 1:
case 2:
case 4:
return;
}
drm_err(&dp_to_i915(intel_dp)->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
intel_dp->max_sink_lane_count);
intel_dp_set_default_max_sink_lane_count(intel_dp);
}
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
@ -219,10 +267,19 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
intel_dp->num_common_rates, max_rate);
}
static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
index < 0 || index >= intel_dp->num_common_rates))
return 162000;
return intel_dp->common_rates[index];
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
return intel_dp->common_rates[intel_dp->num_common_rates - 1];
return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
/* Theoretical max between source and sink */
@ -230,7 +287,7 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int source_max = dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
int sink_max = intel_dp->max_sink_lane_count;
int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
@ -242,7 +299,15 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
return intel_dp->max_link_lane_count;
switch (intel_dp->max_link_lane_count) {
case 1:
case 2:
case 4:
return intel_dp->max_link_lane_count;
default:
MISSING_CASE(intel_dp->max_link_lane_count);
return 1;
}
}
/*
@ -554,13 +619,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
if (index > 0) {
if (intel_dp_is_edp(intel_dp) &&
!intel_dp_can_link_train_fallback_for_edp(intel_dp,
intel_dp->common_rates[index - 1],
intel_dp_common_rate(intel_dp, index - 1),
lane_count)) {
drm_dbg_kms(&i915->drm,
"Retrying Link training for eDP with same parameters\n");
return 0;
}
intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1);
intel_dp->max_link_lane_count = lane_count;
} else if (lane_count > 1) {
if (intel_dp_is_edp(intel_dp) &&
@ -1000,14 +1065,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int len;
len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
if (drm_WARN_ON(&i915->drm, len <= 0))
return 162000;
return intel_dp->common_rates[len - 1];
return intel_dp_common_rate(intel_dp, len - 1);
}
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
@ -1204,7 +1266,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
output_bpp);
for (i = 0; i < intel_dp->num_common_rates; i++) {
link_rate = intel_dp->common_rates[i];
link_rate = intel_dp_common_rate(intel_dp, i);
if (link_rate < limits->min_rate ||
link_rate > limits->max_rate)
continue;
@ -1283,7 +1345,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
else
vdsc_cfg->slice_height = 2;
ret = intel_dsc_compute_params(encoder, crtc_state);
ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
@ -1452,17 +1514,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
int common_len;
int ret;
common_len = intel_dp_common_len_rate_limit(intel_dp,
intel_dp->max_link_rate);
/* No common link rates between source and sink */
drm_WARN_ON(encoder->base.dev, common_len <= 0);
limits.min_rate = intel_dp->common_rates[0];
limits.max_rate = intel_dp->common_rates[common_len - 1];
limits.min_rate = intel_dp_common_rate(intel_dp, 0);
limits.max_rate = intel_dp_max_link_rate(intel_dp);
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
@ -2143,6 +2198,18 @@ static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
return max_frl_rate;
}
static bool
intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
u8 max_frl_bw_mask, u8 *frl_trained_mask)
{
if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
*frl_trained_mask >= max_frl_bw_mask)
return true;
return false;
}
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
@ -2153,10 +2220,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
u8 max_frl_bw_mask = 0, frl_trained_mask;
bool is_active;
ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
if (ret < 0)
return ret;
max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
@ -2168,6 +2231,12 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
if (max_frl_bw <= 0)
return -EINVAL;
max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
goto frl_trained;
ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
if (ret < 0)
return ret;
@ -2177,7 +2246,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
if (!is_active)
return -ETIMEDOUT;
max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
DP_PCON_ENABLE_SEQUENTIAL_LINK);
if (ret < 0)
@ -2193,19 +2261,15 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
* Wait for FRL to be completed
* Check if the HDMI Link is up and active.
*/
wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
wait_for(is_active =
intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
TIMEOUT_HDMI_LINK_ACTIVE_MS);
if (!is_active)
return -ETIMEDOUT;
/* Verify HDMI Link configuration shows FRL Mode */
if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
DP_PCON_HDMI_MODE_FRL) {
drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
return -EINVAL;
}
drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
frl_trained:
drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
intel_dp->frl.is_trained = true;
drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
@ -2223,6 +2287,28 @@ static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
return false;
}
static
int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
{
int ret;
u8 buf = 0;
/* Set PCON source control mode */
buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
if (ret < 0)
return ret;
/* Set HDMI LINK ENABLE */
buf |= DP_PCON_ENABLE_HDMI_LINK;
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
if (ret < 0)
return ret;
return 0;
}
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@ -2241,7 +2327,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
int ret, mode;
drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
ret = intel_dp_pcon_set_tmds_mode(intel_dp);
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
@ -2603,6 +2689,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp->use_rate_select = true;
else
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
intel_dp_reset_max_link_params(intel_dp);
@ -2648,6 +2735,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
drm_dp_is_branch(intel_dp->dpcd));
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@ -5014,6 +5102,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_source_rates(intel_dp);
intel_dp_set_default_sink_rates(intel_dp);
intel_dp_set_default_max_sink_lane_count(intel_dp);
intel_dp_set_common_rates(intel_dp);
intel_dp_reset_max_link_params(intel_dp);

View File

@ -301,7 +301,10 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
DISPLAY_VER(i915) >= 11;
}
/* 128b/132b */
@ -683,15 +686,6 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
return true;
}
static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX)
drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
else
drm_dp_lttpr_link_train_clock_recovery_delay();
}
static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
const u8 old_link_status[DP_LINK_STATUS_SIZE],
const u8 new_link_status[DP_LINK_STATUS_SIZE])
@ -750,6 +744,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
char phy_name[10];
int delay_us;
delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
@ -777,7 +776,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);
usleep_range(delay_us, 2 * delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
@ -895,19 +894,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
return DP_TRAINING_PATTERN_2;
}
static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX) {
drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
} else {
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
}
}
/*
* Perform the link training channel equalization phase on the given DP PHY
* using one of training pattern 2, 3 or 4 depending on the source and
@ -925,6 +911,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
char phy_name[10];
int delay_us;
delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
@ -944,8 +935,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
}
for (tries = 0; tries < 5; tries++) {
intel_dp_link_training_channel_equalization_delay(intel_dp,
dp_phy);
usleep_range(delay_us, 2 * delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm,

View File

@ -231,6 +231,7 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
if (DISPLAY_VER(dev_priv) < 12)
return 0;
@ -243,7 +244,6 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
struct intel_digital_connector_state *conn_iter_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int ret;
if (connector_iter->mst_port != connector->mst_port ||
connector_iter == connector)
@ -252,8 +252,8 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
conn_iter_state = intel_atomic_get_digital_connector_state(state,
connector_iter);
if (IS_ERR(conn_iter_state)) {
drm_connector_list_iter_end(&connector_list_iter);
return PTR_ERR(conn_iter_state);
ret = PTR_ERR(conn_iter_state);
break;
}
if (!conn_iter_state->base.crtc)
@ -262,20 +262,18 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
crtc = to_intel_crtc(conn_iter_state->base.crtc);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state)) {
drm_connector_list_iter_end(&connector_list_iter);
return PTR_ERR(crtc_state);
ret = PTR_ERR(crtc_state);
break;
}
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret) {
drm_connector_list_iter_end(&connector_list_iter);
return ret;
}
if (ret)
break;
crtc_state->uapi.mode_changed = true;
}
drm_connector_list_iter_end(&connector_list_iter);
return 0;
return ret;
}
static int
@ -348,16 +346,6 @@ static void wait_for_act_sent(struct intel_encoder *encoder,
drm_dp_check_act_status(&intel_dp->mst_mgr);
}
static void intel_mst_pre_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder, old_crtc_state,
old_conn_state);
}
static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@ -382,6 +370,9 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
if (ret) {
drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
}
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
@ -916,7 +907,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
intel_encoder->pre_disable = intel_mst_pre_disable_dp;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->update_pipe = intel_ddi_update_pipe;

View File

@ -26,6 +26,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_pch_refclk.h"
#include "intel_tc.h"
/**
@ -3740,7 +3741,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
* domain.
*/
pll->wakeref = intel_display_power_get(dev_priv,
POWER_DOMAIN_DPLL_DC_OFF);
POWER_DOMAIN_DC_OFF);
}
icl_pll_power_enable(dev_priv, pll, enable_reg);
@ -3847,7 +3848,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv,
if (IS_JSL_EHL(dev_priv) &&
pll->info->id == DPLL_ID_EHL_DPLL4)
intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
pll->wakeref);
}
@ -4231,7 +4232,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
if (IS_JSL_EHL(i915) && pll->on &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
pll->wakeref = intel_display_power_get(i915,
POWER_DOMAIN_DPLL_DC_OFF);
POWER_DOMAIN_DC_OFF);
}
pll->state.pipe_mask = 0;

View File

@ -27,7 +27,6 @@
#include <linux/types.h>
#include "intel_display.h"
#include "intel_wakeref.h"
/*FIXME: Move this to a more appropriate place. */
@ -37,6 +36,7 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
enum tc_port;
struct drm_device;
struct drm_i915_private;
struct intel_atomic_state;

View File

@ -167,6 +167,64 @@ void intel_dpt_unpin(struct i915_address_space *vm)
i915_vma_put(dpt->vma);
}
/**
* intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
* @i915: device instance
*
* Restore the memory mapping during system resume for all framebuffers which
* are mapped to HW via a GGTT->DPT page table. The content of these page
* tables is not stored in the hibernation image during S4 and S3RST->S4
* transitions, so here we reprogram the PTE entries in those tables.
*
* This function must be called after the mappings in GGTT have been restored by calling
* i915_ggtt_resume().
*/
void intel_dpt_resume(struct drm_i915_private *i915)
{
struct drm_framebuffer *drm_fb;
if (!HAS_DISPLAY(i915))
return;
mutex_lock(&i915->drm.mode_config.fb_lock);
drm_for_each_fb(drm_fb, &i915->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
i915_ggtt_resume_vm(fb->dpt_vm);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
}
/**
* intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
* @i915: device instance
*
* Suspend the memory mapping during system suspend for all framebuffers which
* are mapped to HW via a GGTT->DPT page table.
*
* This function must be called before the mappings in GGTT are suspended by calling
* i915_ggtt_suspend().
*/
void intel_dpt_suspend(struct drm_i915_private *i915)
{
struct drm_framebuffer *drm_fb;
if (!HAS_DISPLAY(i915))
return;
mutex_lock(&i915->drm.mode_config.fb_lock);
drm_for_each_fb(drm_fb, &i915->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
i915_ggtt_suspend_vm(fb->dpt_vm);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
}
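The two kernel-docs above pin down an ordering: intel_dpt_suspend() must run before i915_ggtt_suspend(), and intel_dpt_resume() only after i915_ggtt_resume(). A minimal caller sketch of that ordering, assuming an embedded i915->ggtt field; the example_* wrappers are hypothetical names, not part of this patch:

static void example_display_suspend(struct drm_i915_private *i915)
{
	/* DPT page tables first, while their GGTT mapping is still intact */
	intel_dpt_suspend(i915);
	i915_ggtt_suspend(&i915->ggtt);
}

static void example_display_resume(struct drm_i915_private *i915)
{
	/* GGTT PTEs first, then the GGTT->DPT page tables on top of them */
	i915_ggtt_resume(&i915->ggtt);
	intel_dpt_resume(i915);
}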
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{

View File

@ -6,6 +6,8 @@
#ifndef __INTEL_DPT_H__
#define __INTEL_DPT_H__
struct drm_i915_private;
struct i915_address_space;
struct i915_vma;
struct intel_framebuffer;
@ -13,6 +15,8 @@ struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm);
void intel_dpt_unpin(struct i915_address_space *vm);
void intel_dpt_suspend(struct drm_i915_private *i915);
void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);

View File

@ -100,7 +100,7 @@ void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
u32 reg_val;
if (!dsb) {
intel_de_write(dev_priv, reg, val);
intel_de_write_fw(dev_priv, reg, val);
return;
}
buf = dsb->cmd_buf;
@ -177,7 +177,7 @@ void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
dsb = crtc_state->dsb;
if (!dsb) {
intel_de_write(dev_priv, reg, val);
intel_de_write_fw(dev_priv, reg, val);
return;
}

View File

@ -166,57 +166,15 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
return enc_to_intel_dsi(encoder)->ports;
}
/* icl_dsi.c */
void icl_dsi_init(struct drm_i915_private *dev_priv);
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector);
/* vlv_dsi.c */
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
int intel_dsi_get_modes(struct drm_connector *connector);
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
void vlv_dsi_init(struct drm_i915_private *dev_priv);
/* vlv_dsi_pll.c */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void vlv_dsi_pll_disable(struct intel_encoder *encoder);
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void bxt_dsi_pll_disable(struct intel_encoder *encoder);
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
void assert_dsi_pll_enabled(struct drm_i915_private *i915);
void assert_dsi_pll_disabled(struct drm_i915_private *i915);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
void intel_dsi_log_params(struct intel_dsi *intel_dsi);
#endif /* _INTEL_DSI_H */

View File

@ -71,6 +71,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
u8 data[2] = {};
enum port port;
size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
unsigned long mode_flags;
if (len == 1) {
data[0] = level;
@ -81,8 +82,11 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mode_flags = dsi_device->mode_flags;
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
&data, len);
dsi_device->mode_flags = mode_flags;
}
}

View File

@ -41,6 +41,8 @@
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "vlv_dsi.h"
#include "vlv_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0

View File

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __INTEL_DSI_VBT_H__
#define __INTEL_DSI_VBT_H__
#include <linux/types.h>
enum mipi_seq;
struct intel_dsi;
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
void intel_dsi_log_params(struct intel_dsi *intel_dsi);
#endif /* __INTEL_DSI_VBT_H__ */

View File

@ -13,26 +13,465 @@
#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
/*
* From the Sky Lake PRM:
* "The Color Control Surface (CCS) contains the compression status of
* the cache-line pairs. The compression state of the cache-line pair
* is specified by 2 bits in the CCS. Each CCS cache-line represents
* an area on the main surface of 16 x16 sets of 128 byte Y-tiled
* cache-line-pairs. CCS is always Y tiled."
*
* Since cache line pairs refers to horizontally adjacent cache lines,
* each cache line in the CCS corresponds to an area of 32x16 cache
* lines on the main surface. Since each pixel is 4 bytes, this gives
* us a ratio of one byte in the CCS for each 8x16 pixels in the
* main surface.
*/
static const struct drm_format_info skl_ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
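As a worked check of the ratio described in the comment above (illustration only): one 64 byte CCS cache line covers 32 x 16 main-surface cache lines, i.e. 32 * 16 * 64 = 32768 bytes, which at 4 bytes per pixel is 8192 pixels; spread over the 64 bytes of that CCS cache line this is 128 pixels per CCS byte, an 8 x 16 block, which is where the .hsub = 8 / .vsub = 16 values in the table come from.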
/*
* Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
* main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
* in the main surface. With 4 byte pixels and each Y-tile having dimensions of
* 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
* the main surface.
*/
static const struct drm_format_info gen12_ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV12, .num_planes = 4,
.char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P010, .num_planes = 4,
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P012, .num_planes = 4,
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_P016, .num_planes = 4,
.char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
.hsub = 2, .vsub = 2, .is_yuv = true },
};
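The same check for the Gen-12 layout (illustration only): one 64 byte CCS cache line maps 4 x 1 Y-tiles, i.e. 4 * 4096 = 16384 bytes, a 128 x 32 pixel area at 4 bytes per pixel; dividing those 4096 pixels by the 64 CCS bytes gives 64 pixels per CCS byte, matching the 2 x 32 ratio quoted in the comment.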
/*
* Same as gen12_ccs_formats[] above, but with additional surface used
* to pass Clear Color information in plane 2 with 64 bits of data.
*/
static const struct drm_format_info gen12_ccs_cc_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
.char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
};
struct intel_modifier_desc {
u64 modifier;
struct {
u8 from;
u8 until;
} display_ver;
#define DISPLAY_VER_ALL { 0, -1 }
const struct drm_format_info *formats;
int format_count;
#define FORMAT_OVERRIDE(format_list) \
.formats = format_list, \
.format_count = ARRAY_SIZE(format_list)
u8 plane_caps;
struct {
u8 cc_planes:3;
u8 packed_aux_planes:4;
u8 planar_aux_planes:4;
} ccs;
};
#define INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \
INTEL_PLANE_CAP_CCS_RC_CC | \
INTEL_PLANE_CAP_CCS_MC)
#define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \
INTEL_PLANE_CAP_TILING_Y | \
INTEL_PLANE_CAP_TILING_Yf)
#define INTEL_PLANE_CAP_TILING_NONE 0
static const struct intel_modifier_desc intel_modifiers[] = {
{
.modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
.display_ver = { 12, 13 },
.plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC,
.ccs.packed_aux_planes = BIT(1),
.ccs.planar_aux_planes = BIT(2) | BIT(3),
FORMAT_OVERRIDE(gen12_ccs_formats),
}, {
.modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
.display_ver = { 12, 13 },
.plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC,
.ccs.packed_aux_planes = BIT(1),
FORMAT_OVERRIDE(gen12_ccs_formats),
}, {
.modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
.display_ver = { 12, 13 },
.plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC,
.ccs.cc_planes = BIT(2),
.ccs.packed_aux_planes = BIT(1),
FORMAT_OVERRIDE(gen12_ccs_cc_formats),
}, {
.modifier = I915_FORMAT_MOD_Yf_TILED_CCS,
.display_ver = { 9, 11 },
.plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC,
.ccs.packed_aux_planes = BIT(1),
FORMAT_OVERRIDE(skl_ccs_formats),
}, {
.modifier = I915_FORMAT_MOD_Y_TILED_CCS,
.display_ver = { 9, 11 },
.plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC,
.ccs.packed_aux_planes = BIT(1),
FORMAT_OVERRIDE(skl_ccs_formats),
}, {
.modifier = I915_FORMAT_MOD_Yf_TILED,
.display_ver = { 9, 11 },
.plane_caps = INTEL_PLANE_CAP_TILING_Yf,
}, {
.modifier = I915_FORMAT_MOD_Y_TILED,
.display_ver = { 9, 13 },
.plane_caps = INTEL_PLANE_CAP_TILING_Y,
}, {
.modifier = I915_FORMAT_MOD_X_TILED,
.display_ver = DISPLAY_VER_ALL,
.plane_caps = INTEL_PLANE_CAP_TILING_X,
}, {
.modifier = DRM_FORMAT_MOD_LINEAR,
.display_ver = DISPLAY_VER_ALL,
},
};
static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier)
{
if (!is_ccs_modifier(fb->modifier))
int i;
for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++)
if (intel_modifiers[i].modifier == modifier)
return &intel_modifiers[i];
return NULL;
}
static const struct intel_modifier_desc *lookup_modifier(u64 modifier)
{
const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
if (WARN_ON(!md))
return &intel_modifiers[0];
return md;
}
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
{
int i;
for (i = 0; i < num_formats; i++) {
if (formats[i].format == format)
return &formats[i];
}
return NULL;
}
/**
* intel_fb_get_format_info: Get modifier specific format information
* @cmd: FB add command structure
*
* Returns:
* Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0],
* or %NULL if the modifier doesn't override the format.
*/
const struct drm_format_info *
intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]);
if (!md || !md->formats)
return NULL;
return lookup_format_info(md->formats, md->format_count, cmd->pixel_format);
}
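A minimal sketch of how a .get_format_info style hook could use this helper, falling back to the plain fourcc lookup when the modifier does not override the format; the example_ name is hypothetical, drm_format_info() is the generic DRM lookup:

static const struct drm_format_info *
example_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	const struct drm_format_info *info = intel_fb_get_format_info(cmd);

	/* NULL means the modifier does not override the format */
	return info ?: drm_format_info(cmd->pixel_format);
}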
static bool plane_caps_contain_any(u8 caps, u8 mask)
{
return caps & mask;
}
static bool plane_caps_contain_all(u8 caps, u8 mask)
{
return (caps & mask) == mask;
}
/**
* intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type
* @modifier: Modifier to check
*
* Returns:
* Returns %true if @modifier is a render, render with color clear or
* media compression modifier.
*/
bool intel_fb_is_ccs_modifier(u64 modifier)
{
return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
INTEL_PLANE_CAP_CCS_MASK);
}
/**
* intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type
* @modifier: Modifier to check
*
* Returns:
* Returns %true if @modifier is a render with color clear modifier.
*/
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier)
{
return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
INTEL_PLANE_CAP_CCS_RC_CC);
}
/**
* intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type
* @modifier: Modifier to check
*
* Returns:
* Returns %true if @modifier is a media compression modifier.
*/
bool intel_fb_is_mc_ccs_modifier(u64 modifier)
{
return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
INTEL_PLANE_CAP_CCS_MC);
}
static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
u8 display_ver_from, u8 display_ver_until)
{
return md->display_ver.from <= display_ver_until &&
display_ver_from <= md->display_ver.until;
}
static bool plane_has_modifier(struct drm_i915_private *i915,
u8 plane_caps,
const struct intel_modifier_desc *md)
{
if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until))
return false;
return plane >= fb->format->num_planes / 2;
if (!plane_caps_contain_all(plane_caps, md->plane_caps))
return false;
return true;
}
bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
/**
* intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities
* @i915: i915 device instance
* @plane_caps: capabilities for the plane the modifiers are queried for
*
* Returns:
* Returns the list of modifiers allowed by the @i915 platform and @plane_caps.
* The caller must free the returned buffer.
*/
u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
u8 plane_caps)
{
return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
u64 *list, *p;
int count = 1; /* +1 for invalid modifier terminator */
int i;
for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
count++;
}
list = kmalloc_array(count, sizeof(*list), GFP_KERNEL);
if (drm_WARN_ON(&i915->drm, !list))
return NULL;
p = list;
for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
*p++ = intel_modifiers[i].modifier;
}
*p++ = DRM_FORMAT_MOD_INVALID;
return list;
}
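A hedged usage sketch of the allocate/terminate/free contract described above; the plane, funcs and format array below are placeholders, and the DRM core copies the modifier list in drm_universal_plane_init(), so it can be freed right after registration:

static int example_plane_init(struct drm_i915_private *i915,
			      struct drm_plane *plane,
			      const struct drm_plane_funcs *funcs,
			      const u32 *formats, unsigned int num_formats)
{
	u64 *modifiers;
	int ret;

	modifiers = intel_fb_plane_get_modifiers(i915, INTEL_PLANE_CAP_TILING_X);

	ret = drm_universal_plane_init(&i915->drm, plane, BIT(PIPE_A), funcs,
				       formats, num_formats, modifiers,
				       DRM_PLANE_TYPE_PRIMARY, "example plane");

	kfree(modifiers);

	return ret;
}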
bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
/**
* intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane
* @plane: Plane to check the modifier support for
* @modifier: The modifier to check the support for
*
* Returns:
* %true if the @modifier is supported on @plane.
*/
bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier)
{
return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
plane == 2;
int i;
for (i = 0; i < plane->base.modifier_count; i++)
if (plane->base.modifiers[i] == modifier)
return true;
return false;
}
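A sketch of how a plane's .format_mod_supported() hook could sit on top of this helper; the example hook is an assumption, not the driver's actual implementation, and real hooks add per-format restrictions after the modifier check:

static bool example_format_mod_supported(struct drm_plane *_plane,
					 u32 format, u64 modifier)
{
	struct intel_plane *plane = to_intel_plane(_plane);

	if (!intel_fb_plane_supports_modifier(plane, modifier))
		return false;

	/* Per-format restrictions (e.g. CCS vs. YUV) would be layered here. */
	return true;
}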
bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md,
const struct drm_format_info *info)
{
int yuv_planes;
if (!info->is_yuv)
return false;
if (plane_caps_contain_any(md->plane_caps, INTEL_PLANE_CAP_CCS_MASK))
yuv_planes = 4;
else
yuv_planes = 2;
return info->num_planes == yuv_planes;
}
/**
* intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar
* @info: format to check
* @modifier: modifier used with the format
*
* Returns:
* %true if @info / @modifier is YUV semiplanar.
*/
bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier)
{
return format_is_yuv_semiplanar(lookup_modifier(modifier), info);
}
static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md,
const struct drm_format_info *format)
{
if (format_is_yuv_semiplanar(md, format))
return md->ccs.planar_aux_planes;
else
return md->ccs.packed_aux_planes;
}
/**
* intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane
* @fb: Framebuffer
* @color_plane: color plane index to check
*
* Returns:
* Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane.
*/
bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane)
{
const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane);
}
/**
* intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane
* @fb: Framebuffer
* @color_plane: color plane index to check
*
* Returns:
* Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane.
*/
static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane)
{
const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
return check_modifier_display_ver_range(md, 12, 13) &&
ccs_aux_plane_mask(md, fb->format) & BIT(color_plane);
}
/**
* intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer
* @fb: Framebuffer
*
* Returns:
* Returns the index of the color clear plane for @fb, or -1 if @fb is not a
* framebuffer using a render compression/color clear modifier.
*/
int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb)
{
const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
if (!md->ccs.cc_planes)
return -1;
drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1);
return ilog2((int)md->ccs.cc_planes);
}
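A small sketch of how a caller might use the returned index to locate the 64 bit clear color value mentioned in the gen12_ccs_cc_formats comment; the helper name and the offset-based access are illustrative assumptions:

static bool example_fb_clear_color_offset(const struct drm_framebuffer *fb,
					  unsigned int *offset)
{
	int cc_plane = intel_fb_rc_ccs_cc_plane(fb);

	if (cc_plane < 0)
		return false;

	/* The 64 bit clear color value lives at this offset in the FB object */
	*offset = fb->offsets[cc_plane];

	return true;
}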
static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane)
{
return intel_fb_rc_ccs_cc_plane(fb) == color_plane;
}
static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
{
return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
color_plane == 1;
@ -41,12 +480,13 @@ bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
is_gen12_ccs_plane(fb, color_plane);
intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) ||
is_gen12_ccs_cc_plane(fb, color_plane);
}
int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
{
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) ||
(main_plane && main_plane >= fb->format->num_planes / 2));
return fb->format->num_planes / 2 + main_plane;
@ -54,7 +494,7 @@ int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
{
drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) ||
ccs_plane < fb->format->num_planes / 2);
if (is_gen12_ccs_cc_plane(fb, ccs_plane))
@ -63,35 +503,12 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
return ccs_plane - fb->format->num_planes / 2;
}
static unsigned int gen12_aligned_scanout_stride(const struct intel_framebuffer *fb,
int color_plane)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int stride = fb->base.pitches[color_plane];
if (IS_ALDERLAKE_P(i915))
return roundup_pow_of_two(max(stride,
8u * intel_tile_width_bytes(&fb->base, color_plane)));
return stride;
}
static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane);
unsigned int main_stride = fb->base.pitches[main_plane];
unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane);
/*
* On ADL-P the AUX stride must align with a power-of-two aligned main
* surface stride. The stride of the allocated main surface object can
* be less than this POT stride, which is then autopadded to the POT
* size.
*/
if (IS_ALDERLAKE_P(i915))
main_stride = gen12_aligned_scanout_stride(fb, main_plane);
return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64;
}
@ -99,7 +516,7 @@ int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
struct drm_i915_private *i915 = to_i915(fb->dev);
if (is_ccs_modifier(fb->modifier))
if (intel_fb_is_ccs_modifier(fb->modifier))
return main_to_ccs_plane(fb, main_plane);
else if (DISPLAY_VER(i915) < 11 &&
intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
@ -128,13 +545,14 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
else
return 512;
case I915_FORMAT_MOD_Y_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 128;
fallthrough;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (is_ccs_plane(fb, color_plane))
if (intel_fb_is_ccs_aux_plane(fb, color_plane) ||
is_gen12_ccs_cc_plane(fb, color_plane))
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
@ -143,7 +561,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
else
return 512;
case I915_FORMAT_MOD_Yf_TILED_CCS:
if (is_ccs_plane(fb, color_plane))
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 128;
fallthrough;
case I915_FORMAT_MOD_Yf_TILED:
@ -199,7 +617,7 @@ static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_pl
{
intel_tile_dims(fb, color_plane, tile_width, tile_height);
if (is_gen12_ccs_plane(fb, color_plane))
if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane))
*tile_height = 1;
}
@ -223,16 +641,19 @@ intel_fb_align_height(const struct drm_framebuffer *fb,
static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case I915_FORMAT_MOD_X_TILED:
return I915_TILING_X;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps &
INTEL_PLANE_CAP_TILING_MASK;
switch (tiling_caps) {
case INTEL_PLANE_CAP_TILING_Y:
return I915_TILING_Y;
case INTEL_PLANE_CAP_TILING_X:
return I915_TILING_X;
case INTEL_PLANE_CAP_TILING_Yf:
case INTEL_PLANE_CAP_TILING_NONE:
return I915_TILING_NONE;
default:
MISSING_CASE(tiling_caps);
return I915_TILING_NONE;
}
}
@ -271,7 +692,7 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return 512 * 4096;
/* AUX_DIST needs only 4K alignment */
if (is_ccs_plane(fb, color_plane))
if (intel_fb_is_ccs_aux_plane(fb, color_plane))
return 4096;
if (is_semiplanar_uv_plane(fb, color_plane)) {
@ -330,7 +751,7 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
* TODO: Deduce the subsampling from the char block for all CCS
* formats and planes.
*/
if (!is_gen12_ccs_plane(fb, color_plane)) {
if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) {
*hsub = fb->format->hsub;
*vsub = fb->format->vsub;
@ -357,24 +778,13 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
int main_plane = is_ccs_plane(&fb->base, color_plane) ?
int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ?
skl_ccs_to_main_plane(&fb->base, color_plane) : 0;
unsigned int main_width = fb->base.width;
unsigned int main_height = fb->base.height;
int main_hsub, main_vsub;
int hsub, vsub;
/*
* On ADL-P the CCS AUX surface layout always aligns with the
* power-of-two aligned main surface stride. The main surface
* stride in the allocated FB object may not be power-of-two
* sized, in which case it is auto-padded to the POT size.
*/
if (IS_ALDERLAKE_P(i915) && is_ccs_plane(&fb->base, color_plane))
main_width = gen12_aligned_scanout_stride(fb, 0) /
fb->base.format->cpp[0];
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
@ -409,6 +819,20 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
static u32 intel_adjust_linear_offset(int *x, int *y,
unsigned int cpp,
unsigned int pitch,
u32 old_offset,
u32 new_offset)
{
old_offset += *y * pitch + *x * cpp;
*y = (old_offset - new_offset) / pitch;
*x = ((old_offset - new_offset) - *y * pitch) / cpp;
return new_offset;
}
static u32 intel_adjust_aligned_offset(int *x, int *y,
const struct drm_framebuffer *fb,
int color_plane,
@ -439,10 +863,8 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
tile_size, pitch_tiles,
old_offset, new_offset);
} else {
old_offset += *y * pitch + *x * cpp;
*y = (old_offset - new_offset) / pitch;
*x = ((old_offset - new_offset) - *y * pitch) / cpp;
intel_adjust_linear_offset(x, y, cpp, pitch,
old_offset, new_offset);
}
return new_offset;
@ -459,7 +881,7 @@ u32 intel_plane_adjust_aligned_offset(int *x, int *y,
{
return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
state->hw.rotation,
state->view.color_plane[color_plane].stride,
state->view.color_plane[color_plane].mapping_stride,
old_offset, new_offset);
}
@ -540,7 +962,7 @@ u32 intel_plane_compute_aligned_offset(int *x, int *y,
struct drm_i915_private *i915 = to_i915(intel_plane->base.dev);
const struct drm_framebuffer *fb = state->hw.fb;
unsigned int rotation = state->hw.rotation;
int pitch = state->view.color_plane[color_plane].stride;
int pitch = state->view.color_plane[color_plane].mapping_stride;
u32 alignment;
if (intel_plane->id == PLANE_CURSOR)
@ -562,6 +984,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
u32 alignment;
if (DISPLAY_VER(i915) >= 12 &&
!intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
is_semiplanar_uv_plane(fb, color_plane))
alignment = intel_tile_row_size(fb, color_plane);
else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
@ -610,7 +1033,7 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane
int ccs_x, ccs_y;
int main_x, main_y;
if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane))
return 0;
/*
@ -673,7 +1096,7 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
* The new CCS hash mode isn't compatible with remapping as
* the virtual address of the pages affects the compressed data.
*/
if (is_ccs_modifier(fb->modifier))
if (intel_fb_is_ccs_modifier(fb->modifier))
return false;
/* Linear needs a page aligned stride for remapping */
@ -699,11 +1122,11 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
{
if (drm_rotation_90_or_270(rotation))
return fb->rotated_view.color_plane[color_plane].stride;
return fb->rotated_view.color_plane[color_plane].mapping_stride;
else if (intel_fb_needs_pot_stride_remap(fb))
return fb->remapped_view.color_plane[color_plane].stride;
return fb->remapped_view.color_plane[color_plane].mapping_stride;
else
return fb->normal_view.color_plane[color_plane].stride;
return fb->normal_view.color_plane[color_plane].mapping_stride;
}
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
@ -814,17 +1237,31 @@ plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
unsigned int pitch_tiles)
{
if (intel_fb_needs_pot_stride_remap(fb)) {
unsigned int min_stride = is_ccs_plane(&fb->base, color_plane) ? 2 : 8;
/*
* ADL_P, the only platform needing a POT stride, has a minimum
* of 8 main surface and 2 CCS AUX stride tiles.
* of 8 main surface tiles.
*/
return roundup_pow_of_two(max(pitch_tiles, min_stride));
return roundup_pow_of_two(max(pitch_tiles, 8u));
} else {
return pitch_tiles;
}
}
static unsigned int
plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
unsigned int tile_width,
unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
{
unsigned int stride_tiles;
if (IS_ALDERLAKE_P(to_i915(fb->base.dev)))
stride_tiles = src_stride_tiles;
else
stride_tiles = dst_stride_tiles;
return stride_tiles * tile_width * fb->base.format->cpp[color_plane];
}
static unsigned int
plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
@ -841,11 +1278,31 @@ plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane,
return DIV_ROUND_UP(y + dims->height, dims->tile_height);
}
static unsigned int
plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
int x, int y)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int size;
size = (y + dims->height) * fb->base.pitches[color_plane] +
x * fb->base.format->cpp[color_plane];
return DIV_ROUND_UP(size, intel_tile_size(i915));
}
#define assign_chk_ovf(i915, var, val) ({ \
drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \
(var) = (val); \
})
#define assign_bfld_chk_ovf(i915, var, val) ({ \
(var) = (val); \
drm_WARN_ON(&(i915)->drm, (var) != (val)); \
(var); \
})
static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
u32 obj_offset, u32 gtt_offset, int x, int y,
@ -860,12 +1317,26 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
struct drm_rect r;
u32 size = 0;
assign_chk_ovf(i915, remap_info->offset, obj_offset);
assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims));
assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x));
assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y));
assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset);
if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) {
remap_info->linear = 1;
assign_chk_ovf(i915, remap_info->size,
plane_view_linear_tiles(fb, color_plane, dims, x, y));
} else {
remap_info->linear = 0;
assign_chk_ovf(i915, remap_info->src_stride,
plane_view_src_stride_tiles(fb, color_plane, dims));
assign_chk_ovf(i915, remap_info->width,
plane_view_width_tiles(fb, color_plane, dims, x));
assign_chk_ovf(i915, remap_info->height,
plane_view_height_tiles(fb, color_plane, dims, y));
}
if (view->gtt.type == I915_GGTT_VIEW_ROTATED) {
drm_WARN_ON(&i915->drm, remap_info->linear);
check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
assign_chk_ovf(i915, remap_info->dst_stride,
@ -881,7 +1352,8 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
color_plane_info->x = r.x1;
color_plane_info->y = r.y1;
color_plane_info->stride = remap_info->dst_stride * tile_height;
color_plane_info->mapping_stride = remap_info->dst_stride * tile_height;
color_plane_info->scanout_stride = color_plane_info->mapping_stride;
size += remap_info->dst_stride * remap_info->width;
@ -900,16 +1372,29 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
gtt_offset = aligned_offset;
}
assign_chk_ovf(i915, remap_info->dst_stride,
plane_view_dst_stride_tiles(fb, color_plane, remap_info->width));
color_plane_info->x = x;
color_plane_info->y = y;
color_plane_info->stride = remap_info->dst_stride * tile_width *
fb->base.format->cpp[color_plane];
if (remap_info->linear) {
color_plane_info->mapping_stride = fb->base.pitches[color_plane];
color_plane_info->scanout_stride = color_plane_info->mapping_stride;
size += remap_info->dst_stride * remap_info->height;
size += remap_info->size;
} else {
unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane,
remap_info->width);
assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
color_plane_info->mapping_stride = dst_stride *
tile_width *
fb->base.format->cpp[color_plane];
color_plane_info->scanout_stride =
plane_view_scanout_stride(fb, color_plane, tile_width,
remap_info->src_stride,
dst_stride);
size += dst_stride * remap_info->height;
}
}
/*
@ -917,10 +1402,16 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
* the x/y offsets. x,y will hold the first pixel of the framebuffer
* plane from the start of the remapped/rotated gtt mapping.
*/
intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y,
tile_width, tile_height,
tile_size, remap_info->dst_stride,
gtt_offset * tile_size, 0);
if (remap_info->linear)
intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y,
fb->base.format->cpp[color_plane],
color_plane_info->mapping_stride,
gtt_offset * tile_size, 0);
else
intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y,
tile_width, tile_height,
tile_size, remap_info->dst_stride,
gtt_offset * tile_size, 0);
return size;
}
@ -933,15 +1424,10 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
const struct fb_plane_view_dims *dims,
int x, int y)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int tiles;
if (is_surface_linear(&fb->base, color_plane)) {
unsigned int size;
size = (y + dims->height) * fb->base.pitches[color_plane] +
x * fb->base.format->cpp[color_plane];
tiles = DIV_ROUND_UP(size, intel_tile_size(i915));
tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y);
} else {
tiles = plane_view_src_stride_tiles(fb, color_plane, dims) *
plane_view_height_tiles(fb, color_plane, dims, y);
@ -1030,7 +1516,9 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
*/
fb->normal_view.color_plane[i].x = x;
fb->normal_view.color_plane[i].y = y;
fb->normal_view.color_plane[i].stride = fb->base.pitches[i];
fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i];
fb->normal_view.color_plane[i].scanout_stride =
fb->normal_view.color_plane[i].mapping_stride;
offset = calc_plane_aligned_offset(fb, i, &x, &y);
@ -1080,7 +1568,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
drm_WARN_ON(&i915->drm, is_ccs_modifier(fb->modifier));
drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
drm_rect_translate(&plane_state->uapi.src,
@ -1143,7 +1631,7 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
*
* The new CCS hash mode makes remapping impossible
*/
if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) ||
intel_modifier_uses_dpt(dev_priv, modifier))
return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
else if (DISPLAY_VER(dev_priv) >= 7)
@ -1168,27 +1656,19 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* we need the stride to be page aligned.
*/
if (fb->pitches[color_plane] > max_stride &&
!is_ccs_modifier(fb->modifier))
!intel_fb_is_ccs_modifier(fb->modifier))
return intel_tile_size(dev_priv);
else
return 64;
}
tile_width = intel_tile_width_bytes(fb, color_plane);
if (is_ccs_modifier(fb->modifier)) {
/*
* On ADL-P the stride must be either 8 tiles or a stride
* that is aligned to 16 tiles, required by the 16 tiles =
* 64 kbyte CCS AUX PTE granularity, allowing CCS FBs to be
* remapped.
*/
if (IS_ALDERLAKE_P(dev_priv))
tile_width *= fb->pitches[0] <= tile_width * 8 ? 8 : 16;
if (intel_fb_is_ccs_modifier(fb->modifier)) {
/*
* On TGL the surface stride must be 4 tile aligned, mapped by
* one 64 byte cacheline on the CCS AUX surface.
*/
else if (DISPLAY_VER(dev_priv) >= 12)
if (DISPLAY_VER(dev_priv) >= 12)
tile_width *= 4;
/*
* Display WA #0531: skl,bxt,kbl,glk
@ -1224,7 +1704,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
return 0;
/* FIXME other color planes? */
stride = plane_state->view.color_plane[0].stride;
stride = plane_state->view.color_plane[0].mapping_stride;
max_stride = plane->max_stride(plane, fb->format->format,
fb->modifier, rotation);
@ -1430,7 +1910,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i);
if (fb->pitches[i] != ccs_aux_stride) {

View File

@ -6,6 +6,7 @@
#ifndef __INTEL_FB_H__
#define __INTEL_FB_H__
#include <linux/bits.h>
#include <linux/types.h>
struct drm_device;
@ -16,12 +17,34 @@ struct drm_i915_private;
struct drm_mode_fb_cmd2;
struct intel_fb_view;
struct intel_framebuffer;
struct intel_plane;
struct intel_plane_state;
bool is_ccs_plane(const struct drm_framebuffer *fb, int plane);
bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane);
bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane);
bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane);
#define INTEL_PLANE_CAP_NONE 0
#define INTEL_PLANE_CAP_CCS_RC BIT(0)
#define INTEL_PLANE_CAP_CCS_RC_CC BIT(1)
#define INTEL_PLANE_CAP_CCS_MC BIT(2)
#define INTEL_PLANE_CAP_TILING_X BIT(3)
#define INTEL_PLANE_CAP_TILING_Y BIT(4)
#define INTEL_PLANE_CAP_TILING_Yf BIT(5)
bool intel_fb_is_ccs_modifier(u64 modifier);
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
bool intel_fb_is_mc_ccs_modifier(u64 modifier);
bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
u8 plane_caps);
bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier);
const struct drm_format_info *
intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier);
bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane);

View File

@ -142,13 +142,11 @@ retry:
if (ret)
goto err;
if (!ret) {
vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
view, pinctl);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
}
vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
view, pinctl);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
}
if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {

File diff suppressed because it is too large

View File

@ -14,16 +14,19 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_fbc;
struct intel_plane_state;
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
bool intel_fbc_is_active(struct intel_fbc *fbc);
bool intel_fbc_is_compressing(struct intel_fbc *fbc);
bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_cleanup(struct drm_i915_private *dev_priv);
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
@ -33,8 +36,8 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc);
int intel_fbc_reset_underrun(struct intel_fbc *fbc);
int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable);
#endif /* __INTEL_FBC_H__ */

View File

@ -8,7 +8,6 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_sbi.h"
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@ -887,6 +886,43 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
DP_TP_CTL_ENABLE);
}
void hsw_fdi_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
* and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
val &= ~DDI_BUF_CTL_ENABLE;
intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
intel_ddi_disable_clock(encoder);
val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_PCDCLK;
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_PLL_ENABLE;
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@ -1006,104 +1042,6 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
}
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
u32 tmp;
tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
u32 tmp;
lpt_fdi_reset_mphy(dev_priv);
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
static const struct intel_fdi_funcs ilk_funcs = {
.fdi_link_train = ilk_fdi_link_train,
};

View File

@ -23,8 +23,8 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_fdi_disable(struct intel_encoder *encoder);
void intel_fdi_pll_freq_update(struct drm_i915_private *i915);
void lpt_fdi_program_mphy(struct drm_i915_private *i915);
void intel_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);

View File

@ -434,7 +434,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
intel_fbc_handle_fifo_underrun_irq(&dev_priv->fbc);
}
/**

View File

@ -334,6 +334,15 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
algo->data = bus;
}
static bool has_gmbus_irq(struct drm_i915_private *i915)
{
/*
* encoder->shutdown() may want to use GMBUS
* after irqs have already been disabled.
*/
return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915);
}
static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
{
DEFINE_WAIT(wait);
@ -344,7 +353,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves.
*/
if (!HAS_GMBUS_IRQ(dev_priv))
if (!has_gmbus_irq(dev_priv))
irq_en = 0;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
@ -375,7 +384,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
/* Important: The hw handles only the first bit, so set only one! */
irq_enable = 0;
if (HAS_GMBUS_IRQ(dev_priv))
if (has_gmbus_irq(dev_priv))
irq_enable = GMBUS_IDLE_EN;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);

View File

@ -1800,6 +1800,11 @@ static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi,
READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
}
static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state)
{
return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420;
}
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
bool has_hdmi_sink)
@ -1864,8 +1869,12 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_OK;
}
static int intel_hdmi_port_clock(int clock, int bpc)
static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
{
/* YCBCR420 TMDS rate requirement is half the pixel clock */
if (ycbcr420_output)
clock /= 2;
/*
* Need to adjust the port link by:
* 1.5x for 12bpc
@ -1874,18 +1883,29 @@ static int intel_hdmi_port_clock(int clock, int bpc)
return clock * bpc / 8;
}
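For reference, a minimal standalone sketch (not part of this diff) of the arithmetic intel_hdmi_tmds_clock() performs above; the 594000 kHz pixel clock and the 12 bpc / YCbCr 4:2:0 combination are assumptions chosen purely for illustration.
#include <stdio.h>
/* Same arithmetic as intel_hdmi_tmds_clock(): halve for 4:2:0, scale by bpc/8 */
static int example_tmds_clock(int clock, int bpc, int ycbcr420_output)
{
	if (ycbcr420_output)
		clock /= 2;		/* 4:2:0 needs half the TMDS rate */
	return clock * bpc / 8;		/* 1.25x for 10 bpc, 1.5x for 12 bpc */
}
int main(void)
{
	/* hypothetical 4k@60 pixel clock in kHz -> prints 445500 */
	printf("%d\n", example_tmds_clock(594000, 12, 1));
	return 0;
}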
static bool intel_hdmi_bpc_possible(struct drm_connector *connector,
int bpc, bool has_hdmi_sink, bool ycbcr420_output)
static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
{
switch (bpc) {
case 12:
return !HAS_GMCH(i915);
case 10:
return DISPLAY_VER(i915) >= 11;
case 8:
return true;
default:
MISSING_CASE(bpc);
return false;
}
}
static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
int bpc, bool has_hdmi_sink, bool ycbcr420_output)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
const struct drm_display_info *info = &connector->display_info;
const struct drm_hdmi_info *hdmi = &info->hdmi;
switch (bpc) {
case 12:
if (HAS_GMCH(i915))
return false;
if (!has_hdmi_sink)
return false;
@ -1894,9 +1914,6 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector,
else
return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36;
case 10:
if (DISPLAY_VER(i915) < 11)
return false;
if (!has_hdmi_sink)
return false;
@ -1916,26 +1933,26 @@ static enum drm_mode_status
intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
bool has_hdmi_sink, bool ycbcr420_output)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum drm_mode_status status;
if (ycbcr420_output)
clock /= 2;
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output),
true, has_hdmi_sink);
/* if we can't do 8bpc we may still be able to do 12bpc */
if (status != MODE_OK &&
intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
intel_hdmi_source_bpc_possible(i915, 12) &&
intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output))
status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output),
true, has_hdmi_sink);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
if (status != MODE_OK &&
intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
intel_hdmi_source_bpc_possible(i915, 10) &&
intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output))
status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output),
true, has_hdmi_sink);
return status;
@ -2000,7 +2017,7 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
return false;
}
@ -2015,6 +2032,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
if (!intel_hdmi_source_bpc_possible(dev_priv, bpc))
return false;
/*
* HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
@ -2023,7 +2043,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return false;
/* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
if (intel_hdmi_is_ycbcr420(crtc_state) &&
bpc == 10 && DISPLAY_VER(dev_priv) == 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
@ -2031,8 +2051,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return intel_hdmi_deep_color_possible(crtc_state, bpc,
crtc_state->has_hdmi_sink,
crtc_state->output_format ==
INTEL_OUTPUT_FORMAT_YCBCR420);
intel_hdmi_is_ycbcr420(crtc_state));
}
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
@ -2040,12 +2059,13 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
int clock)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state);
int bpc;
for (bpc = 12; bpc >= 10; bpc -= 2) {
if (hdmi_deep_color_possible(crtc_state, bpc) &&
hdmi_port_clock_valid(intel_hdmi,
intel_hdmi_port_clock(clock, bpc),
intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output),
true, crtc_state->has_hdmi_sink) == MODE_OK)
return bpc;
}
@ -2065,13 +2085,10 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
/* YCBCR420 TMDS rate requirement is half the pixel clock */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
clock /= 2;
bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock);
crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc,
intel_hdmi_is_ycbcr420(crtc_state));
/*
* pipe_bpp could already be below 8bpc due to
@ -2141,34 +2158,44 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
static enum intel_output_format
intel_hdmi_output_format(struct intel_connector *connector,
bool ycbcr_420_output)
{
if (connector->base.ycbcr_420_allowed && ycbcr_420_output)
return INTEL_OUTPUT_FORMAT_YCBCR420;
else
return INTEL_OUTPUT_FORMAT_RGB;
}
static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_connector *connector = conn_state->connector;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct drm_display_info *info = &connector->base.display_info;
struct drm_i915_private *i915 = to_i915(connector->base.dev);
bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
int ret;
bool ycbcr_420_only;
ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, adjusted_mode);
if (connector->ycbcr_420_allowed && ycbcr_420_only) {
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
} else {
if (!connector->ycbcr_420_allowed && ycbcr_420_only)
drm_dbg_kms(&i915->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->output_format = intel_hdmi_output_format(connector, ycbcr_420_only);
if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) {
drm_dbg_kms(&i915->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
}
ret = intel_hdmi_compute_clock(encoder, crtc_state);
if (ret) {
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 &&
connector->ycbcr_420_allowed &&
drm_mode_is_420_also(&connector->display_info, adjusted_mode)) {
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
ret = intel_hdmi_compute_clock(encoder, crtc_state);
}
if (intel_hdmi_is_ycbcr420(crtc_state) ||
!connector->base.ycbcr_420_allowed ||
!drm_mode_is_420_also(info, adjusted_mode))
return ret;
crtc_state->output_format = intel_hdmi_output_format(connector, true);
ret = intel_hdmi_compute_clock(encoder, crtc_state);
}
return ret;
@ -2208,7 +2235,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
if (intel_hdmi_is_ycbcr420(pipe_config)) {
ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;

View File

@ -74,7 +74,7 @@
#include "intel_de.h"
#include "intel_lpe_audio.h"
#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL)
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
@ -96,7 +96,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
}
rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq;
rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq;
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
@ -148,7 +148,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
* than us fiddle with its internals.
*/
platform_device_unregister(dev_priv->lpe_audio.platdev);
platform_device_unregister(dev_priv->audio.lpe.platdev);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@ -167,7 +167,7 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
int irq = dev_priv->lpe_audio.irq;
int irq = dev_priv->audio.lpe.irq;
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
@ -204,15 +204,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
{
int ret;
dev_priv->lpe_audio.irq = irq_alloc_desc(0);
if (dev_priv->lpe_audio.irq < 0) {
dev_priv->audio.lpe.irq = irq_alloc_desc(0);
if (dev_priv->audio.lpe.irq < 0) {
drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
dev_priv->lpe_audio.irq);
ret = dev_priv->lpe_audio.irq;
dev_priv->audio.lpe.irq);
ret = dev_priv->audio.lpe.irq;
goto err;
}
drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq);
ret = lpe_audio_irq_init(dev_priv);
@ -223,10 +223,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
goto err_free_irq;
}
dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv);
dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
if (IS_ERR(dev_priv->lpe_audio.platdev)) {
ret = PTR_ERR(dev_priv->lpe_audio.platdev);
if (IS_ERR(dev_priv->audio.lpe.platdev)) {
ret = PTR_ERR(dev_priv->audio.lpe.platdev);
drm_err(&dev_priv->drm,
"Failed to create lpe audio platform device: %d\n",
ret);
@ -241,10 +241,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
return 0;
err_free_irq:
irq_free_desc(dev_priv->lpe_audio.irq);
irq_free_desc(dev_priv->audio.lpe.irq);
err:
dev_priv->lpe_audio.irq = -1;
dev_priv->lpe_audio.platdev = NULL;
dev_priv->audio.lpe.irq = -1;
dev_priv->audio.lpe.platdev = NULL;
return ret;
}
@ -262,7 +262,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
if (!HAS_LPE_AUDIO(dev_priv))
return;
ret = generic_handle_irq(dev_priv->lpe_audio.irq);
ret = generic_handle_irq(dev_priv->audio.lpe.irq);
if (ret)
drm_err_ratelimited(&dev_priv->drm,
"error handling LPE audio irq: %d\n", ret);
@ -303,10 +303,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
irq_free_desc(dev_priv->audio.lpe.irq);
dev_priv->lpe_audio.irq = -1;
dev_priv->lpe_audio.platdev = NULL;
dev_priv->audio.lpe.irq = -1;
dev_priv->audio.lpe.platdev = NULL;
}
/**
@ -333,7 +333,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (!HAS_LPE_AUDIO(dev_priv))
return;
pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev);
pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev);
ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
@ -361,7 +361,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
}
if (pdata->notify_audio_lpe)
pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B);
pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}

View File

@ -0,0 +1,501 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include "g4x_dp.h"
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_lvds.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pps.h"
#include "intel_sdvo.h"
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
enum pipe port_pipe;
bool state;
state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
I915_STATE_WARN(state && port_pipe == pipe,
"PCH DP %c enabled on transcoder %c, should be disabled\n",
port_name(port), pipe_name(pipe));
I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
"IBX PCH DP %c still using transcoder B\n",
port_name(port));
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
enum pipe port_pipe;
bool state;
state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
I915_STATE_WARN(state && port_pipe == pipe,
"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
port_name(port), pipe_name(pipe));
I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
"IBX PCH HDMI %c still using transcoder B\n",
port_name(port));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
enum pipe port_pipe;
assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
port_pipe == pipe,
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
port_pipe == pipe,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
u32 val;
bool enabled;
val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
enabled = !!(val & TRANS_ENABLE);
I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(dev_priv, reg);
/*
* Workaround: Set the timing override bit
* before enabling the pch transcoder.
*/
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, reg, val);
}
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
/*
* Make the BPC in the transcoder consistent with
* that in the pipeconf reg. For HDMI we must use 8bpc
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
} else {
val |= TRANS_PROGRESSIVE;
}
intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
pipe_name(pipe));
}
static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_ENABLE;
intel_de_write(dev_priv, reg, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
intel_de_write(dev_priv, reg, val);
}
}
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
* - FDI training & RX/TX
* - update transcoder timings
* - DP transcoding bits
* - transcoder
*/
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
u32 temp;
assert_pch_transcoder_disabled(dev_priv, pipe);
/* For PCH output, training FDI link */
intel_fdi_link_train(crtc, crtc_state);
/*
* We need to program the right clock selection
* before writing the pixel multiplier into the DPLL.
*/
if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (crtc_state->shared_dpll ==
intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
/*
* XXX: PCH PLLs can be enabled any time before we enable the PCH
* transcoder, and we actually should do this to avoid upsetting any PCH
* transcoder that already uses the clock when we share it.
*
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that
* to have the right LVDS enable sequence.
*/
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
assert_pps_unlocked(dev_priv, pipe);
ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev_priv) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
temp |= TRANS_DP_OUTPUT_ENABLE;
temp |= bpc << 9; /* same format but at 11:9 */
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
port = intel_get_crtc_new_encoder(state, crtc_state)->port;
drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D);
temp |= TRANS_DP_PORT_SEL(port);
intel_de_write(dev_priv, reg, temp);
}
ilk_enable_pch_transcoder(crtc_state);
}
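As a side note on the "same format but at 11:9" comment above, a hedged standalone sketch of the BPC bitfield relocation from PIPECONF into TRANS_DP_CTL; the 3-bit field position at bits 7:5 is inferred from the >> 5 in the code, and the example field value is hypothetical (the real encodings live in i915_reg.h).
#include <stdio.h>
#include <stdint.h>
#define EXAMPLE_PIPECONF_BPC_MASK (7u << 5)	/* assumed 3-bit field at bits 7:5 */
int main(void)
{
	uint32_t pipeconf = 1u << 5;	/* hypothetical encoded BPC value */
	uint32_t bpc = (pipeconf & EXAMPLE_PIPECONF_BPC_MASK) >> 5;
	/* same 3-bit encoding, relocated to TRANS_DP_CTL bits 11:9 */
	printf("0x%x\n", bpc << 9);	/* prints 0x200 */
	return 0;
}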
void ilk_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
ilk_fdi_disable(crtc);
}
void ilk_pch_post_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
ilk_disable_pch_transcoder(crtc);
if (HAS_PCH_CPT(dev_priv)) {
i915_reg_t reg;
u32 temp;
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_PORT_SEL_MASK);
temp |= TRANS_DP_PORT_SEL_NONE;
intel_de_write(dev_priv, reg, temp);
/* disable DPLL_SEL */
temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
ilk_fdi_pll_disable(crtc);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* read out port_clock from the DPLL */
i9xx_crtc_clock_get(crtc, crtc_state);
/*
* In case there is an active pipe without active ports,
* we may need some idea for the dotclock anyway.
* Calculate one based on the FDI configuration.
*/
crtc_state->hw.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state),
&crtc_state->fdi_m_n);
}
void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum pipe pipe = crtc->pipe;
enum intel_dpll_id pll_id;
bool pll_active;
u32 tmp;
if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
ilk_get_fdi_m_n_config(crtc, crtc_state);
if (HAS_PCH_IBX(dev_priv)) {
/*
* The pipe->pch transcoder and pch transcoder->pll
* mapping is fixed.
*/
pll_id = (enum intel_dpll_id) pipe;
} else {
tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(pipe))
pll_id = DPLL_ID_PCH_PLL_B;
else
pll_id = DPLL_ID_PCH_PLL_A;
}
crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
pll = crtc_state->shared_dpll;
pll_active = intel_dpll_get_hw_state(dev_priv, pll,
&crtc_state->dpll_hw_state);
drm_WARN_ON(&dev_priv->drm, !pll_active);
tmp = crtc_state->dpll_hw_state.dpll;
crtc_state->pixel_multiplier =
((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
>> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
ilk_pch_clock_get(crtc_state);
}
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
u32 val, pipeconf_val;
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
PIPECONF_INTERLACED_ILK)
val |= TRANS_INTERLACED;
else
val |= TRANS_PROGRESSIVE;
intel_de_write(dev_priv, LPT_TRANSCONF, val);
if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
u32 val;
val = intel_de_read(dev_priv, LPT_TRANSCONF);
val &= ~TRANS_ENABLE;
intel_de_write(dev_priv, LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
void lpt_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
assert_pch_transcoder_disabled(dev_priv, PIPE_A);
lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
void lpt_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
lpt_disable_pch_transcoder(dev_priv);
lpt_disable_iclkip(dev_priv);
}
void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
ilk_get_fdi_m_n_config(crtc, crtc_state);
crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}

View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef _INTEL_PCH_DISPLAY_H_
#define _INTEL_PCH_DISPLAY_H_
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void ilk_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void ilk_pch_post_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void ilk_pch_get_config(struct intel_crtc_state *crtc_state);
void lpt_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void lpt_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void lpt_pch_get_config(struct intel_crtc_state *crtc_state);
#endif

View File

@ -0,0 +1,648 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
u32 tmp;
tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
u32 tmp;
lpt_fdi_reset_mphy(dev_priv);
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
mutex_lock(&dev_priv->sb_lock);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
mutex_unlock(&dev_priv->sb_lock);
}
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
lpt_disable_iclkip(dev_priv);
/* The iCLK virtual clock root frequency is in MHz,
* but the adjusted_mode->crtc_clock is in kHz. To get the
* divisors we need to divide one by the other, so we
* convert the virtual clock root frequency to kHz here for higher
* precision.
*/
for (auxdiv = 0; auxdiv < 2; auxdiv++) {
u32 iclk_virtual_root_freq = 172800 * 1000;
u32 iclk_pi_range = 64;
u32 desired_divisor;
desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
clock << auxdiv);
divsel = (desired_divisor / iclk_pi_range) - 2;
phaseinc = desired_divisor % iclk_pi_range;
/*
* Near 20MHz is a corner case which is
* out of range for the 7-bit divisor
*/
if (divsel <= 0x7f)
break;
}
/* This should not happen with any sane values */
drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
drm_dbg_kms(&dev_priv->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, auxdiv, divsel, phasedir, phaseinc);
mutex_lock(&dev_priv->sb_lock);
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
mutex_unlock(&dev_priv->sb_lock);
/* Wait for initialization time */
udelay(24);
intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
u32 divsel, phaseinc, auxdiv;
u32 iclk_virtual_root_freq = 172800 * 1000;
u32 iclk_pi_range = 64;
u32 desired_divisor;
u32 temp;
if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
mutex_lock(&dev_priv->sb_lock);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
mutex_unlock(&dev_priv->sb_lock);
return 0;
}
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
mutex_unlock(&dev_priv->sb_lock);
desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
desired_divisor << auxdiv);
}
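To make the divisor search in lpt_program_iclkip() and its inverse in lpt_get_iclkip() concrete, here is a small standalone sketch; the 148500 kHz dotclock is an arbitrary example and DIV_ROUND_CLOSEST is redefined locally as a simplified stand-in for the kernel macro.
#include <stdio.h>
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
int main(void)
{
	unsigned int clock = 148500;	/* hypothetical dotclock, kHz */
	unsigned int iclk_virtual_root_freq = 172800 * 1000;
	unsigned int iclk_pi_range = 64;
	unsigned int auxdiv, divsel = 0, phaseinc = 0;
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		unsigned int desired = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
							 clock << auxdiv);
		divsel = desired / iclk_pi_range - 2;
		phaseinc = desired % iclk_pi_range;
		if (divsel <= 0x7f)
			break;
	}
	/* auxdiv=0 divsel=16 phaseinc=12 for this clock */
	printf("auxdiv=%u divsel=%u phaseinc=%u\n", auxdiv, divsel, phaseinc);
	/* the readout path inverts the relation; ~148454 kHz due to quantization */
	printf("readback=%u kHz\n",
	       DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 ((divsel + 2) * iclk_pi_range + phaseinc) << auxdiv));
	return 0;
}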
/* Implements 3 different sequences from BSpec chapter "Display iCLK
* Programming" based on the parameters passed:
* - Sequence to enable CLKOUT_DP
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
bool with_spread, bool with_fdi)
{
u32 reg, tmp;
if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
"FDI requires downspread\n"))
with_spread = true;
if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(24);
if (with_spread) {
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
lpt_fdi_program_mphy(dev_priv);
}
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
mutex_unlock(&dev_priv->sb_lock);
}
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
u32 reg, tmp;
mutex_lock(&dev_priv->sb_lock);
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
if (!(tmp & SBI_SSCCTL_DISABLE)) {
if (!(tmp & SBI_SSCCTL_PATHALT)) {
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
udelay(32);
}
tmp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
mutex_unlock(&dev_priv->sb_lock);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)
static const u16 sscdivintphase[] = {
[BEND_IDX( 50)] = 0x3B23,
[BEND_IDX( 45)] = 0x3B23,
[BEND_IDX( 40)] = 0x3C23,
[BEND_IDX( 35)] = 0x3C23,
[BEND_IDX( 30)] = 0x3D23,
[BEND_IDX( 25)] = 0x3D23,
[BEND_IDX( 20)] = 0x3E23,
[BEND_IDX( 15)] = 0x3E23,
[BEND_IDX( 10)] = 0x3F23,
[BEND_IDX( 5)] = 0x3F23,
[BEND_IDX( 0)] = 0x0025,
[BEND_IDX( -5)] = 0x0025,
[BEND_IDX(-10)] = 0x0125,
[BEND_IDX(-15)] = 0x0125,
[BEND_IDX(-20)] = 0x0225,
[BEND_IDX(-25)] = 0x0225,
[BEND_IDX(-30)] = 0x0325,
[BEND_IDX(-35)] = 0x0325,
[BEND_IDX(-40)] = 0x0425,
[BEND_IDX(-45)] = 0x0425,
[BEND_IDX(-50)] = 0x0525,
};
/*
* Bend CLKOUT_DP
* steps -50 to 50 inclusive, in steps of 5
* < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
* change in clock period = -(steps / 10) * 5.787 ps
*/
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
u32 tmp;
int idx = BEND_IDX(steps);
if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
return;
if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
mutex_lock(&dev_priv->sb_lock);
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
else
tmp = 0x00000000;
intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
tmp &= 0xffff0000;
tmp |= sscdivintphase[idx];
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
mutex_unlock(&dev_priv->sb_lock);
}
#undef BEND_IDX
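A small illustration (not from the patch) of how the bend steps map onto the sscdivintphase[] table above and of the stated period formula; the -25 step value is an arbitrary example.
#include <stdio.h>
/* same mapping as BEND_IDX() above: -50..50 in steps of 5 -> indices 0..20 */
#define EXAMPLE_BEND_IDX(steps) ((50 + (steps)) / 5)
int main(void)
{
	int steps = -25;	/* hypothetical request: slow the clock down */
	/* index 5, i.e. the 0x0225 entry of sscdivintphase[] */
	printf("idx=%d\n", EXAMPLE_BEND_IDX(steps));
	/* change in clock period = -(steps / 10) * 5.787 ps => roughly +14.47 ps */
	printf("delta=%.2f ps\n", -(steps / 10.0) * 5.787);
	return 0;
}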
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
if (IS_BROADWELL(dev_priv) &&
(ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
return true;
return false;
}
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
return false;
}
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool has_fdi = false;
for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_fdi = true;
break;
default:
break;
}
}
/*
* The BIOS may have decided to use the PCH SSC
* reference so we must not disable it until the
* relevant PLLs have stopped relying on it. We'll
* just leave the PCH SSC reference enabled in case
* any active PLL is using it. It will get disabled
* after runtime suspend if we don't have FDI.
*
* TODO: Move the whole reference clock handling
* to the modeset sequence proper so that we can
* actually enable/disable/reconfigure these things
* safely. To do that we need to introduce a real
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
dev_priv->pch_ssc_use = 0;
if (spll_uses_pch_ssc(dev_priv)) {
drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
if (dev_priv->pch_ssc_use)
return;
if (has_fdi) {
lpt_bend_clkout_dp(dev_priv, 0);
lpt_enable_clkout_dp(dev_priv, true, true);
} else {
lpt_disable_clkout_dp(dev_priv);
}
}
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
bool using_ssc_source = false;
/* We need to take the global config into account */
for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
has_lvds = true;
break;
case INTEL_OUTPUT_EDP:
has_panel = true;
if (encoder->port == PORT_A)
has_cpu_edp = true;
break;
default:
break;
}
}
if (HAS_PCH_IBX(dev_priv)) {
has_ck505 = dev_priv->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
can_ssc = true;
}
/* Check if any DPLLs are using the SSC source */
for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
continue;
if ((temp & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
using_ssc_source = true;
break;
}
}
drm_dbg_kms(&dev_priv->drm,
"has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to set up the display ref clock before DPLL
* enabling. This is only under the driver's control after
* PCH B stepping; previous chipset steppings should
* ignore this setting.
*/
val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
/* As we must carefully and slowly disable/enable each source in turn,
* compute the final state we want first and check if we need to
* make any changes at all.
*/
final = val;
final &= ~DREF_NONSPREAD_SOURCE_MASK;
if (has_ck505)
final |= DREF_NONSPREAD_CK505_ENABLE;
else
final |= DREF_NONSPREAD_SOURCE_ENABLE;
final &= ~DREF_SSC_SOURCE_MASK;
final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
final &= ~DREF_SSC1_ENABLE;
if (has_panel) {
final |= DREF_SSC_SOURCE_ENABLE;
if (intel_panel_use_ssc(dev_priv) && can_ssc)
final |= DREF_SSC1_ENABLE;
if (has_cpu_edp) {
if (intel_panel_use_ssc(dev_priv) && can_ssc)
final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
else
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else {
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
}
} else if (using_ssc_source) {
final |= DREF_SSC_SOURCE_ENABLE;
final |= DREF_SSC1_ENABLE;
}
if (final == val)
return;
/* Always enable nonspread source */
val &= ~DREF_NONSPREAD_SOURCE_MASK;
if (has_ck505)
val |= DREF_NONSPREAD_CK505_ENABLE;
else
val |= DREF_NONSPREAD_SOURCE_ENABLE;
if (has_panel) {
val &= ~DREF_SSC_SOURCE_MASK;
val |= DREF_SSC_SOURCE_ENABLE;
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else {
val &= ~DREF_SSC1_ENABLE;
}
/* Get SSC going before enabling the outputs */
intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
drm_dbg_kms(&dev_priv->drm,
"Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else {
val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
}
} else {
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
}
intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
} else {
drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Turn off CPU output */
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
if (!using_ssc_source) {
drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
val |= DREF_SSC_SOURCE_DISABLE;
/* Turn off SSC1 */
val &= ~DREF_SSC1_ENABLE;
intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
}
}
BUG_ON(val != final);
}
/*
* Initialize reference clocks when the driver loads
*/
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
ilk_init_pch_refclk(dev_priv);
else if (HAS_PCH_LPT(dev_priv))
lpt_init_pch_refclk(dev_priv);
}

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef _INTEL_PCH_REFCLK_H_
#define _INTEL_PCH_REFCLK_H_
#include <linux/types.h>
struct drm_i915_private;
struct intel_crtc_state;
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
#endif

View File

@ -28,13 +28,13 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "skl_universal_plane.h"
/**
@ -588,7 +588,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
if (DISPLAY_VER(dev_priv) >= 12)
if (IS_ALDERLAKE_P(dev_priv))
return trans == TRANSCODER_A || trans == TRANSCODER_B;
else if (DISPLAY_VER(dev_priv) >= 12)
return trans == TRANSCODER_A;
else
return trans == TRANSCODER_EDP;
@ -1346,6 +1348,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp))
@ -1358,6 +1361,9 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
/* If we ever hit this, we will need to add refcount to pause/resume */
drm_WARN_ON(&dev_priv->drm, psr->paused);
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
psr->paused = true;
@ -1463,10 +1469,19 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val |= plane_state->uapi.dst.x1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
/* TODO: consider auxiliary surfaces */
x = plane_state->uapi.src.x1 >> 16;
y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
x = plane_state->view.color_plane[color_plane].x;
/*
* From Bspec: UV surface Start Y Position = half of Y plane Y
* start position.
*/
if (!color_plane)
y = plane_state->view.color_plane[color_plane].y + clip->y1;
else
y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
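A minimal sketch (not from the patch) of the Bspec Y/UV start-position rule quoted above, assuming a hypothetical NV12 plane whose damage clip starts at y1 = 64; the Y plane keeps the full 64-line offset while the UV plane gets half of it.
#include <stdio.h>
static int example_sel_fetch_y(int plane_y_start, int clip_y1, int color_plane)
{
	/* UV surface start Y is half of the Y plane start Y */
	if (color_plane == 0)
		return plane_y_start + clip_y1;
	return plane_y_start + clip_y1 / 2;
}
int main(void)
{
	printf("Y plane:  %d\n", example_sel_fetch_y(0, 64, 0));	/* 64 */
	printf("UV plane: %d\n", example_sel_fetch_y(0, 64, 1));	/* 32 */
	return 0;
}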
@ -1558,9 +1573,6 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
* also planes are not updated if they have a negative X
* position so for now doing a full update in these cases
*
* TODO: We are missing multi-planar formats handling, until it is
* implemented it will send full frame updates.
*
* Plane scaling and rotation are not supported by selective fetch and both
* properties can change without a modeset, so they need to be checked at every
* atomic commit.
@ -1570,7 +1582,6 @@ static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state
if (plane_state->uapi.dst.y1 < 0 ||
plane_state->uapi.dst.x1 < 0 ||
plane_state->scaler_id >= 0 ||
plane_state->hw.fb->format->num_planes > 1 ||
plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
return false;
@ -1696,6 +1707,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct drm_rect *sel_fetch_area, inter;
struct intel_plane *linked = new_plane_state->planar_linked_plane;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
!new_plane_state->uapi.visible)
@ -1714,6 +1726,24 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
crtc_state->update_planes |= BIT(plane->id);
/*
* Sel_fetch_area is calculated for the UV plane. Use the
* same area for the Y plane as well.
*/
if (linked) {
struct intel_plane_state *linked_new_plane_state;
struct drm_rect *linked_sel_fetch_area;
linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
if (IS_ERR(linked_new_plane_state))
return PTR_ERR(linked_new_plane_state);
linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
linked_sel_fetch_area->y1 = sel_fetch_area->y1;
linked_sel_fetch_area->y2 = sel_fetch_area->y2;
crtc_state->update_planes |= BIT(linked->id);
}
}
skip_sel_fetch_set_loop:
@ -1721,11 +1751,17 @@ skip_sel_fetch_set_loop:
return 0;
}
static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
if (!HAS_PSR(i915))
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@ -1740,6 +1776,7 @@ static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
* - All planes will go inactive
* - Changing between PSR versions
*/
needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
needs_to_disable |= !crtc_state->has_psr;
needs_to_disable |= !crtc_state->active_planes;
needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
@ -1751,20 +1788,6 @@ static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
}
}
void intel_psr_pre_plane_update(const struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;
if (!HAS_PSR(dev_priv))
return;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
_intel_psr_pre_plane_update(state, crtc_state);
}
static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
@ -1809,15 +1832,21 @@ void intel_psr_post_plane_update(const struct intel_atomic_state *state)
_intel_psr_post_plane_update(state, crtc_state);
}
/**
* psr_wait_for_idle - wait for PSR1 to idle
* @intel_dp: Intel DP
* @out_value: PSR status in case of failure
*
* Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
*
*/
static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/*
* Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
* As all higher states have bit 4 of the PSR2 state set, we can just wait for
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
return intel_de_wait_for_clear(dev_priv,
EDP_PSR2_STATUS(intel_dp->psr.transcoder),
EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
}
static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@ -1827,15 +1856,13 @@ static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
return __intel_wait_for_register(&dev_priv->uncore,
EDP_PSR_STATUS(intel_dp->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
return intel_de_wait_for_clear(dev_priv,
EDP_PSR_STATUS(intel_dp->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK, 50);
}
/**
* intel_psr_wait_for_idle - wait for PSR1 to idle
* intel_psr_wait_for_idle - wait for PSR to be ready for a pipe update
* @new_crtc_state: new CRTC state
*
* This function is expected to be called from pipe_update_start() where it is
@ -1852,19 +1879,23 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_status;
int ret;
mutex_lock(&intel_dp->psr.lock);
if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
if (!intel_dp->psr.enabled) {
mutex_unlock(&intel_dp->psr.lock);
continue;
}
/* when the PSR1 is enabled */
if (psr_wait_for_idle(intel_dp, &psr_status))
drm_err(&dev_priv->drm,
"PSR idle timed out 0x%x, atomic update may fail\n",
psr_status);
if (intel_dp->psr.psr2_enabled)
ret = _psr2_ready_for_pipe_update_locked(intel_dp);
else
ret = _psr1_ready_for_pipe_update_locked(intel_dp);
if (ret)
drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
mutex_unlock(&intel_dp->psr.lock);
}
}
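The readiness-wait rework above boils down to one dispatch: if PSR is enabled, poll either the PSR2 or the PSR1 status path and only warn on timeout. A minimal user-space sketch of that shape follows; the struct, the bit masks and the helper names are invented for illustration and merely stand in for the real register polls.

/*
 * Standalone model of the dispatch pattern above, not i915 code:
 * pick the PSR1 or PSR2 readiness check based on which mode is
 * enabled, warn on timeout, never block the pipe update.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct psr_state {
	bool enabled;
	bool psr2_enabled;
	unsigned int status;	/* stands in for the PSR status register */
};

/* real code polls EDP_PSR_STATUS until the state field clears (<= 50 ms) */
static int psr1_ready_locked(const struct psr_state *psr)
{
	return (psr->status & 0x7) ? -ETIMEDOUT : 0;
}

/* real code waits for EDP_PSR2_STATUS to drop below deep sleep */
static int psr2_ready_locked(const struct psr_state *psr)
{
	return (psr->status & 0x80) ? -ETIMEDOUT : 0;
}

static void wait_for_pipe_update_ready(const struct psr_state *psr)
{
	int ret;

	if (!psr->enabled)
		return;

	ret = psr->psr2_enabled ? psr2_ready_locked(psr)
				: psr1_ready_locked(psr);
	if (ret)
		fprintf(stderr, "PSR wait timed out, atomic update may fail\n");
}

int main(void)
{
	struct psr_state psr = { .enabled = true, .psr2_enabled = true, .status = 0 };

	wait_for_pipe_update_ready(&psr);	/* silent: PSR2 path reports ready */
	psr.status = 0x80;
	wait_for_pipe_update_ready(&psr);	/* warns: still not idle */
	return 0;
}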


@ -20,7 +20,8 @@ struct intel_plane;
struct intel_encoder;
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(const struct intel_atomic_state *state);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr_post_plane_update(const struct intel_atomic_state *state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);


@ -58,7 +58,6 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int level = intel_ddi_level(encoder, crtc_state, 0);
int n_entries, ln;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
@ -66,6 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
return;
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
u32 val = 0;
val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing);
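The SNPS PHY hunk above moves the drive-level lookup into the per-lane loop so each lane can be programmed with its own level. A small stand-alone sketch of that loop, with a made-up level table in place of the real buf_trans entries:

/* Illustrative only: per-lane level lookup instead of reusing lane 0. */
#include <stdio.h>

#define NUM_LANES 4

/* stand-in for intel_ddi_level(encoder, crtc_state, lane) */
static int ddi_level_for_lane(const int requested[NUM_LANES], int lane)
{
	return requested[lane];
}

int main(void)
{
	static const int vswing_table[] = { 26, 32, 38, 44 };	/* fake entries */
	const int requested[NUM_LANES] = { 0, 0, 2, 3 };

	for (int ln = 0; ln < NUM_LANES; ln++) {
		int level = ddi_level_for_lane(requested, ln);

		/* previously every lane reused the level computed for lane 0 */
		printf("lane %d -> vswing %d\n", ln, vswing_table[level]);
	}
	return 0;
}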


@ -45,6 +45,7 @@
#include "intel_atomic_plane.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
#include "i9xx_plane.h"
@ -118,7 +119,7 @@ static void i9xx_plane_linear_gamma(u16 gamma[8])
}
static void
chv_update_csc(const struct intel_plane_state *plane_state)
chv_sprite_update_csc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -190,7 +191,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
#define COS_0 1
static void
vlv_update_clrc(const struct intel_plane_state *plane_state)
vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -393,7 +394,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
static void vlv_update_gamma(const struct intel_plane_state *plane_state)
static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -417,45 +418,58 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state)
}
static void
vlv_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
vlv_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
unsigned long irqflags;
u32 sprctl;
sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
/* Sizes are 0 based */
crtc_w--;
crtc_h--;
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
plane_state->view.color_plane[0].stride);
plane_state->view.color_plane[0].mapping_stride);
intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
(crtc_h << 16) | crtc_w);
intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
vlv_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 sprctl, linear_offset;
unsigned long irqflags;
sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
chv_sprite_update_csc(plane_state);
if (key->flags) {
intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
@ -466,6 +480,8 @@ vlv_update_plane(struct intel_plane *plane,
key->max_value);
}
intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x);
@ -478,15 +494,15 @@ vlv_update_plane(struct intel_plane *plane,
intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
vlv_update_gamma(plane_state);
vlv_sprite_update_clrc(plane_state);
vlv_sprite_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
vlv_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
vlv_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@ -502,8 +518,8 @@ vlv_disable_plane(struct intel_plane *plane,
}
static bool
vlv_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
vlv_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@ -805,7 +821,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
i++;
}
static void ivb_update_gamma(const struct intel_plane_state *plane_state)
static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -835,28 +851,21 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
}
static void
ivb_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
ivb_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 sprctl, sprscale = 0;
u32 sprscale = 0;
unsigned long irqflags;
sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
/* Sizes are 0 based */
src_w--;
src_h--;
@ -866,17 +875,38 @@ ivb_update_plane(struct intel_plane *plane,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
plane_state->view.color_plane[0].stride);
plane_state->view.color_plane[0].mapping_stride);
intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (IS_IVYBRIDGE(dev_priv))
intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
ivb_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 sprctl, linear_offset;
unsigned long irqflags;
sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (key->flags) {
intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
@ -902,14 +932,14 @@ ivb_update_plane(struct intel_plane *plane,
intel_de_write_fw(dev_priv, SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_update_gamma(plane_state);
ivb_sprite_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
ivb_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
ivb_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@ -927,8 +957,8 @@ ivb_disable_plane(struct intel_plane *plane,
}
static bool
ivb_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
ivb_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@ -1106,7 +1136,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return dvscntr;
}
static void g4x_update_gamma(const struct intel_plane_state *plane_state)
static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -1136,7 +1166,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
gamma[i] = (i << 10) / 16;
}
static void ilk_update_gamma(const struct intel_plane_state *plane_state)
static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@ -1163,28 +1193,21 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state)
}
static void
g4x_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
g4x_sprite_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 dvscntr, dvsscale = 0;
u32 dvsscale = 0;
unsigned long irqflags;
dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
/* Sizes are 0 based */
src_w--;
src_h--;
@ -1194,16 +1217,37 @@ g4x_update_plane(struct intel_plane *plane,
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
plane_state->view.color_plane[0].stride);
plane_state->view.color_plane[0].mapping_stride);
intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
g4x_sprite_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
u32 dvscntr, linear_offset;
unsigned long irqflags;
dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (key->flags) {
intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
@ -1224,16 +1268,16 @@ g4x_update_plane(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
g4x_update_gamma(plane_state);
g4x_sprite_update_gamma(plane_state);
else
ilk_update_gamma(plane_state);
ilk_sprite_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
g4x_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
g4x_sprite_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@ -1250,8 +1294,8 @@ g4x_disable_plane(struct intel_plane *plane,
}
static bool
g4x_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
g4x_sprite_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
@ -1299,7 +1343,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
unsigned int stride = plane_state->view.color_plane[0].stride;
unsigned int stride = plane_state->view.color_plane[0].mapping_stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
int min_width, min_height;
@ -1567,7 +1611,7 @@ out:
return ret;
}
static const u32 g4x_plane_formats[] = {
static const u32 g4x_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@ -1575,13 +1619,7 @@ static const u32 g4x_plane_formats[] = {
DRM_FORMAT_VYUY,
};
static const u64 i9xx_plane_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const u32 snb_plane_formats[] = {
static const u32 snb_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB2101010,
@ -1594,7 +1632,7 @@ static const u32 snb_plane_formats[] = {
DRM_FORMAT_VYUY,
};
static const u32 vlv_plane_formats[] = {
static const u32 vlv_sprite_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@ -1629,13 +1667,8 @@ static const u32 chv_pipe_b_sprite_formats[] = {
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
@ -1655,13 +1688,8 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
@ -1686,13 +1714,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
return false;
}
switch (format) {
case DRM_FORMAT_C8:
@ -1762,9 +1785,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
return plane;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
plane->update_plane = vlv_update_plane;
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->update_noarm = vlv_sprite_update_noarm;
plane->update_arm = vlv_sprite_update_arm;
plane->disable_arm = vlv_sprite_disable_arm;
plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
plane->max_stride = i965_plane_max_stride;
plane->min_cdclk = vlv_plane_min_cdclk;
@ -1773,16 +1797,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
formats = chv_pipe_b_sprite_formats;
num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
} else {
formats = vlv_plane_formats;
num_formats = ARRAY_SIZE(vlv_plane_formats);
formats = vlv_sprite_formats;
num_formats = ARRAY_SIZE(vlv_sprite_formats);
}
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
} else if (DISPLAY_VER(dev_priv) >= 7) {
plane->update_plane = ivb_update_plane;
plane->disable_plane = ivb_disable_plane;
plane->get_hw_state = ivb_plane_get_hw_state;
plane->update_noarm = ivb_sprite_update_noarm;
plane->update_arm = ivb_sprite_update_arm;
plane->disable_arm = ivb_sprite_disable_arm;
plane->get_hw_state = ivb_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
@ -1793,28 +1817,27 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->min_cdclk = ivb_sprite_min_cdclk;
}
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
formats = snb_sprite_formats;
num_formats = ARRAY_SIZE(snb_sprite_formats);
plane_funcs = &snb_sprite_funcs;
} else {
plane->update_plane = g4x_update_plane;
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->update_noarm = g4x_sprite_update_noarm;
plane->update_arm = g4x_sprite_update_arm;
plane->disable_arm = g4x_sprite_disable_arm;
plane->get_hw_state = g4x_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
plane->max_stride = g4x_sprite_max_stride;
plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
if (IS_SANDYBRIDGE(dev_priv)) {
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
formats = snb_sprite_formats;
num_formats = ARRAY_SIZE(snb_sprite_formats);
plane_funcs = &snb_sprite_funcs;
} else {
formats = g4x_plane_formats;
num_formats = ARRAY_SIZE(g4x_plane_formats);
formats = g4x_sprite_formats;
num_formats = ARRAY_SIZE(g4x_sprite_formats);
plane_funcs = &g4x_sprite_funcs;
}
@ -1833,11 +1856,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
kfree(modifiers);
if (ret)
goto fail;
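The sprite rework above splits each plane's programming into an update_noarm phase (writes that do not take effect yet) and an update_arm phase (the control and surface writes that latch everything at vblank). A simplified stand-alone model of that vtable split; the struct layout and the register names in the print statements are invented for illustration:

/* Not i915 code: a toy two-phase plane update dispatched through a vtable. */
#include <stdio.h>

struct plane;

struct plane_funcs {
	void (*update_noarm)(struct plane *p);	/* position, size, stride, ... */
	void (*update_arm)(struct plane *p);	/* control + surface: latches everything */
	void (*disable_arm)(struct plane *p);
};

struct plane {
	const char *name;
	const struct plane_funcs *funcs;
};

static void vlv_like_update_noarm(struct plane *p)
{
	printf("%s: write SPSTRIDE/SPPOS/SPSIZE (not latched yet)\n", p->name);
}

static void vlv_like_update_arm(struct plane *p)
{
	printf("%s: write SPCNTR + SPSURF (latched at vblank)\n", p->name);
}

static void vlv_like_disable_arm(struct plane *p)
{
	printf("%s: clear SPCNTR + SPSURF\n", p->name);
}

static const struct plane_funcs vlv_like_funcs = {
	.update_noarm = vlv_like_update_noarm,
	.update_arm = vlv_like_update_arm,
	.disable_arm = vlv_like_disable_arm,
};

int main(void)
{
	struct plane sprite = { .name = "sprite A", .funcs = &vlv_like_funcs };

	/* commit path: do all noarm writes first, then arm everything at once */
	sprite.funcs->update_noarm(&sprite);
	sprite.funcs->update_arm(&sprite);
	sprite.funcs->disable_arm(&sprite);
	return 0;
}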


@ -27,14 +27,10 @@ struct intel_plane_state;
#define VBLANK_EVASION_TIME_US 100
#endif
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);


@ -442,10 +442,10 @@ calculate_rc_params(struct rc_parameters *rc,
}
}
int intel_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
const struct rc_parameters *rc_params;
@ -598,7 +598,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
pps_val);
@ -622,7 +622,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
/* Populate PICTURE_PARAMETER_SET_1 registers */
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
pps_val);
@ -647,7 +647,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
pps_val);
@ -672,7 +672,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
pps_val);
@ -697,7 +697,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
pps_val);
@ -722,7 +722,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
pps_val);
@ -749,7 +749,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
pps_val);
@ -774,7 +774,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
pps_val);
@ -799,7 +799,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
pps_val);
@ -824,7 +824,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
pps_val);
@ -851,7 +851,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
pps_val);
@ -879,7 +879,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_width) |
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
pps_val);
@ -906,8 +906,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
rc_buf_thresh_dword[i / 4] |=
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
@ -963,8 +963,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
RC_MAX_QP_SHIFT) |
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
@ -1055,8 +1055,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
}
}
static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@ -1064,6 +1064,9 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
struct drm_dsc_picture_parameter_set pps;
enum port port;
if (!crtc_state->dsc.compression_enable)
return;
drm_dsc_pps_payload_pack(&pps, vdsc_cfg);
for_each_dsi_port(port, intel_dsi->ports) {
@ -1074,14 +1077,16 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
}
}
static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
if (!crtc_state->dsc.compression_enable)
return;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header);
@ -1142,8 +1147,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
}
}
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@ -1155,13 +1159,6 @@ void intel_dsc_enable(struct intel_encoder *encoder,
intel_dsc_pps_configure(crtc_state);
if (!crtc_state->bigjoiner_slave) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
intel_dsc_dsi_pps_write(encoder, crtc_state);
else
intel_dsc_dp_pps_write(encoder, crtc_state);
}
dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
if (crtc_state->dsc.dsc_split) {
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;


@ -15,15 +15,17 @@ struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int intel_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */


@ -60,7 +60,7 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
* Between those two points the vblank exit starts (and hence registers get
* latched) ASAP after a push is sent.
*
* framestart_delay is programmable 0-3.
* framestart_delay is programmable 1-4.
*/
static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
{
@ -138,13 +138,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
i915->window2_delay;
else
/*
* FIXME: s/4/framestart_delay+1/ to get consistent
* FIXME: s/4/framestart_delay/ to get consistent
* earliest/latest points for register latching regardless
* of the framestart_delay used?
*
* FIXME: this really needs the extra scanline to provide consistent
* behaviour for all framestart_delay values. Otherwise with
* framestart_delay==3 we will end up extending the min vblank by
* framestart_delay==4 we will end up extending the min vblank by
* one extra line.
*/
crtc_state->vrr.pipeline_full =
@ -193,6 +193,18 @@ void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
TRANS_PUSH_EN | TRANS_PUSH_SEND);
}
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->vrr.enable)
return false;
return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND;
}
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);


@ -23,6 +23,7 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
void intel_vrr_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
void intel_vrr_get_config(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);


@ -4,6 +4,7 @@
*/
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"


@ -163,50 +163,6 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_XVYU16161616,
};
static const u64 skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const u64 skl_plane_format_modifiers_ccs[] = {
I915_FORMAT_MOD_Yf_TILED_CCS,
I915_FORMAT_MOD_Y_TILED_CCS,
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const u64 adlp_step_a_plane_format_modifiers[] = {
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
switch (format) {
@ -633,7 +589,7 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state,
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
u32 stride = plane_state->view.color_plane[color_plane].stride;
u32 stride = plane_state->view.color_plane[color_plane].scanout_stride;
if (color_plane >= fb->format->num_planes)
return 0;
@ -642,8 +598,8 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state,
}
static void
skl_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
skl_plane_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@ -985,6 +941,9 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
if (plane_state->force_black)
plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
return plane_color_ctl;
}
@ -1008,115 +967,145 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
}
}
static void intel_load_plane_csc_black(struct intel_plane *intel_plane)
static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
enum pipe pipe = intel_plane->pipe;
enum plane_id plane = intel_plane->id;
u16 postoff = 0;
u32 plane_surf;
drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n",
intel_plane->base.name, plane);
intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0);
intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0);
plane_surf = intel_plane_ggtt_offset(plane_state) +
skl_surf_address(plane_state, color_plane);
intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0);
intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0);
if (plane_state->decrypt)
plane_surf |= PLANE_SURF_DECRYPT;
intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0);
intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0);
return plane_surf;
}
intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
static void icl_plane_csc_load_black(struct intel_plane *plane)
{
struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff);
intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff);
intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff);
intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
static void
skl_program_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane)
skl_program_plane_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = skl_surf_address(plane_state, color_plane);
u32 stride = skl_plane_stride(plane_state, color_plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int aux_plane = skl_main_to_aux_plane(fb, color_plane);
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
u32 x = plane_state->view.color_plane[color_plane].x;
u32 y = plane_state->view.color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_color_ctl = 0, aux_dist = 0;
unsigned long irqflags;
u32 keymsk, keymax, plane_surf;
u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
if (DISPLAY_VER(dev_priv) >= 10)
plane_color_ctl = plane_state->color_ctl |
glk_plane_color_ctl_crtc(crtc_state);
/* Sizes are 0 based */
src_w--;
src_h--;
keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
keymsk = key->channel_mask & 0x7ffffff;
if (alpha < 0xff)
keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
/* The scaler will handle the output position */
if (plane_state->scaler_id >= 0) {
crtc_x = 0;
crtc_y = 0;
}
if (aux_plane) {
aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr;
if (DISPLAY_VER(dev_priv) < 12)
aux_dist |= skl_plane_stride(plane_state, aux_plane);
}
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/*
* FIXME: pxp session invalidation can hit any time even at time of commit
* or after the commit, display content will be garbage.
*/
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
(src_h << 16) | src_w);
intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
if (icl_is_hdr_plane(dev_priv, plane_id))
intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
plane_state->cus_ctl);
if (DISPLAY_VER(dev_priv) >= 10)
intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
plane_color_ctl);
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier))
intel_uncore_write64_fw(&dev_priv->uncore,
PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
skl_write_plane_wm(plane, crtc_state);
intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
skl_program_plane_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
const struct drm_framebuffer *fb = plane_state->hw.fb;
int aux_plane = skl_main_to_aux_plane(fb, color_plane);
u32 x = plane_state->view.color_plane[color_plane].x;
u32 y = plane_state->view.color_plane[color_plane].y;
u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0;
u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_ctl = plane_state->ctl;
unsigned long irqflags;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
if (DISPLAY_VER(dev_priv) >= 10)
plane_color_ctl = plane_state->color_ctl |
glk_plane_color_ctl_crtc(crtc_state);
keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
keymsk = key->channel_mask & 0x7ffffff;
if (alpha < 0xff)
keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
if (aux_plane) {
aux_dist = skl_surf_address(plane_state, aux_plane) -
skl_surf_address(plane_state, color_plane);
if (DISPLAY_VER(dev_priv) < 12)
aux_dist |= skl_plane_stride(plane_state, aux_plane);
}
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
key->min_value);
intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
@ -1125,17 +1114,22 @@ skl_program_plane(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
(y << 16) | x);
intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
if (DISPLAY_VER(dev_priv) < 11)
intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
(plane_state->view.color_plane[1].y << 16) |
plane_state->view.color_plane[1].x);
intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
if (DISPLAY_VER(dev_priv) >= 10)
intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
/*
* Enable the scaler before the plane so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
*
* TODO: split into noarm+arm pair
*/
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
@ -1146,23 +1140,8 @@ skl_program_plane(struct intel_plane *plane,
* the control register just before the surface register.
*/
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr;
plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id));
/*
* FIXME: pxp session invalidation can hit any time even at time of commit
* or after the commit, display content will be garbage.
*/
if (plane_state->decrypt) {
plane_surf |= PLANE_SURF_DECRYPT;
} else if (plane_state->force_black) {
intel_load_plane_csc_black(plane);
plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
}
intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
plane_color_ctl);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
skl_plane_surf(plane_state, color_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@ -1177,7 +1156,6 @@ skl_plane_async_flip(struct intel_plane *plane,
unsigned long irqflags;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 surf_addr = plane_state->view.color_plane[0].offset;
u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
@ -1189,15 +1167,15 @@ skl_plane_async_flip(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
skl_plane_surf(plane_state, 0));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
skl_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
int color_plane = 0;
@ -1205,7 +1183,21 @@ skl_update_plane(struct intel_plane *plane,
/* Program the UV plane on planar master */
color_plane = 1;
skl_program_plane(plane, crtc_state, plane_state, color_plane);
skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane);
}
static void
skl_plane_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
int color_plane = 0;
if (plane_state->planar_linked_plane && !plane_state->planar_slave)
/* Program the UV plane on planar master */
color_plane = 1;
skl_program_plane_arm(plane, crtc_state, plane_state, color_plane);
}
static bool intel_format_is_p01x(u32 format)
@ -1232,7 +1224,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
return 0;
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
is_ccs_modifier(fb->modifier)) {
intel_fb_is_ccs_modifier(fb->modifier)) {
drm_dbg_kms(&dev_priv->drm,
"RC support only with 0/180 degree rotation (%x)\n",
rotation);
@ -1284,13 +1276,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
/* Y-tiling is not supported in IF-ID Interlace mode */
if (crtc_state->hw.enable &&
crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
(fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
fb->modifier != DRM_FORMAT_MOD_LINEAR &&
fb->modifier != I915_FORMAT_MOD_X_TILED) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
@ -1487,7 +1474,7 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) {
while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
@ -1536,7 +1523,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
*/
if (is_ccs_modifier(fb->modifier)) {
if (intel_fb_is_ccs_modifier(fb->modifier)) {
while (!skl_check_main_ccs_coordinates(plane_state, x, y,
offset, aux_plane)) {
if (offset == 0)
@ -1600,7 +1587,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
offset = intel_plane_compute_aligned_offset(&x, &y,
plane_state, uv_plane);
if (is_ccs_modifier(fb->modifier)) {
if (intel_fb_is_ccs_modifier(fb->modifier)) {
int ccs_plane = main_to_ccs_plane(fb, uv_plane);
u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
u32 alignment = intel_surf_alignment(fb, uv_plane);
@ -1656,8 +1643,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int hsub, vsub;
int x, y;
if (!is_ccs_plane(fb, ccs_plane) ||
is_gen12_ccs_cc_plane(fb, ccs_plane))
if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane))
continue;
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
@ -1699,7 +1685,7 @@ static int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since the main surface setup depends on
* it.
*/
if (is_ccs_modifier(fb->modifier)) {
if (intel_fb_is_ccs_modifier(fb->modifier)) {
ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
@ -1737,6 +1723,18 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
}
}
static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
}
static bool pxp_is_borked(struct drm_i915_gem_object *obj)
{
return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
}
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
@ -1781,6 +1779,11 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
if (DISPLAY_VER(dev_priv) >= 11) {
plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
}
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->hw.alpha >> 8))
plane_state->uapi.visible = false;
@ -1870,49 +1873,20 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}
static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
if (plane_id == PLANE_CURSOR)
return false;
if (DISPLAY_VER(dev_priv) >= 11)
return true;
if (IS_GEMINILAKE(dev_priv))
return pipe != PIPE_C;
return pipe != PIPE_C &&
(plane_id == PLANE_PRIMARY ||
plane_id == PLANE_SPRITE0);
}
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct intel_plane *plane = to_intel_plane(_plane);
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
break;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
if (!plane->has_ccs)
return false;
break;
default:
if (!intel_fb_plane_supports_modifier(plane, modifier))
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
if (is_ccs_modifier(modifier))
if (intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_RGB565:
@ -1953,52 +1927,20 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
/* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0))
return false;
/* Wa_22011186057 */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
return false;
return plane_id < PLANE_SPRITE4;
}
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct drm_i915_private *dev_priv = to_i915(_plane->dev);
struct intel_plane *plane = to_intel_plane(_plane);
switch (modifier) {
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
return false;
fallthrough;
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
break;
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
/* Wa_22011186057 */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
return false;
break;
default:
if (!intel_fb_plane_supports_modifier(plane, modifier))
return false;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
if (is_ccs_modifier(modifier))
if (intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_YUYV:
@ -2010,7 +1952,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
if (intel_fb_is_mc_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_RGB565:
@ -2039,18 +1981,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
}
}
static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
/* Wa_22011186057 */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
return adlp_step_a_plane_format_modifiers;
else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
return gen12_plane_format_modifiers_mc_ccs;
else
return gen12_plane_format_modifiers_rc_ccs;
}
static const struct drm_plane_funcs skl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@ -2091,6 +2021,64 @@ skl_plane_disable_flip_done(struct intel_plane *plane)
spin_unlock_irq(&i915->irq_lock);
}
static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
enum pipe pipe, enum plane_id plane_id)
{
/* Wa_22011186057 */
if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
return false;
if (DISPLAY_VER(i915) >= 11)
return true;
if (IS_GEMINILAKE(i915))
return pipe != PIPE_C;
return pipe != PIPE_C &&
(plane_id == PLANE_PRIMARY ||
plane_id == PLANE_SPRITE0);
}
static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
enum plane_id plane_id)
{
if (DISPLAY_VER(i915) < 12)
return false;
/* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
return false;
/* Wa_22011186057 */
if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
return false;
return plane_id < PLANE_SPRITE4;
}
static u8 skl_get_plane_caps(struct drm_i915_private *i915,
enum pipe pipe, enum plane_id plane_id)
{
u8 caps = INTEL_PLANE_CAP_TILING_X;
if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915))
caps |= INTEL_PLANE_CAP_TILING_Y;
if (DISPLAY_VER(i915) < 12)
caps |= INTEL_PLANE_CAP_TILING_Yf;
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
caps |= INTEL_PLANE_CAP_CCS_RC_CC;
}
if (gen12_plane_has_mc_ccs(i915, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
return caps;
}
struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
@ -2113,12 +2101,10 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->id = plane_id;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
if (plane->has_fbc) {
struct intel_fbc *fbc = &dev_priv->fbc;
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
if (skl_plane_has_fbc(dev_priv, pipe, plane_id))
plane->fbc = &dev_priv->fbc;
if (plane->fbc)
plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
if (DISPLAY_VER(dev_priv) >= 11) {
plane->min_width = icl_plane_min_width;
@ -2136,8 +2122,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
}
plane->max_stride = skl_plane_max_stride;
plane->update_plane = skl_update_plane;
plane->disable_plane = skl_disable_plane;
plane->update_noarm = skl_plane_update_noarm;
plane->update_arm = skl_plane_update_arm;
plane->disable_arm = skl_plane_disable_arm;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
@ -2159,29 +2146,28 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (DISPLAY_VER(dev_priv) >= 12) {
modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
if (DISPLAY_VER(dev_priv) >= 12)
plane_funcs = &gen12_plane_funcs;
} else {
if (plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
modifiers = skl_plane_format_modifiers_noccs;
else
plane_funcs = &skl_plane_funcs;
}
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
modifiers = intel_fb_plane_get_modifiers(dev_priv,
skl_get_plane_caps(dev_priv, pipe, plane_id));
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
pipe_name(pipe));
kfree(modifiers);
if (ret)
goto fail;

View File

@ -38,9 +38,12 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
#include "skl_scaler.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_sideband.h"
/* return pixels in terms of txbyteclkhs */
@ -1258,7 +1261,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 pclk;
drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
@ -1270,6 +1275,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pclk = vlv_dsi_get_pclk(encoder, pipe_config);
}
if (intel_dsi->dual_link)
pclk *= 2;
if (pclk) {
pipe_config->hw.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __VLV_DSI_H__
#define __VLV_DSI_H__
#include <linux/types.h>
enum port;
struct drm_i915_private;
struct intel_dsi;
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
void vlv_dsi_init(struct drm_i915_private *dev_priv);
#endif /* __VLV_DSI_H__ */

View File

@ -31,6 +31,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_sideband.h"
static const u16 lfsr_converts[] = {

View File

@ -0,0 +1,38 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __VLV_DSI_PLL_H__
#define __VLV_DSI_PLL_H__
#include <linux/types.h>
enum port;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void vlv_dsi_pll_disable(struct intel_encoder *encoder);
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void bxt_dsi_pll_disable(struct intel_encoder *encoder);
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
void assert_dsi_pll_enabled(struct drm_i915_private *i915);
void assert_dsi_pll_disabled(struct drm_i915_private *i915);
#endif /* __VLV_DSI_PLL_H__ */

View File

@ -479,7 +479,7 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
!IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
drm_dbg(&i915->drm,
"Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n");
"Bonding not supported on this platform\n");
return -ENODEV;
}

View File

@ -3,12 +3,14 @@
* Copyright © 2020 Intel Corporation
*/
#include <linux/agp_backend.h>
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
@ -116,17 +118,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
return false;
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
*
* Suspend the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*/
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
int open;
mutex_lock(&ggtt->vm.mutex);
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
mutex_lock(&vm->mutex);
/* Skip rewriting PTE on VMA unbind. */
open = atomic_xchg(&ggtt->vm.open, 0);
open = atomic_xchg(&vm->open, 0);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
@ -139,11 +150,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt)
}
}
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
ggtt->invalidate(ggtt);
atomic_set(&ggtt->vm.open, open);
vm->clear_range(vm, 0, vm->total);
mutex_unlock(&ggtt->vm.mutex);
atomic_set(&vm->open, open);
mutex_unlock(&vm->mutex);
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
i915_ggtt_suspend_vm(&ggtt->vm);
ggtt->invalidate(ggtt);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
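The suspend/resume logic is split into *_vm helpers so the same path can serve both the GGTT and a display DPT page table, as the kerneldoc below notes. A minimal sketch of how a DPT caller might pair them; the example_* names are hypothetical, and the cache flush simply mirrors what the GGTT resume path does:

static void example_dpt_suspend(struct i915_address_space *dpt_vm)
{
	/* Tear down the mappings without rewriting PTEs on unbind */
	i915_ggtt_suspend_vm(dpt_vm);
}

static void example_dpt_resume(struct i915_address_space *dpt_vm)
{
	/*
	 * A true return means some object had a GTT write domain before
	 * suspend, so CPU caches must be flushed before HW reuses the pages.
	 */
	if (i915_ggtt_resume_vm(dpt_vm))
		wbinvd_on_all_cpus();
}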
@ -1253,37 +1270,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
void i915_ggtt_resume(struct i915_ggtt *ggtt)
/**
* i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
* @vm: The VM to restore the mappings for
*
* Restore the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*
* Returns %true if the mapping was restored for any object that was in a write
* domain before suspend.
*/
bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool flush = false;
bool write_domain_objs = false;
int open;
intel_gt_check_and_clear_faults(ggtt->vm.gt);
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
vm->clear_range(vm, 0, vm->total);
/* Skip rewriting PTE on VMA unbind. */
open = atomic_xchg(&ggtt->vm.open, 0);
open = atomic_xchg(&vm->open, 0);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
unsigned int was_bound =
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
vma->ops->bind_vma(&ggtt->vm, NULL, vma,
vma->ops->bind_vma(vm, NULL, vma,
obj ? obj->cache_level : 0,
was_bound);
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
}
atomic_set(&ggtt->vm.open, open);
atomic_set(&vm->open, open);
return write_domain_objs;
}
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
bool flush;
intel_gt_check_and_clear_faults(ggtt->vm.gt);
flush = i915_ggtt_resume_vm(&ggtt->vm);
ggtt->invalidate(ggtt);
if (flush)
@ -1388,30 +1427,39 @@ err_st_alloc:
}
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj,
unsigned int offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
add_padding_pages(unsigned int count,
struct sg_table *st, struct scatterlist *sg)
{
st->nents++;
/*
* The DE ignores the PTEs for the padding tiles, the sg entry
* here is just a convenience to indicate how many padding PTEs
* to insert at this spot.
*/
sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
return sg;
}
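As a worked illustration of the padding bookkeeping above (numbers made up): a remapped row carrying 4 pages of data with a destination stride of 5 pages needs one NULL-page sg entry per row. A tiny helper expressing that arithmetic, purely as a sketch and not driver code:

/* Padding PTEs the DE skips between rows; all counts in GTT pages. */
static unsigned int example_row_padding(unsigned int width,
					unsigned int dst_stride)
{
	return dst_stride > width ? dst_stride - width : 0;
}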
static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
unsigned int offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg,
unsigned int *gtt_offset)
{
unsigned int row;
if (!width || !height)
return sg;
if (alignment_pad) {
st->nents++;
/*
* The DE ignores the PTEs for the padding tiles, the sg entry
* here is just a convenience to indicate how many padding PTEs
* to insert at this spot.
*/
sg_set_page(sg, NULL, alignment_pad * 4096, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = alignment_pad * 4096;
sg = sg_next(sg);
}
if (alignment_pad)
sg = add_padding_pages(alignment_pad, st, sg);
for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE;
@ -1448,19 +1496,99 @@ remap_pages(struct drm_i915_gem_object *obj,
if (!left)
continue;
st->nents++;
/*
* The DE ignores the PTEs for the padding tiles, the sg entry
* here is just a convenience to indicate how many padding PTEs
* to insert at this spot.
*/
sg_set_page(sg, NULL, left, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = left;
sg = sg_next(sg);
sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
}
*gtt_offset += alignment_pad + dst_stride * height;
return sg;
}
static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
unsigned int obj_offset,
unsigned int count,
struct sg_table *st, struct scatterlist *sg)
{
struct scatterlist *iter;
unsigned int offset;
iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
GEM_BUG_ON(!iter);
do {
unsigned int len;
len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
count << PAGE_SHIFT);
sg_set_page(sg, NULL, len, 0);
sg_dma_address(sg) =
sg_dma_address(iter) + (offset << PAGE_SHIFT);
sg_dma_len(sg) = len;
st->nents++;
count -= len >> PAGE_SHIFT;
if (count == 0)
return sg;
sg = __sg_next(sg);
iter = __sg_next(iter);
offset = 0;
} while (1);
}
static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
unsigned int obj_offset, unsigned int alignment_pad,
unsigned int size,
struct sg_table *st, struct scatterlist *sg,
unsigned int *gtt_offset)
{
if (!size)
return sg;
if (alignment_pad)
sg = add_padding_pages(alignment_pad, st, sg);
sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
sg = sg_next(sg);
*gtt_offset += alignment_pad + size;
return sg;
}
static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj,
int color_plane,
struct sg_table *st, struct scatterlist *sg,
unsigned int *gtt_offset)
{
unsigned int alignment_pad = 0;
if (rem_info->plane_alignment)
alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
if (rem_info->plane[color_plane].linear)
sg = remap_linear_color_plane_pages(obj,
rem_info->plane[color_plane].offset,
alignment_pad,
rem_info->plane[color_plane].size,
st, sg,
gtt_offset);
else
sg = remap_tiled_color_plane_pages(obj,
rem_info->plane[color_plane].offset,
alignment_pad,
rem_info->plane[color_plane].width,
rem_info->plane[color_plane].height,
rem_info->plane[color_plane].src_stride,
rem_info->plane[color_plane].dst_stride,
st, sg,
gtt_offset);
return sg;
}
@ -1488,21 +1616,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
st->nents = 0;
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
unsigned int alignment_pad = 0;
if (rem_info->plane_alignment)
alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
sg = remap_pages(obj,
rem_info->plane[i].offset, alignment_pad,
rem_info->plane[i].width, rem_info->plane[i].height,
rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
st, sg);
gtt_offset += alignment_pad +
rem_info->plane[i].dst_stride * rem_info->plane[i].height;
}
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
i915_sg_trim(st);
@ -1524,9 +1639,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
struct scatterlist *sg, *iter;
struct scatterlist *sg;
unsigned int count = view->partial.size;
unsigned int offset;
int ret = -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@ -1537,34 +1651,14 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
GEM_BUG_ON(!iter);
sg = st->sgl;
st->nents = 0;
do {
unsigned int len;
len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
count << PAGE_SHIFT);
sg_set_page(sg, NULL, len, 0);
sg_dma_address(sg) =
sg_dma_address(iter) + (offset << PAGE_SHIFT);
sg_dma_len(sg) = len;
sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
st->nents++;
count -= len >> PAGE_SHIFT;
if (count == 0) {
sg_mark_end(sg);
i915_sg_trim(st); /* Drop any unused tail entries. */
sg_mark_end(sg);
i915_sg_trim(st); /* Drop any unused tail entries. */
return st;
}
sg = __sg_next(sg);
iter = __sg_next(iter);
offset = 0;
} while (1);
return st;
err_sg_alloc:
kfree(st);

View File

@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
#include <drm/intel-gtt.h>
#include "intel_gt_debugfs.h"
#include "gem/i915_gem_lmem.h"

View File

@ -544,6 +544,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

View File

@ -29,8 +29,8 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@ -48,12 +48,14 @@
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_dmc.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
@ -70,6 +72,7 @@
#include "pxp/intel_pxp_pm.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
#include "i915_irq.h"
@ -89,7 +92,7 @@
#include "intel_region_ttm.h"
#include "vlv_suspend.h"
static const struct drm_driver driver;
static const struct drm_driver i915_drm_driver;
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
@ -322,7 +325,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->audio.mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
mutex_init(&dev_priv->hdcp_comp_mutex);
@ -766,7 +769,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *device_info;
struct drm_i915_private *i915;
i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
struct drm_i915_private, drm);
if (IS_ERR(i915))
return i915;
@ -1127,6 +1130,8 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_hw(dev_priv);
/* Must be called before GGTT is suspended. */
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(&dev_priv->ggtt);
i915_save_display(dev_priv);
@ -1183,6 +1188,14 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
goto out;
}
/*
* FIXME: Temporary hammer to avoid freezing the machine on our DGFX
* This should be totally removed when we handle the pci states properly
* on runtime PM and on s2idle cases.
*/
if (suspend_to_idle(dev_priv))
pci_d3cold_disable(pdev);
pci_disable_device(pdev);
/*
* During hibernation on some platforms the BIOS may try to access
@ -1207,7 +1220,8 @@ out:
return ret;
}
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
pm_message_t state)
{
int error;
@ -1243,6 +1257,8 @@ static int i915_drm_resume(struct drm_device *dev)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
i915_ggtt_resume(&dev_priv->ggtt);
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
intel_dmc_ucode_resume(dev_priv);
@ -1344,6 +1360,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
pci_set_master(pdev);
pci_d3cold_enable(pdev);
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = vlv_resume_prepare(dev_priv, false);
@ -1364,7 +1382,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
return ret;
}
int i915_resume_switcheroo(struct drm_i915_private *i915)
int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
@ -1520,6 +1538,7 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@ -1565,6 +1584,12 @@ static int intel_runtime_suspend(struct device *kdev)
drm_err(&dev_priv->drm,
"Unclaimed access detected prior to suspending\n");
/*
* FIXME: Temporary hammer to avoid freezing the machine on our DGFX
* This should be totally removed when we handle the pci states properly
* on runtime PM and on s2idle cases.
*/
pci_d3cold_disable(pdev);
rpm->suspended = true;
/*
@ -1603,6 +1628,7 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@ -1615,6 +1641,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
pci_d3cold_enable(pdev);
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
@ -1777,7 +1804,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
static const struct drm_driver driver = {
static const struct drm_driver i915_drm_driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/

View File

@ -0,0 +1,24 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __I915_DRIVER_H__
#define __I915_DRIVER_H__
#include <linux/pm.h>
struct pci_dev;
struct pci_device_id;
struct drm_i915_private;
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);
void i915_driver_shutdown(struct drm_i915_private *i915);
int i915_driver_resume_switcheroo(struct drm_i915_private *i915);
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
#endif /* __I915_DRIVER_H__ */

View File

@ -50,7 +50,6 @@
#include <linux/stackdepot.h>
#include <linux/xarray.h>
#include <drm/intel-gtt.h>
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
@ -364,15 +363,6 @@ struct intel_color_funcs {
void (*read_luts)(struct intel_crtc_state *crtc_state);
};
struct intel_audio_funcs {
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void (*audio_codec_disable)(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
};
struct intel_cdclk_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config);
@ -411,10 +401,14 @@ struct drm_i915_display_funcs {
void (*commit_modeset_enables)(struct intel_atomic_state *state);
};
struct intel_fbc_funcs;
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
struct intel_fbc {
struct drm_i915_private *i915;
const struct intel_fbc_funcs *funcs;
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
@ -828,6 +822,30 @@ struct i915_selftest_stash {
struct ida mock_region_instances;
};
/* intel_audio.c private */
struct intel_audio_funcs;
struct intel_audio_private {
/* Display internal audio functions */
const struct intel_audio_funcs *funcs;
/* hda/i915 audio component */
struct i915_audio_component *component;
bool component_registered;
/* mutex for audio/video sync */
struct mutex mutex;
int power_refcount;
u32 freq_cntrl;
/* Used to save the pipe-to-encoder mapping for audio */
struct intel_encoder *encoder_map[I915_MAX_PIPES];
/* necessary resource sharing with HDMI LPE audio driver. */
struct {
struct platform_device *platdev;
int irq;
} lpe;
};
struct drm_i915_private {
struct drm_device drm;
@ -995,9 +1013,6 @@ struct drm_i915_private {
/* Display internal color functions */
const struct intel_color_funcs *color_funcs;
/* Display internal audio functions */
const struct intel_audio_funcs *audio_funcs;
/* Display CDCLK functions */
const struct intel_cdclk_funcs *cdclk_funcs;
@ -1084,17 +1099,6 @@ struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
/* hda/i915 audio component */
struct i915_audio_component *audio_component;
bool audio_component_registered;
/**
* av_mutex - mutex for audio/video sync
*
*/
struct mutex av_mutex;
int audio_power_refcount;
u32 audio_freq_cntrl;
u32 fdi_rx_config;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@ -1227,14 +1231,7 @@ struct drm_i915_private {
bool ipc_enabled;
/* Used to save the pipe-to-encoder mapping for audio */
struct intel_encoder *av_enc_map[I915_MAX_PIPES];
/* necessary resource sharing with HDMI LPE audio driver. */
struct {
struct platform_device *platdev;
int irq;
} lpe_audio;
struct intel_audio_private audio;
struct i915_pmu pmu;
@ -1455,7 +1452,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_CANNONLAKE(dev_priv) 0
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
@ -1745,7 +1741,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12)
#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11)
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
@ -1789,16 +1785,7 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
/* i915_drv.c */
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);
void i915_driver_shutdown(struct drm_i915_private *i915);
int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
/* i915_getparam.c */
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);

View File

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __I915_IOSF_MBI_H__
#define __I915_IOSF_MBI_H__
#if IS_ENABLED(CONFIG_IOSF_MBI)
#include <asm/iosf_mbi.h>
#else
/* Stubs to compile for all non-x86 archs */
#define MBI_PMIC_BUS_ACCESS_BEGIN 1
#define MBI_PMIC_BUS_ACCESS_END 2
struct notifier_block;
static inline void iosf_mbi_punit_acquire(void) {}
static inline void iosf_mbi_punit_release(void) {}
static inline void iosf_mbi_assert_punit_acquired(void) {}
static inline
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
{
return 0;
}
static inline int
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
{
return 0;
}
static inline
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
return 0;
}
#endif
#endif /* __I915_IOSF_MBI_H__ */
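With these stubs in place a caller can use the MBI/punit interface unconditionally and still build on non-x86, where everything above compiles to no-ops. A minimal usage sketch; example_punit_access() is a hypothetical caller, the other names come from the declarations above:

#include "i915_iosf_mbi.h"

static int example_punit_access(struct notifier_block *nb)
{
	iosf_mbi_punit_acquire();
	/* sideband traffic would go here; a no-op on non-x86 */
	iosf_mbi_punit_release();

	/* registration compiles everywhere, returning 0 on the stub path */
	return iosf_mbi_register_pmic_bus_access_notifier(nb);
}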

View File

@ -3016,7 +3016,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv))
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

View File

@ -24,8 +24,8 @@ static int i915_check_nomodeset(void)
/*
* Enable KMS by default, unless explicitly overridden by
* either the i915.modeset prarameter or by the
* vga_text_mode_force boot option.
* either the i915.modeset parameter or by the
* nomodeset boot option.
*/
if (i915_modparams.modeset == 0)

View File

@ -27,6 +27,7 @@
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_pci.h"
@ -145,6 +146,12 @@
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
#define ICL_COLORS \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
/* Keep in gen based order, and chronological order within a gen */
@ -811,7 +818,7 @@ static const struct intel_device_info cml_gt2_info = {
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
GEN(11), \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \
ICL_COLORS, \
.dbuf.size = 2048, \
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
.display.has_dsc = 1, \
@ -866,7 +873,7 @@ static const struct intel_device_info jsl_info = {
TGL_CURSOR_OFFSETS, \
.has_global_mocs = 1, \
.has_pxp = 1, \
.display.has_dsb = 1
.display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
@ -932,8 +939,6 @@ static const struct intel_device_info adl_s_info = {
#define XE_LPD_FEATURES \
.abox_mask = GENMASK(1, 0), \
.color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \
.dbuf.size = 4096, \
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
@ -955,12 +960,16 @@ static const struct intel_device_info adl_s_info = {
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_D] = PIPE_D_OFFSET, \
[TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
[TRANSCODER_D] = TRANSCODER_D_OFFSET, \
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
XE_LPD_CURSOR_OFFSETS
@ -969,6 +978,9 @@ static const struct intel_device_info adl_p_info = {
XE_LPD_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
.require_force_probe = 1,
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
.display.has_cdclk_crawl = 1,
.display.has_modular_fia = 1,
.display.has_psr_hw_tracking = 0,
@ -1038,6 +1050,8 @@ static const struct intel_device_info dg2_info = {
BIT(VECS0) | BIT(VECS1) |
BIT(VCS0) | BIT(VCS2),
.require_force_probe = 1,
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
};
#undef PLATFORM

View File

@ -371,6 +371,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
#define FBC_LLC_READ_CTRL _MMIO(0x9044)
#define FBC_LLC_FULLY_OPEN REG_BIT(30)
#define GEN6_MBCTL _MMIO(0x0907c)
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
@ -2795,12 +2798,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1 << 31)
#define FPGA_DBG_RM_NOCLAIM REG_BIT(31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
#define CLAIM_ER_OVERFLOW (1 << 16)
#define CLAIM_ER_CTR_MASK 0xffff
#define CLAIM_ER_CLR REG_BIT(31)
#define CLAIM_ER_OVERFLOW REG_BIT(16)
#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0)
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
@ -3307,93 +3310,98 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
#define FBC_CTL_EN REG_BIT(31)
#define FBC_CTL_PERIODIC REG_BIT(30)
#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */
#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
#define FBC_CTL_EN REG_BIT(31)
#define FBC_CTL_PERIODIC REG_BIT(30)
#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
#define FBC_COMMAND _MMIO(0x320c)
#define FBC_CMD_COMPRESS (1 << 0)
#define FBC_CMD_COMPRESS REG_BIT(0)
#define FBC_STATUS _MMIO(0x3210)
#define FBC_STAT_COMPRESSING (1 << 31)
#define FBC_STAT_COMPRESSED (1 << 30)
#define FBC_STAT_MODIFIED (1 << 29)
#define FBC_STAT_CURRENT_LINE_SHIFT (0)
#define FBC_CONTROL2 _MMIO(0x3214)
#define FBC_CTL_FENCE_DBL (0 << 4)
#define FBC_CTL_IDLE_IMM (0 << 2)
#define FBC_CTL_IDLE_FULL (1 << 2)
#define FBC_CTL_IDLE_LINE (2 << 2)
#define FBC_CTL_IDLE_DEBUG (3 << 2)
#define FBC_CTL_CPU_FENCE (1 << 1)
#define FBC_CTL_PLANE(plane) ((plane) << 0)
#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
#define FBC_STAT_COMPRESSING REG_BIT(31)
#define FBC_STAT_COMPRESSED REG_BIT(30)
#define FBC_STAT_MODIFIED REG_BIT(29)
#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
#define FBC_CTL_FENCE_DBL REG_BIT(4)
#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
#define FBC_MOD_NUM_VALID REG_BIT(0)
#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */
#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
#define FBC_LL_SIZE (1536)
#define FBC_LLC_READ_CTRL _MMIO(0x9044)
#define FBC_LLC_FULLY_OPEN (1 << 30)
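Most of the changes in this header replace open-coded shifts with the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers, which keep a field's mask and value together. A small sketch (not driver code) of how the converted FBC_CTL_* definitions above compose a register value; the parameter values are illustrative only:

static u32 example_fbc_ctl(unsigned int interval, unsigned int stride,
			   unsigned int fence)
{
	return FBC_CTL_EN | FBC_CTL_PERIODIC |
	       FBC_CTL_INTERVAL(interval) |	/* placed in bits 29:16 */
	       FBC_CTL_STRIDE(stride) |		/* placed in bits 12:5 */
	       FBC_CTL_FENCENO(fence);		/* placed in bits 3:0 */
}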
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
#define DPFC_CTL_EN (1 << 31)
#define DPFC_CTL_PLANE(plane) ((plane) << 30)
#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)
#define DPFC_CTL_FENCE_EN (1 << 29)
#define IVB_DPFC_CTL_FENCE_EN (1 << 28)
#define DPFC_CTL_PERSISTENT_MODE (1 << 25)
#define DPFC_SR_EN (1 << 10)
#define DPFC_CTL_LIMIT_1X (0 << 6)
#define DPFC_CTL_LIMIT_2X (1 << 6)
#define DPFC_CTL_LIMIT_4X (2 << 6)
#define DPFC_RECOMP_CTL _MMIO(0x320c)
#define DPFC_RECOMP_STALL_EN (1 << 27)
#define DPFC_RECOMP_STALL_WM_SHIFT (16)
#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
#define DPFC_STATUS _MMIO(0x3210)
#define DPFC_INVAL_SEG_SHIFT (16)
#define DPFC_INVAL_SEG_MASK (0x07ff0000)
#define DPFC_COMP_SEG_SHIFT (0)
#define DPFC_COMP_SEG_MASK (0x000007ff)
#define DPFC_STATUS2 _MMIO(0x3214)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define DPFC_CHICKEN _MMIO(0x3224)
#define DPFC_HT_MODIFY (1 << 31)
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define DPFC_CONTROL _MMIO(0x3208)
#define ILK_DPFC_CONTROL _MMIO(0x43208)
#define FBC_CTL_FALSE_COLOR (1 << 10)
/* The bit 28-8 is reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define DPFC_CTL_EN REG_BIT(31)
#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
#define DPFC_RECOMP_CTL _MMIO(0x320c)
#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
#define DPFC_RECOMP_STALL_EN REG_BIT(27)
#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
#define DPFC_STATUS _MMIO(0x3210)
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_COMP_SEG_MASK 0x7ff
#define IVB_FBC_STATUS2 _MMIO(0x43214)
#define IVB_FBC_COMP_SEG_MASK 0x7ff
#define BDW_FBC_COMP_SEG_MASK 0xfff
#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
#define DPFC_STATUS2 _MMIO(0x3214)
#define ILK_DPFC_STATUS2 _MMIO(0x43214)
#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define DPFC_CHICKEN _MMIO(0x3224)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
#define GLK_FBC_STRIDE _MMIO(0x43228)
#define FBC_STRIDE_OVERRIDE REG_BIT(15)
#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1 << 0)
#define SNB_FBC_FRONT_BUFFER (1 << 1)
#define ILK_FBC_RT_VALID REG_BIT(0)
#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
#define ILK_FBCQ_DIS (1 << 22)
@ -3417,8 +3425,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
#define SNB_CPU_FENCE_ENABLE (1 << 29)
#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
#define SNB_DPFC_FENCE_EN REG_BIT(29)
#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
@ -3428,8 +3438,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE _MMIO(0x50380)
#define FBC_REND_NUKE (1 << 2)
#define FBC_REND_CACHE_CLEAN (1 << 1)
#define FBC_REND_NUKE REG_BIT(2)
#define FBC_REND_CACHE_CLEAN REG_BIT(1)
/*
* GPIO regs
@ -4309,47 +4319,52 @@ enum {
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
#define PIPE_CRC_ENABLE REG_BIT(31)
/* skl+ source selection */
#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28)
#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28)
#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28)
#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28)
#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28)
#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28)
#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28)
#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28)
#define PIPE_CRC_SOURCE_MASK_SKL REG_GENMASK(30, 28)
#define PIPE_CRC_SOURCE_PLANE_1_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 0)
#define PIPE_CRC_SOURCE_PLANE_2_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 2)
#define PIPE_CRC_SOURCE_DMUX_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 4)
#define PIPE_CRC_SOURCE_PLANE_3_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 6)
#define PIPE_CRC_SOURCE_PLANE_4_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 7)
#define PIPE_CRC_SOURCE_PLANE_5_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 5)
#define PIPE_CRC_SOURCE_PLANE_6_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 3)
#define PIPE_CRC_SOURCE_PLANE_7_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 1)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
#define PIPE_CRC_SOURCE_MASK_IVB REG_GENMASK(30, 29)
#define PIPE_CRC_SOURCE_PRIMARY_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 0)
#define PIPE_CRC_SOURCE_SPRITE_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 1)
#define PIPE_CRC_SOURCE_PF_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 2)
/* ilk+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
/* embedded DP port on the north display block, reserved on ivb */
#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
#define PIPE_CRC_SOURCE_MASK_ILK REG_GENMASK(30, 28)
#define PIPE_CRC_SOURCE_PRIMARY_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 0)
#define PIPE_CRC_SOURCE_SPRITE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 1)
#define PIPE_CRC_SOURCE_PIPE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 2)
/* embedded DP port on the north display block */
#define PIPE_CRC_SOURCE_PORT_A_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 4)
#define PIPE_CRC_SOURCE_FDI_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 5)
/* vlv source selection */
#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
#define PIPE_CRC_SOURCE_MASK_VLV REG_GENMASK(30, 27)
#define PIPE_CRC_SOURCE_PIPE_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 0)
#define PIPE_CRC_SOURCE_HDMIB_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 1)
#define PIPE_CRC_SOURCE_HDMIC_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 2)
/* with DP port the pipe source is invalid */
#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
#define PIPE_CRC_SOURCE_DP_D_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 3)
#define PIPE_CRC_SOURCE_DP_B_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 6)
#define PIPE_CRC_SOURCE_DP_C_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 7)
/* gen3+ source selection */
#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
#define PIPE_CRC_SOURCE_MASK_I9XX REG_GENMASK(30, 28)
#define PIPE_CRC_SOURCE_PIPE_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 0)
#define PIPE_CRC_SOURCE_SDVOB_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 1)
#define PIPE_CRC_SOURCE_SDVOC_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 2)
/* with DP/TV port the pipe source is invalid */
#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
#define PIPE_CRC_SOURCE_DP_D_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 3)
#define PIPE_CRC_SOURCE_TV_PRE REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 4)
#define PIPE_CRC_SOURCE_TV_POST REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 5)
#define PIPE_CRC_SOURCE_DP_B_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 6)
#define PIPE_CRC_SOURCE_DP_C_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 7)
/* gen2 doesn't have source selection bits */
#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
#define PIPE_CRC_INCLUDE_BORDER_I8XX REG_BIT(30)
#define _PIPE_CRC_RES_1_A_IVB 0x60064
#define _PIPE_CRC_RES_2_A_IVB 0x60068
@ -4698,11 +4713,11 @@ enum {
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
#define PSR_EVENT_PSR_DISABLE (1 << 0)
#define _PSR2_STATUS_A 0x60940
#define _PSR2_STATUS_EDP 0x6f940
#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
#define _PSR2_STATUS_A 0x60940
#define _PSR2_STATUS_EDP 0x6f940
#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28)
#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8)
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
@ -4999,9 +5014,9 @@ enum {
#define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
#define PIPE_A_SCRAMBLE_RESET (1 << 0)
#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
@ -6266,55 +6281,55 @@ enum {
#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
#define PIPEB_HLINE_INT_EN (1 << 28)
#define PIPEB_VBLANK_INT_EN (1 << 27)
#define SPRITED_FLIP_DONE_INT_EN (1 << 26)
#define SPRITEC_FLIP_DONE_INT_EN (1 << 25)
#define PLANEB_FLIP_DONE_INT_EN (1 << 24)
#define PIPE_PSR_INT_EN (1 << 22)
#define PIPEA_LINE_COMPARE_INT_EN (1 << 21)
#define PIPEA_HLINE_INT_EN (1 << 20)
#define PIPEA_VBLANK_INT_EN (1 << 19)
#define SPRITEB_FLIP_DONE_INT_EN (1 << 18)
#define SPRITEA_FLIP_DONE_INT_EN (1 << 17)
#define PLANEA_FLIPDONE_INT_EN (1 << 16)
#define PIPEC_LINE_COMPARE_INT_EN (1 << 13)
#define PIPEC_HLINE_INT_EN (1 << 12)
#define PIPEC_VBLANK_INT_EN (1 << 11)
#define SPRITEF_FLIPDONE_INT_EN (1 << 10)
#define SPRITEE_FLIPDONE_INT_EN (1 << 9)
#define PLANEC_FLIPDONE_INT_EN (1 << 8)
#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
#define PIPEB_HLINE_INT_EN REG_BIT(28)
#define PIPEB_VBLANK_INT_EN REG_BIT(27)
#define SPRITED_FLIP_DONE_INT_EN REG_BIT(26)
#define SPRITEC_FLIP_DONE_INT_EN REG_BIT(25)
#define PLANEB_FLIP_DONE_INT_EN REG_BIT(24)
#define PIPE_PSR_INT_EN REG_BIT(22)
#define PIPEA_LINE_COMPARE_INT_EN REG_BIT(21)
#define PIPEA_HLINE_INT_EN REG_BIT(20)
#define PIPEA_VBLANK_INT_EN REG_BIT(19)
#define SPRITEB_FLIP_DONE_INT_EN REG_BIT(18)
#define SPRITEA_FLIP_DONE_INT_EN REG_BIT(17)
#define PLANEA_FLIPDONE_INT_EN REG_BIT(16)
#define PIPEC_LINE_COMPARE_INT_EN REG_BIT(13)
#define PIPEC_HLINE_INT_EN REG_BIT(12)
#define PIPEC_VBLANK_INT_EN REG_BIT(11)
#define SPRITEF_FLIPDONE_INT_EN REG_BIT(10)
#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9)
#define PLANEC_FLIPDONE_INT_EN REG_BIT(8)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
#define SPRITEF_INVALID_GTT_INT_EN (1 << 27)
#define SPRITEE_INVALID_GTT_INT_EN (1 << 26)
#define PLANEC_INVALID_GTT_INT_EN (1 << 25)
#define CURSORC_INVALID_GTT_INT_EN (1 << 24)
#define CURSORB_INVALID_GTT_INT_EN (1 << 23)
#define CURSORA_INVALID_GTT_INT_EN (1 << 22)
#define SPRITED_INVALID_GTT_INT_EN (1 << 21)
#define SPRITEC_INVALID_GTT_INT_EN (1 << 20)
#define PLANEB_INVALID_GTT_INT_EN (1 << 19)
#define SPRITEB_INVALID_GTT_INT_EN (1 << 18)
#define SPRITEA_INVALID_GTT_INT_EN (1 << 17)
#define PLANEA_INVALID_GTT_INT_EN (1 << 16)
#define DPINVGTT_EN_MASK 0xff0000
#define DPINVGTT_EN_MASK_CHV 0xfff0000
#define SPRITEF_INVALID_GTT_STATUS (1 << 11)
#define SPRITEE_INVALID_GTT_STATUS (1 << 10)
#define PLANEC_INVALID_GTT_STATUS (1 << 9)
#define CURSORC_INVALID_GTT_STATUS (1 << 8)
#define CURSORB_INVALID_GTT_STATUS (1 << 7)
#define CURSORA_INVALID_GTT_STATUS (1 << 6)
#define SPRITED_INVALID_GTT_STATUS (1 << 5)
#define SPRITEC_INVALID_GTT_STATUS (1 << 4)
#define PLANEB_INVALID_GTT_STATUS (1 << 3)
#define SPRITEB_INVALID_GTT_STATUS (1 << 2)
#define SPRITEA_INVALID_GTT_STATUS (1 << 1)
#define PLANEA_INVALID_GTT_STATUS (1 << 0)
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
#define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
@ -8263,7 +8278,7 @@ enum {
/*
* The below are numbered starting from "S1" on gen11/gen12, but starting
* with gen13 display, the bspec switches to a 0-based numbering scheme
* with display 13, the bspec switches to a 0-based numbering scheme
* (although the addresses stay the same so new S0 = old S1, new S1 = old S2).
* We'll just use the 0-based numbering here for all platforms since it's the
* way things will be named by the hardware team going forward, plus it's more
@ -8308,9 +8323,10 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
#define ICL_DELAY_PMRSP (1 << 22)
#define MASK_WAKEMEM (1 << 13)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
#define ICL_DELAY_PMRSP REG_BIT(22)
#define DISABLE_FLR_SRC REG_BIT(15)
#define MASK_WAKEMEM REG_BIT(13)
#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
@ -9781,6 +9797,10 @@ enum {
#define AUD_PIN_BUF_CTL _MMIO(0x48414)
#define AUD_PIN_BUF_ENABLE REG_BIT(31)
#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
#define AUD_TS_CDCLK_M_EN REG_BIT(31)
#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
/* Display Audio Config Reg */
#define AUD_CONFIG_BE _MMIO(0x65ef0)
#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
@ -10212,8 +10232,6 @@ enum skl_power_gate {
#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT)
#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1)
#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)

View File

@ -5,6 +5,7 @@
#include <linux/vga_switcheroo.h>
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_switcheroo.h"
@ -24,12 +25,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(pdev, PCI_D0);
i915_resume_switcheroo(i915);
i915_driver_resume_switcheroo(i915);
i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
} else {
drm_info(&i915->drm, "switched off\n");
i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend_switcheroo(i915, pmm);
i915_driver_suspend_switcheroo(i915, pmm);
i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
}
}

View File

@ -288,7 +288,7 @@ TRACE_EVENT(vlv_fifo_size,
/* plane updates */
TRACE_EVENT(intel_update_plane,
TRACE_EVENT(intel_plane_update_noarm,
TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
TP_ARGS(plane, crtc),
@ -317,7 +317,36 @@ TRACE_EVENT(intel_update_plane,
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
TRACE_EVENT(intel_disable_plane,
TRACE_EVENT(intel_plane_update_arm,
TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
TP_ARGS(plane, crtc),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__array(int, src, 4)
__array(int, dst, 4)
__string(name, plane->name)
),
TP_fast_assign(
__assign_str(name, plane->name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
);
TRACE_EVENT(intel_plane_disable_arm,
TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
TP_ARGS(plane, crtc),
@ -404,6 +433,48 @@ TRACE_EVENT(intel_fbc_nuke,
/* pipe updates */
TRACE_EVENT(intel_crtc_vblank_work_start,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, frame=%u, scanline=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline)
);
TRACE_EVENT(intel_crtc_vblank_work_end,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, frame=%u, scanline=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline)
);
TRACE_EVENT(intel_pipe_update_start,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),

View File

@ -97,11 +97,20 @@ enum i915_cache_level;
struct intel_remapped_plane_info {
/* in gtt pages */
u32 offset;
u16 width;
u16 height;
u16 src_stride;
u16 dst_stride;
u32 offset:31;
u32 linear:1;
union {
/* in gtt pages for !linear */
struct {
u16 width;
u16 height;
u16 src_stride;
u16 dst_stride;
};
/* in gtt pages for linear */
u32 size;
};
} __packed;
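The union lets the same packed descriptor describe either a tiled plane (explicit geometry) or a linear plane (just a size), selected by the new linear bit. Two illustrative initializations with made-up values, relying on C11-style designated initializers for the anonymous members; the example_* names are hypothetical:

static const struct intel_remapped_plane_info example_tiled = {
	.offset = 0x1000,	/* in GTT pages */
	.linear = 0,
	.width = 256,
	.height = 64,
	.src_stride = 256,
	.dst_stride = 272,
};

static const struct intel_remapped_plane_info example_linear = {
	.offset = 0x2000,	/* in GTT pages */
	.linear = 1,
	.size = 4096,		/* in GTT pages */
};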
struct intel_remapped_info {

View File

@ -37,6 +37,7 @@
#include "display/intel_bw.h"
#include "display/intel_de.h"
#include "display/intel_display_types.h"
#include "display/intel_fb.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
#include "display/skl_universal_plane.h"
@ -160,7 +161,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0883: bxt
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
DPFC_DISABLE_DUMMY0);
}
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@ -3062,9 +3063,9 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
if (!changed)
return;
@ -3374,7 +3375,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
* enabled sometime later.
*/
if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
intel_fbc_is_active(&dev_priv->fbc)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@ -5094,6 +5095,18 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
static bool icl_need_wm1_wa(struct drm_i915_private *i915,
enum plane_id plane_id)
{
/*
* Wa_1408961008:icl, ehl
* Wa_14012656716:tgl, adl
* Underruns with WM1+ disabled
*/
return DISPLAY_VER(i915) == 11 ||
(IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
}
static int
skl_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
@ -5264,11 +5277,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
total[plane_id], uv_total[plane_id]);
/*
* Wa_1408961008:icl, ehl
* Underruns with WM1+ disabled
*/
if (DISPLAY_VER(dev_priv) == 11 &&
if (icl_need_wm1_wa(dev_priv, plane_id) &&
level == 1 && wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
@ -7434,7 +7443,7 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* Wa_1409120013:icl,ehl */
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/*Wa_14010594013:icl, ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
@ -7447,7 +7456,7 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv))
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_1409825376:tgl (pre-prod)*/
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
@ -7509,7 +7518,7 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0873: cfl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
@ -7542,7 +7551,7 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0873: kbl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
@ -7569,14 +7578,14 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
* Display WA #0873: skl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
DPFC_NUKE_ON_ANY_MODIFICATION);
/*
* WaFbcHighMemBwCorruptionAvoidance:skl
* Display WA #0883: skl
*/
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
DPFC_DISABLE_DUMMY0);
}
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)

View File

@ -590,6 +590,9 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
pm_runtime_use_autosuspend(kdev);
}
/* Enable by default */
pm_runtime_allow(kdev);
/*
* The core calls the driver load handler with an RPM reference held.
* We drop that here and will reacquire it during unloading in

View File

@ -22,11 +22,11 @@
*/
#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>
#include "gt/intel_lrc_reg.h" /* for shadow reg list */
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

View File

@ -3,7 +3,8 @@
* Copyright(c) 2020, Intel Corporation. All rights reserved.
*/
#include "drm/i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_pxp.h"

View File

@ -4,8 +4,10 @@
*/
#include <linux/component.h>
#include "drm/i915_pxp_tee_interface.h"
#include "drm/i915_component.h"
#include <drm/i915_pxp_tee_interface.h>
#include <drm/i915_component.h>
#include "i915_drv.h"
#include "intel_pxp.h"
#include "intel_pxp_session.h"

View File

@ -3,9 +3,8 @@
* Copyright © 2013-2021 Intel Corporation
*/
#include <asm/iosf_mbi.h>
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "vlv_sideband.h"
/*

View File

@ -4,9 +4,11 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
#include <linux/agp_backend.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/types.h>
struct agp_bridge_data;
struct pci_dev;
struct sg_table;
void intel_gtt_get(u64 *gtt_total,
phys_addr_t *mappable_base,