Merge tag 'drm-intel-next-queued-2020-11-03' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
drm/i915 features for v5.11

Highlights:
- More DG1 enabling (Lucas, Matt, Aditya, Anshuman, Clinton, Matt, Stuart, Venkata)
- Integer scaling filter support (Pankaj Bharadiya)
- Asynchronous flip support (Karthik)

Generic:
- Fix gen12 forcewake tables (Matt)
- Haswell PCI ID updates (Alexei Podtelezhnikov)

Display:
- ICL+ DSI command mode enabling (Vandita)
- Shutdown displays gracefully on reboot/shutdown (Ville)
- Don't register display debugfs when there is no display (Lucas)
- Fix RKL CDCLK table (Matt)
- Limit EHL/JSL eDP to HBR2 (José)
- Handle incorrectly set (by BIOS) PLLs and DP link rates at probe (Imre)
- Fix mode valid check wrt bpp for "YCbCr 4:2:0 only" modes (Ville)
- State checker and dump fixes (Ville)
- DP AUX backlight updates (Aaron Ma, Sean Paul)
- Add DP LTTPR non-transparent link training mode (Imre)
- PSR2 selective fetch enabling (José)
- VBT updates (José)
- HDCP updates (Ramalingam)

Cleanups and refactoring:
- HPD pin, AUX channel, and Type-C port identifier cleanup (Ville)
- Hotplug and irq refactoring (Ville)
- Better DDI encoder and AUX channel names (Ville)
- Color LUT code cleanups (Ville)
- Combo PHY code cleanups (Ville)
- LSPCON code cleanups (Ville)
- Documentation fixes (Mauro, Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87o8kehbaj.fsf@intel.com
commit e047c7be17
@@ -118,6 +118,12 @@ Atomic Plane Helpers
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_atomic_plane.c
   :internal:

Asynchronous Page Flip
----------------------

.. kernel-doc:: drivers/gpu/drm/i915/display/intel_display.c
   :doc: asynchronous flip implementation

Output Probing
--------------
@ -469,6 +469,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
|
|||
return -EFAULT;
|
||||
|
||||
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
|
||||
} else if (property == crtc->scaling_filter_property) {
|
||||
state->scaling_filter = val;
|
||||
} else if (crtc->funcs->atomic_set_property) {
|
||||
return crtc->funcs->atomic_set_property(crtc, state, property, val);
|
||||
} else {
|
||||
|
@ -503,6 +505,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
|
|||
*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
|
||||
else if (property == config->prop_out_fence_ptr)
|
||||
*val = 0;
|
||||
else if (property == crtc->scaling_filter_property)
|
||||
*val = state->scaling_filter;
|
||||
else if (crtc->funcs->atomic_get_property)
|
||||
return crtc->funcs->atomic_get_property(crtc, state, property, val);
|
||||
else
|
||||
|
@ -585,6 +589,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
|
|||
sizeof(struct drm_rect),
|
||||
&replaced);
|
||||
return ret;
|
||||
} else if (property == plane->scaling_filter_property) {
|
||||
state->scaling_filter = val;
|
||||
} else if (plane->funcs->atomic_set_property) {
|
||||
return plane->funcs->atomic_set_property(plane, state,
|
||||
property, val);
|
||||
|
@ -643,6 +649,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
|
|||
} else if (property == config->prop_fb_damage_clips) {
|
||||
*val = (state->fb_damage_clips) ?
|
||||
state->fb_damage_clips->base.id : 0;
|
||||
} else if (property == plane->scaling_filter_property) {
|
||||
*val = state->scaling_filter;
|
||||
} else if (plane->funcs->atomic_get_property) {
|
||||
return plane->funcs->atomic_get_property(plane, state, property, val);
|
||||
} else {
|
||||
|
|
|
@@ -194,6 +194,19 @@
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
*
* SCALING_FILTER:
*
* Indicates scaling filter to be used for plane scaler
*
* The value of this property can be one of the following:
* Default:
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
*
* Drivers can set up this property for a plane by calling
* drm_plane_create_scaling_filter_property
*/

/**
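A driver opts in by calling the helper named above from its plane init path; a minimal sketch, assuming an i915-style init flow (the wrapper function name is hypothetical, the helper and enum values are the ones introduced by this series):

/* Hypothetical driver snippet: expose SCALING_FILTER on a plane. */
static int example_plane_add_scaling_filter(struct drm_plane *plane)
{
	unsigned int supported = BIT(DRM_SCALING_FILTER_DEFAULT) |
				 BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR);

	/* Attaches the enum property with DRM_SCALING_FILTER_DEFAULT as the initial value. */
	return drm_plane_create_scaling_filter_property(plane, supported);
}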
@ -229,6 +229,15 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
|
|||
* user-space must set this property to 0.
|
||||
*
|
||||
* Setting MODE_ID to 0 will release reserved resources for the CRTC.
|
||||
* SCALING_FILTER:
|
||||
* Atomic property for setting the scaling filter for CRTC scaler
|
||||
*
|
||||
* The value of this property can be one of the following:
|
||||
* Default:
|
||||
* Driver's default scaling filter
|
||||
* Nearest Neighbor:
|
||||
* Nearest Neighbor scaling filter
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
|
@ -774,3 +783,34 @@ int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_crtc_create_scaling_filter_property - create a new scaling filter
|
||||
* property
|
||||
*
|
||||
* @crtc: drm CRTC
|
||||
* @supported_filters: bitmask of supported scaling filters, must include
|
||||
* BIT(DRM_SCALING_FILTER_DEFAULT).
|
||||
*
|
||||
* This function lets drivers enable the scaling filter property on a given
* CRTC.
|
||||
*
|
||||
* RETURNS:
|
||||
* Zero for success or -errno
|
||||
*/
|
||||
int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
|
||||
unsigned int supported_filters)
|
||||
{
|
||||
struct drm_property *prop =
|
||||
drm_create_scaling_filter_prop(crtc->dev, supported_filters);
|
||||
|
||||
if (IS_ERR(prop))
|
||||
return PTR_ERR(prop);
|
||||
|
||||
drm_object_attach_property(&crtc->base, prop,
|
||||
DRM_SCALING_FILTER_DEFAULT);
|
||||
crtc->scaling_filter_property = prop;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
|
||||
|
|
|
@ -72,6 +72,9 @@ int drm_crtc_force_disable(struct drm_crtc *crtc);
|
|||
|
||||
struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc);
|
||||
|
||||
struct drm_property *
|
||||
drm_create_scaling_filter_prop(struct drm_device *dev,
|
||||
unsigned int supported_filters);
|
||||
/* IOCTLs */
|
||||
int drm_mode_getcrtc(struct drm_device *dev,
|
||||
void *data, struct drm_file *file_priv);
|
||||
|
|
|
@ -150,11 +150,8 @@ void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
|||
}
|
||||
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
|
||||
|
||||
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
static void __drm_dp_link_train_channel_eq_delay(unsigned long rd_interval)
|
||||
{
|
||||
unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
|
||||
DP_TRAINING_AUX_RD_MASK;
|
||||
|
||||
if (rd_interval > 4)
|
||||
DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
|
||||
rd_interval);
|
||||
|
@ -166,8 +163,35 @@ void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
|||
|
||||
usleep_range(rd_interval, rd_interval * 2);
|
||||
}
|
||||
|
||||
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
|
||||
{
|
||||
__drm_dp_link_train_channel_eq_delay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
|
||||
DP_TRAINING_AUX_RD_MASK);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
|
||||
|
||||
void drm_dp_lttpr_link_train_clock_recovery_delay(void)
|
||||
{
|
||||
usleep_range(100, 200);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_lttpr_link_train_clock_recovery_delay);
|
||||
|
||||
static u8 dp_lttpr_phy_cap(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE], int r)
|
||||
{
|
||||
return phy_cap[r - DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1];
|
||||
}
|
||||
|
||||
void drm_dp_lttpr_link_train_channel_eq_delay(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE])
|
||||
{
|
||||
u8 interval = dp_lttpr_phy_cap(phy_cap,
|
||||
DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) &
|
||||
DP_TRAINING_AUX_RD_MASK;
|
||||
|
||||
__drm_dp_link_train_channel_eq_delay(interval);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
|
||||
|
||||
u8 drm_dp_link_rate_to_bw_code(int link_rate)
|
||||
{
|
||||
/* Spec says link_bw = link_rate / 0.27Gbps */
|
||||
|
@ -363,6 +387,59 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
|
|||
}
|
||||
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
|
||||
|
||||
/**
|
||||
* drm_dp_dpcd_read_phy_link_status - get the link status information for a DP PHY
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @dp_phy: the DP PHY to get the link status for
|
||||
* @link_status: buffer to return the status in
|
||||
*
|
||||
* Fetch the AUX DPCD registers for the DPRX or an LTTPR PHY link status. The
|
||||
* layout of the returned @link_status matches the DPCD register layout of the
|
||||
* DPRX PHY link status.
|
||||
*
|
||||
* Returns 0 if the information was read successfully or a negative error code
|
||||
* on failure.
|
||||
*/
|
||||
int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
|
||||
enum drm_dp_phy dp_phy,
|
||||
u8 link_status[DP_LINK_STATUS_SIZE])
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (dp_phy == DP_PHY_DPRX) {
|
||||
ret = drm_dp_dpcd_read(aux,
|
||||
DP_LANE0_1_STATUS,
|
||||
link_status,
|
||||
DP_LINK_STATUS_SIZE);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
WARN_ON(ret != DP_LINK_STATUS_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = drm_dp_dpcd_read(aux,
|
||||
DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
|
||||
link_status,
|
||||
DP_LINK_STATUS_SIZE - 1);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
|
||||
|
||||
/* Convert the LTTPR to the sink PHY link status layout */
|
||||
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
|
||||
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
|
||||
DP_LINK_STATUS_SIZE - (DP_SINK_STATUS - DP_LANE0_1_STATUS) - 1);
|
||||
link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS] = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
|
||||
|
||||
static bool is_edid_digital_input_dp(const struct edid *edid)
|
||||
{
|
||||
return edid && edid->revision >= 4 &&
|
||||
|
@ -1882,6 +1959,7 @@ static const struct edid_quirk edid_quirk_list[] = {
|
|||
{ MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
|
||||
{ MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
|
||||
{ MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
|
||||
{ MFG(0x09, 0xe5), PROD_ID(0xde, 0x08), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
|
||||
};
|
||||
|
||||
#undef MFG
|
||||
|
@ -2103,6 +2181,153 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
|
|||
}
|
||||
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
|
||||
|
||||
/**
|
||||
* drm_dp_read_lttpr_common_caps - read the LTTPR common capabilities
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @caps: buffer to return the capability info in
|
||||
*
|
||||
* Read capabilities common to all LTTPRs.
|
||||
*
|
||||
* Returns 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
|
||||
u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = drm_dp_dpcd_read(aux,
|
||||
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
|
||||
caps, DP_LTTPR_COMMON_CAP_SIZE);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
WARN_ON(ret != DP_LTTPR_COMMON_CAP_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_read_lttpr_common_caps);
|
||||
|
||||
/**
|
||||
* drm_dp_read_lttpr_phy_caps - read the capabilities for a given LTTPR PHY
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @dp_phy: LTTPR PHY to read the capabilities for
|
||||
* @caps: buffer to return the capability info in
|
||||
*
|
||||
* Read the capabilities for the given LTTPR PHY.
|
||||
*
|
||||
* Returns 0 on success or a negative error code on failure.
|
||||
*/
|
||||
int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
|
||||
enum drm_dp_phy dp_phy,
|
||||
u8 caps[DP_LTTPR_PHY_CAP_SIZE])
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = drm_dp_dpcd_read(aux,
|
||||
DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy),
|
||||
caps, DP_LTTPR_PHY_CAP_SIZE);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
WARN_ON(ret != DP_LTTPR_PHY_CAP_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_read_lttpr_phy_caps);
|
||||
|
||||
static u8 dp_lttpr_common_cap(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE], int r)
|
||||
{
|
||||
return caps[r - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_dp_lttpr_count - get the number of detected LTTPRs
|
||||
* @caps: LTTPR common capabilities
|
||||
*
|
||||
* Get the number of detected LTTPRs from the LTTPR common capabilities info.
|
||||
*
|
||||
* Returns:
|
||||
* -ERANGE if more than supported number (8) of LTTPRs are detected
|
||||
* -EINVAL if the DP_PHY_REPEATER_CNT register contains an invalid value
|
||||
* otherwise the number of detected LTTPRs
|
||||
*/
|
||||
int drm_dp_lttpr_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
|
||||
{
|
||||
u8 count = dp_lttpr_common_cap(caps, DP_PHY_REPEATER_CNT);
|
||||
|
||||
switch (hweight8(count)) {
|
||||
case 0:
|
||||
return 0;
|
||||
case 1:
|
||||
return 8 - ilog2(count);
|
||||
case 8:
|
||||
return -ERANGE;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_lttpr_count);
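The DP_PHY_REPEATER_CNT value is expected to be one-hot, which is what the hweight8()/ilog2() combination above decodes; an illustrative mapping derived from the code itself:

/*
 * One-hot decode used by drm_dp_lttpr_count():
 *   0x80 -> 8 - ilog2(0x80) = 1 LTTPR
 *   0x40 -> 8 - ilog2(0x40) = 2 LTTPRs
 *   ...
 *   0x01 -> 8 - ilog2(0x01) = 8 LTTPRs
 *   0x00 -> no LTTPRs; 0xff (all bits set) -> more than 8, -ERANGE;
 *   any other multi-bit value -> -EINVAL
 */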
|
||||
|
||||
/**
|
||||
* drm_dp_lttpr_max_link_rate - get the maximum link rate supported by all LTTPRs
|
||||
* @caps: LTTPR common capabilities
|
||||
*
|
||||
* Returns the maximum link rate supported by all detected LTTPRs.
|
||||
*/
|
||||
int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
|
||||
{
|
||||
u8 rate = dp_lttpr_common_cap(caps, DP_MAX_LINK_RATE_PHY_REPEATER);
|
||||
|
||||
return drm_dp_bw_code_to_link_rate(rate);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_lttpr_max_link_rate);
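drm_dp_bw_code_to_link_rate() applies the same 0.27 Gbps-per-unit relation noted next to drm_dp_link_rate_to_bw_code() above, i.e. link_rate (kHz) = bw_code * 27000; a few reference points:

/*
 * Examples of the bw code <-> link rate mapping:
 *   0x06 * 27000 = 162000 kHz (RBR)
 *   0x0a * 27000 = 270000 kHz (HBR)
 *   0x14 * 27000 = 540000 kHz (HBR2)
 *   0x1e * 27000 = 810000 kHz (HBR3)
 */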
|
||||
|
||||
/**
|
||||
* drm_dp_lttpr_max_lane_count - get the maximum lane count supported by all LTTPRs
|
||||
* @caps: LTTPR common capabilities
|
||||
*
|
||||
* Returns the maximum lane count supported by all detected LTTPRs.
|
||||
*/
|
||||
int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE])
|
||||
{
|
||||
u8 max_lanes = dp_lttpr_common_cap(caps, DP_MAX_LANE_COUNT_PHY_REPEATER);
|
||||
|
||||
return max_lanes & DP_MAX_LANE_COUNT_MASK;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_lttpr_max_lane_count);
|
||||
|
||||
/**
|
||||
* drm_dp_lttpr_voltage_swing_level_3_supported - check for LTTPR vswing3 support
|
||||
* @caps: LTTPR PHY capabilities
|
||||
*
|
||||
* Returns true if the @caps for an LTTPR TX PHY indicate support for
|
||||
* voltage swing level 3.
|
||||
*/
|
||||
bool
|
||||
drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE])
|
||||
{
|
||||
u8 txcap = dp_lttpr_phy_cap(caps, DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1);
|
||||
|
||||
return txcap & DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_lttpr_voltage_swing_level_3_supported);
|
||||
|
||||
/**
|
||||
* drm_dp_lttpr_pre_emphasis_level_3_supported - check for LTTPR preemph3 support
|
||||
* @caps: LTTPR PHY capabilities
|
||||
*
|
||||
* Returns true if the @caps for an LTTPR TX PHY indicate support for
|
||||
* pre-emphasis level 3.
|
||||
*/
|
||||
bool
|
||||
drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE])
|
||||
{
|
||||
u8 txcap = dp_lttpr_phy_cap(caps, DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1);
|
||||
|
||||
return txcap & DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dp_lttpr_pre_emphasis_level_3_supported);
|
||||
|
||||
/**
|
||||
* drm_dp_get_phy_test_pattern() - get the requested pattern from the sink.
|
||||
* @aux: DisplayPort AUX channel
|
||||
|
|
|
@ -1231,3 +1231,76 @@ out:
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct drm_property *
|
||||
drm_create_scaling_filter_prop(struct drm_device *dev,
|
||||
unsigned int supported_filters)
|
||||
{
|
||||
struct drm_property *prop;
|
||||
static const struct drm_prop_enum_list props[] = {
|
||||
{ DRM_SCALING_FILTER_DEFAULT, "Default" },
|
||||
{ DRM_SCALING_FILTER_NEAREST_NEIGHBOR, "Nearest Neighbor" },
|
||||
};
|
||||
unsigned int valid_mode_mask = BIT(DRM_SCALING_FILTER_DEFAULT) |
|
||||
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR);
|
||||
int i;
|
||||
|
||||
if (WARN_ON((supported_filters & ~valid_mode_mask) ||
|
||||
((supported_filters & BIT(DRM_SCALING_FILTER_DEFAULT)) == 0)))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
|
||||
"SCALING_FILTER",
|
||||
hweight32(supported_filters));
|
||||
if (!prop)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(props); i++) {
|
||||
int ret;
|
||||
|
||||
if (!(BIT(props[i].type) & supported_filters))
|
||||
continue;
|
||||
|
||||
ret = drm_property_add_enum(prop, props[i].type,
|
||||
props[i].name);
|
||||
|
||||
if (ret) {
|
||||
drm_property_destroy(dev, prop);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
}
|
||||
|
||||
return prop;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_plane_create_scaling_filter_property - create a new scaling filter
|
||||
* property
|
||||
*
|
||||
* @plane: drm plane
|
||||
* @supported_filters: bitmask of supported scaling filters, must include
|
||||
* BIT(DRM_SCALING_FILTER_DEFAULT).
|
||||
*
|
||||
* This function lets drivers enable the scaling filter property on a given
* plane.
|
||||
*
|
||||
* RETURNS:
|
||||
* Zero for success or -errno
|
||||
*/
|
||||
int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
|
||||
unsigned int supported_filters)
|
||||
{
|
||||
struct drm_property *prop =
|
||||
drm_create_scaling_filter_prop(plane->dev, supported_filters);
|
||||
|
||||
if (IS_ERR(prop))
|
||||
return PTR_ERR(prop);
|
||||
|
||||
drm_object_attach_property(&plane->base, prop,
|
||||
DRM_SCALING_FILTER_DEFAULT);
|
||||
plane->scaling_filter_property = prop;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_plane_create_scaling_filter_property);
|
||||
|
|
|
@ -205,6 +205,32 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
|
|||
return 0;
|
||||
}
|
||||
|
||||
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
u32 tmp, mode_flags;
|
||||
enum port port;
|
||||
|
||||
mode_flags = crtc_state->mode_flags;
|
||||
|
||||
/*
|
||||
* case 1 also covers dual link
|
||||
* In case of dual link, frame update should be set on
|
||||
* DSI_0
|
||||
*/
|
||||
if (mode_flags & I915_MODE_FLAG_DSI_USE_TE0)
|
||||
port = PORT_A;
|
||||
else if (mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
|
||||
port = PORT_B;
|
||||
else
|
||||
return;
|
||||
|
||||
tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
|
||||
tmp |= DSI_FRAME_UPDATE_REQUEST;
|
||||
intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
|
||||
}
|
||||
|
||||
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
|
@ -429,7 +455,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
|
|||
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
|
||||
|
||||
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
|
||||
if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
|
||||
if (IS_JSL_EHL(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
|
||||
tmp = intel_de_read(dev_priv,
|
||||
ICL_PORT_PCS_DW1_AUX(phy));
|
||||
tmp &= ~LATENCY_OPTIM_MASK;
|
||||
|
@ -586,7 +612,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
|
|||
}
|
||||
}
|
||||
|
||||
if (IS_ELKHARTLAKE(dev_priv)) {
|
||||
if (IS_JSL_EHL(dev_priv)) {
|
||||
for_each_dsi_phy(phy, intel_dsi->phys) {
|
||||
tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
|
||||
tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
|
||||
|
@ -1447,6 +1473,18 @@ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
|
|||
return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
|
||||
}
|
||||
|
||||
static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
|
||||
pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1 |
|
||||
I915_MODE_FLAG_DSI_USE_TE0;
|
||||
else if (intel_dsi->ports == BIT(PORT_B))
|
||||
pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1;
|
||||
else
|
||||
pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE0;
|
||||
}
|
||||
|
||||
static void gen11_dsi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
|
@ -1468,6 +1506,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
|
|||
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
|
||||
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
|
||||
|
||||
/* Get the details on which TE should be enabled */
|
||||
if (is_cmd_mode(intel_dsi))
|
||||
gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config);
|
||||
|
||||
if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
|
||||
pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
|
||||
}
|
||||
|
@ -1562,18 +1604,8 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
|
|||
* receive TE from the slave if
|
||||
* dual link is enabled
|
||||
*/
|
||||
if (is_cmd_mode(intel_dsi)) {
|
||||
if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
|
||||
pipe_config->mode_flags |=
|
||||
I915_MODE_FLAG_DSI_USE_TE1 |
|
||||
I915_MODE_FLAG_DSI_USE_TE0;
|
||||
else if (intel_dsi->ports == BIT(PORT_B))
|
||||
pipe_config->mode_flags |=
|
||||
I915_MODE_FLAG_DSI_USE_TE1;
|
||||
else
|
||||
pipe_config->mode_flags |=
|
||||
I915_MODE_FLAG_DSI_USE_TE0;
|
||||
}
|
||||
if (is_cmd_mode(intel_dsi))
|
||||
gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1636,6 +1668,19 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static bool gen11_dsi_initial_fastset_check(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
if (crtc_state->dsc.compression_enable) {
|
||||
drm_dbg_kms(encoder->base.dev, "Forcing full modeset due to DSC being enabled\n");
|
||||
crtc_state->uapi.mode_changed = true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
intel_encoder_destroy(encoder);
|
||||
|
@ -1891,6 +1936,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
|
|||
encoder->update_pipe = intel_panel_update_backlight;
|
||||
encoder->compute_config = gen11_dsi_compute_config;
|
||||
encoder->get_hw_state = gen11_dsi_get_hw_state;
|
||||
encoder->initial_fastset_check = gen11_dsi_initial_fastset_check;
|
||||
encoder->type = INTEL_OUTPUT_DSI;
|
||||
encoder->cloneable = 0;
|
||||
encoder->pipe_mask = ~0;
|
||||
|
|
|
@ -262,6 +262,7 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
|
|||
plane_state->hw.rotation = from_plane_state->uapi.rotation;
|
||||
plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
|
||||
plane_state->hw.color_range = from_plane_state->uapi.color_range;
|
||||
plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter;
|
||||
}
|
||||
|
||||
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
|
||||
|
@ -408,7 +409,11 @@ void intel_update_plane(struct intel_plane *plane,
|
|||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
|
||||
trace_intel_update_plane(&plane->base, crtc);
|
||||
plane->update_plane(plane, crtc_state, plane_state);
|
||||
|
||||
if (crtc_state->uapi.async_flip && plane->async_flip)
|
||||
plane->async_flip(plane, crtc_state, plane_state);
|
||||
else
|
||||
plane->update_plane(plane, crtc_state, plane_state);
|
||||
}
|
||||
|
||||
void intel_disable_plane(struct intel_plane *plane,
|
||||
|
|
|
@ -425,6 +425,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
|
|||
const struct bdb_lfp_backlight_data *backlight_data;
|
||||
const struct lfp_backlight_data_entry *entry;
|
||||
int panel_type = dev_priv->vbt.panel_type;
|
||||
u16 level;
|
||||
|
||||
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
|
||||
if (!backlight_data)
|
||||
|
@ -459,14 +460,39 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
|
|||
|
||||
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
|
||||
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
|
||||
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
|
||||
|
||||
if (bdb->version >= 234) {
|
||||
u16 min_level;
|
||||
bool scale;
|
||||
|
||||
level = backlight_data->brightness_level[panel_type].level;
|
||||
min_level = backlight_data->brightness_min_level[panel_type].level;
|
||||
|
||||
if (bdb->version >= 236)
|
||||
scale = backlight_data->brightness_precision_bits[panel_type] == 16;
|
||||
else
|
||||
scale = level > 255;
|
||||
|
||||
if (scale)
|
||||
min_level = min_level / 255;
|
||||
|
||||
if (min_level > 255) {
|
||||
drm_warn(&dev_priv->drm, "Brightness min level > 255\n");
|
||||
level = 255;
|
||||
}
|
||||
dev_priv->vbt.backlight.min_brightness = min_level;
|
||||
} else {
|
||||
level = backlight_data->level[panel_type];
|
||||
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"VBT backlight PWM modulation frequency %u Hz, "
|
||||
"active %s, min brightness %u, level %u, controller %u\n",
|
||||
dev_priv->vbt.backlight.pwm_freq_hz,
|
||||
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
|
||||
dev_priv->vbt.backlight.min_brightness,
|
||||
backlight_data->level[panel_type],
|
||||
level,
|
||||
dev_priv->vbt.backlight.controller);
|
||||
}
|
||||
|
||||
|
@ -1602,7 +1628,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
|
|||
const u8 *ddc_pin_map;
|
||||
int n_entries;
|
||||
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
|
||||
return vbt_pin;
|
||||
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
|
||||
ddc_pin_map = icp_ddc_pin_map;
|
||||
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
|
||||
} else if (HAS_PCH_CNP(dev_priv)) {
|
||||
|
@ -1660,20 +1688,18 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
|
|||
[PORT_I] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
|
||||
};
|
||||
/*
|
||||
* Bspec lists the ports as A, B, C, D - however internally in our
|
||||
* driver we keep them as PORT_A, PORT_B, PORT_D and PORT_E so the
|
||||
* registers in Display Engine match the right offsets. Apply the
|
||||
* mapping here to translate from VBT to internal convention.
|
||||
* RKL VBT uses PHY based mapping. Combo PHYs A,B,C,D
|
||||
* map to DDI A,B,TC1,TC2 respectively.
|
||||
*/
|
||||
static const int rkl_port_mapping[][3] = {
|
||||
[PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
|
||||
[PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
|
||||
[PORT_C] = { -1 },
|
||||
[PORT_D] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
|
||||
[PORT_E] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
|
||||
[PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
|
||||
[PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
|
||||
};
|
||||
|
||||
if (IS_ROCKETLAKE(dev_priv))
|
||||
if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
|
||||
return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
|
||||
ARRAY_SIZE(rkl_port_mapping[0]),
|
||||
rkl_port_mapping,
|
||||
|
@ -1889,7 +1915,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
|
|||
expected_size = 37;
|
||||
} else if (bdb->version <= 215) {
|
||||
expected_size = 38;
|
||||
} else if (bdb->version <= 229) {
|
||||
} else if (bdb->version <= 237) {
|
||||
expected_size = 39;
|
||||
} else {
|
||||
expected_size = sizeof(*child);
|
||||
|
@ -2638,10 +2664,16 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
|
|||
aux_ch = AUX_CH_B;
|
||||
break;
|
||||
case DP_AUX_C:
|
||||
aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_D : AUX_CH_C;
|
||||
/*
|
||||
* RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
|
||||
* map to DDI A,B,TC1,TC2 respectively.
|
||||
*/
|
||||
aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
|
||||
AUX_CH_USBC1 : AUX_CH_C;
|
||||
break;
|
||||
case DP_AUX_D:
|
||||
aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_E : AUX_CH_D;
|
||||
aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
|
||||
AUX_CH_USBC2 : AUX_CH_D;
|
||||
break;
|
||||
case DP_AUX_E:
|
||||
aux_ch = AUX_CH_E;
|
||||
|
|
|
@ -1233,6 +1233,30 @@ static const struct intel_cdclk_vals icl_cdclk_table[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static const struct intel_cdclk_vals rkl_cdclk_table[] = {
|
||||
{ .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
|
||||
{ .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
|
||||
{ .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
|
||||
{ .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
|
||||
{ .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
|
||||
{ .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },
|
||||
|
||||
{ .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
|
||||
{ .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
|
||||
{ .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
|
||||
{ .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
|
||||
{ .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
|
||||
{ .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },
|
||||
|
||||
{ .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
|
||||
{ .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
|
||||
{ .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
|
||||
{ .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
|
||||
{ .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
|
||||
{ .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
|
||||
{}
|
||||
};
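Each entry above appears to encode cdclk = refclk * ratio / divider (all values in kHz), which gives a quick way to sanity-check the table:

/*
 * Illustrative check of a few rkl_cdclk_table entries:
 *   19200 * 36 / 4  = 172800
 *   24000 * 108 / 8 = 324000
 *   38400 * 68 / 4  = 652800
 */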
|
||||
|
||||
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
|
||||
{
|
||||
const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
|
||||
|
@ -2588,7 +2612,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_ELKHARTLAKE(dev_priv)) {
|
||||
if (IS_JSL_EHL(dev_priv)) {
|
||||
if (dev_priv->cdclk.hw.ref == 24000)
|
||||
dev_priv->max_cdclk_freq = 552000;
|
||||
else
|
||||
|
@ -2680,6 +2704,18 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
|
|||
DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
|
||||
}
|
||||
|
||||
static int dg1_rawclk(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/*
|
||||
* DG1 always uses a 38.4 MHz rawclk. The bspec tells us
|
||||
* "Program Numerator=2, Denominator=4, Divider=37 decimal."
|
||||
*/
|
||||
I915_WRITE(PCH_RAWCLK_FREQ,
|
||||
CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
|
||||
|
||||
return 38400;
|
||||
}
|
||||
|
||||
static int cnp_rawclk(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 rawclk;
|
||||
|
@ -2788,7 +2824,9 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
u32 freq;
|
||||
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
|
||||
freq = dg1_rawclk(dev_priv);
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
|
||||
freq = cnp_rawclk(dev_priv);
|
||||
else if (HAS_PCH_SPLIT(dev_priv))
|
||||
freq = pch_rawclk(dev_priv);
|
||||
|
@ -2809,13 +2847,19 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (INTEL_GEN(dev_priv) >= 12) {
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
dev_priv->display.set_cdclk = bxt_set_cdclk;
|
||||
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
|
||||
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
|
||||
dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
|
||||
dev_priv->cdclk.table = rkl_cdclk_table;
|
||||
} else if (INTEL_GEN(dev_priv) >= 12) {
|
||||
dev_priv->display.set_cdclk = bxt_set_cdclk;
|
||||
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
|
||||
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
|
||||
dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
|
||||
dev_priv->cdclk.table = icl_cdclk_table;
|
||||
} else if (IS_ELKHARTLAKE(dev_priv)) {
|
||||
} else if (IS_JSL_EHL(dev_priv)) {
|
||||
dev_priv->display.set_cdclk = bxt_set_cdclk;
|
||||
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
|
||||
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
|
||||
|
|
|
@ -638,10 +638,17 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
|
|||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
|
||||
|
||||
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
|
||||
switch (crtc_state->gamma_mode) {
|
||||
case GAMMA_MODE_MODE_8BIT:
|
||||
ilk_load_lut_8(crtc, gamma_lut);
|
||||
else
|
||||
break;
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
ilk_load_lut_10(crtc, gamma_lut);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int ivb_lut_10_size(u32 prec_index)
|
||||
|
@ -745,21 +752,27 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
|
|||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
|
||||
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
|
||||
const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
|
||||
|
||||
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
|
||||
ilk_load_lut_8(crtc, gamma_lut);
|
||||
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
|
||||
switch (crtc_state->gamma_mode) {
|
||||
case GAMMA_MODE_MODE_8BIT:
|
||||
ilk_load_lut_8(crtc, blob);
|
||||
break;
|
||||
case GAMMA_MODE_MODE_SPLIT:
|
||||
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
|
||||
PAL_PREC_INDEX_VALUE(0));
|
||||
ivb_load_lut_ext_max(crtc_state);
|
||||
ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
|
||||
PAL_PREC_INDEX_VALUE(512));
|
||||
} else {
|
||||
const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
|
||||
|
||||
break;
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
ivb_load_lut_10(crtc, blob,
|
||||
PAL_PREC_INDEX_VALUE(0));
|
||||
ivb_load_lut_ext_max(crtc_state);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -768,21 +781,28 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
|
|||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
|
||||
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
|
||||
const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
|
||||
|
||||
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
|
||||
ilk_load_lut_8(crtc, gamma_lut);
|
||||
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
|
||||
switch (crtc_state->gamma_mode) {
|
||||
case GAMMA_MODE_MODE_8BIT:
|
||||
ilk_load_lut_8(crtc, blob);
|
||||
break;
|
||||
case GAMMA_MODE_MODE_SPLIT:
|
||||
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
|
||||
PAL_PREC_INDEX_VALUE(0));
|
||||
ivb_load_lut_ext_max(crtc_state);
|
||||
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
|
||||
PAL_PREC_INDEX_VALUE(512));
|
||||
} else {
|
||||
const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
|
||||
break;
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
|
||||
bdw_load_lut_10(crtc, blob,
|
||||
PAL_PREC_INDEX_VALUE(0));
|
||||
ivb_load_lut_ext_max(crtc_state);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -818,12 +838,14 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
|
|||
* as compared to just 16 to achieve this.
|
||||
*/
|
||||
intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe),
|
||||
lut[i].green);
|
||||
lut[i].green);
|
||||
}
|
||||
|
||||
/* Clamp values > 1.0. */
|
||||
while (i++ < 35)
|
||||
intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
|
||||
|
||||
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
|
||||
}
|
||||
|
||||
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
|
||||
|
@ -851,6 +873,8 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
|
|||
/* Clamp values > 1.0. */
|
||||
while (i++ < 35)
|
||||
intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
|
||||
|
||||
intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
|
||||
}
|
||||
|
||||
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
|
||||
|
@ -871,11 +895,17 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
|
|||
else
|
||||
glk_load_degamma_lut_linear(crtc_state);
|
||||
|
||||
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
|
||||
switch (crtc_state->gamma_mode) {
|
||||
case GAMMA_MODE_MODE_8BIT:
|
||||
ilk_load_lut_8(crtc, gamma_lut);
|
||||
} else {
|
||||
break;
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
|
||||
ivb_load_lut_ext_max(crtc_state);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1007,9 +1037,13 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
|
|||
icl_program_gamma_superfine_segment(crtc_state);
|
||||
icl_program_gamma_multi_segment(crtc_state);
|
||||
break;
|
||||
default:
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
|
||||
ivb_load_lut_ext_max(crtc_state);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
|
||||
intel_dsb_commit(crtc_state);
|
||||
|
@ -1026,13 +1060,6 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
|
|||
return drm_color_lut_extract(color->red, 14);
|
||||
}
|
||||
|
||||
static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
|
||||
{
|
||||
entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
|
||||
entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
|
||||
entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
|
||||
}
|
||||
|
||||
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
|
||||
const struct drm_property_blob *blob)
|
||||
{
|
||||
|
@ -1060,6 +1087,13 @@ static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
|
|||
return drm_color_lut_extract(color->red, 10);
|
||||
}
|
||||
|
||||
static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
|
||||
{
|
||||
entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
|
||||
entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
|
||||
entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
|
||||
}
|
||||
|
||||
static void chv_load_cgm_gamma(struct intel_crtc *crtc,
|
||||
const struct drm_property_blob *blob)
|
||||
{
|
||||
|
@ -1733,7 +1767,7 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
|
|||
break;
|
||||
default:
|
||||
MISSING_CASE(gamma_mode);
|
||||
return false;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -1913,23 +1947,34 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
|
|||
if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
|
||||
return;
|
||||
|
||||
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
|
||||
switch (crtc_state->gamma_mode) {
|
||||
case GAMMA_MODE_MODE_8BIT:
|
||||
crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
|
||||
else
|
||||
break;
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
|
||||
/* On BDW+ the index auto increment mode actually works */
|
||||
static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
|
||||
u32 prec_index)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
int i, hw_lut_size = ivb_lut_10_size(prec_index);
|
||||
int lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
struct drm_property_blob *blob;
|
||||
struct drm_color_lut *lut;
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, lut_size != hw_lut_size);
|
||||
|
||||
blob = drm_property_create_blob(&dev_priv->drm,
|
||||
sizeof(struct drm_color_lut) * hw_lut_size,
|
||||
sizeof(struct drm_color_lut) * lut_size,
|
||||
NULL);
|
||||
if (IS_ERR(blob))
|
||||
return NULL;
|
||||
|
@ -1939,7 +1984,7 @@ static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
|
|||
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
|
||||
prec_index | PAL_PREC_AUTO_INCREMENT);
|
||||
|
||||
for (i = 0; i < hw_lut_size; i++) {
|
||||
for (i = 0; i < lut_size; i++) {
|
||||
u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
|
||||
|
||||
ilk_lut_10_pack(&lut[i], val);
|
||||
|
@ -1957,10 +2002,17 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
|
|||
if (!crtc_state->gamma_enable)
|
||||
return;
|
||||
|
||||
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
|
||||
switch (crtc_state->gamma_mode) {
|
||||
case GAMMA_MODE_MODE_8BIT:
|
||||
crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
|
||||
else
|
||||
crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
|
||||
break;
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static struct drm_property_blob *
|
||||
|
@ -2012,11 +2064,15 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
|
|||
case GAMMA_MODE_MODE_8BIT:
|
||||
crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
|
||||
break;
|
||||
case GAMMA_MODE_MODE_10BIT:
|
||||
crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
|
||||
break;
|
||||
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
|
||||
crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
|
||||
break;
|
||||
default:
|
||||
crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
|
||||
MISSING_CASE(crtc_state->gamma_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -188,8 +188,9 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
|
|||
* PHY-B and may not even have instances of the register for the
|
||||
* other combo PHY's.
|
||||
*/
|
||||
if (IS_ELKHARTLAKE(i915) ||
|
||||
IS_ROCKETLAKE(i915))
|
||||
if (IS_JSL_EHL(i915) ||
|
||||
IS_ROCKETLAKE(i915) ||
|
||||
IS_DG1(i915))
|
||||
return phy < PHY_C;
|
||||
|
||||
return true;
|
||||
|
@ -242,14 +243,14 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
|
|||
*
|
||||
* ICL,TGL:
|
||||
* A(master) -> B(slave), C(slave)
|
||||
* RKL:
|
||||
* RKL,DG1:
|
||||
* A(master) -> B(slave)
|
||||
* C(master) -> D(slave)
|
||||
*
|
||||
* We must set the IREFGEN bit for any PHY acting as a master
|
||||
* to another PHY.
|
||||
*/
|
||||
if (IS_ROCKETLAKE(dev_priv) && phy == PHY_C)
|
||||
if ((IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) && phy == PHY_C)
|
||||
return true;
|
||||
|
||||
return phy == PHY_A;
|
||||
|
@ -282,7 +283,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
|
|||
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
|
||||
IREFGEN, IREFGEN);
|
||||
|
||||
if (IS_ELKHARTLAKE(dev_priv)) {
|
||||
if (IS_JSL_EHL(dev_priv)) {
|
||||
if (ehl_vbt_ddi_d_present(dev_priv))
|
||||
expected_val = ICL_PHY_MISC_MUX_DDID;
|
||||
|
||||
|
@ -376,7 +377,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
|
|||
* "internal" child devices.
|
||||
*/
|
||||
val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
|
||||
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) {
|
||||
if (IS_JSL_EHL(dev_priv) && phy == PHY_A) {
|
||||
val &= ~ICL_PHY_MISC_MUX_DDID;
|
||||
|
||||
if (ehl_vbt_ddi_d_present(dev_priv))
|
||||
|
|
|
@ -40,13 +40,16 @@
|
|||
|
||||
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
|
||||
|
||||
#define DG1_CSR_PATH "i915/dg1_dmc_ver2_02.bin"
|
||||
#define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
|
||||
MODULE_FIRMWARE(DG1_CSR_PATH);
|
||||
|
||||
#define RKL_CSR_PATH "i915/rkl_dmc_ver2_02.bin"
|
||||
#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
|
||||
MODULE_FIRMWARE(RKL_CSR_PATH);
|
||||
|
||||
#define TGL_CSR_PATH "i915/tgl_dmc_ver2_08.bin"
|
||||
#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 8)
|
||||
#define TGL_CSR_MAX_FW_SIZE 0x6000
|
||||
MODULE_FIRMWARE(TGL_CSR_PATH);
|
||||
|
||||
#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin"
|
||||
|
@ -686,14 +689,17 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
intel_csr_runtime_pm_get(dev_priv);
|
||||
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
if (IS_DG1(dev_priv)) {
|
||||
csr->fw_path = DG1_CSR_PATH;
|
||||
csr->required_version = DG1_CSR_VERSION_REQUIRED;
|
||||
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
|
||||
} else if (IS_ROCKETLAKE(dev_priv)) {
|
||||
csr->fw_path = RKL_CSR_PATH;
|
||||
csr->required_version = RKL_CSR_VERSION_REQUIRED;
|
||||
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
|
||||
} else if (INTEL_GEN(dev_priv) >= 12) {
|
||||
csr->fw_path = TGL_CSR_PATH;
|
||||
csr->required_version = TGL_CSR_VERSION_REQUIRED;
|
||||
/* Allow to load fw via parameter using the last known size */
|
||||
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
|
||||
} else if (IS_GEN(dev_priv, 11)) {
|
||||
csr->fw_path = ICL_CSR_PATH;
|
||||
|
|
File diff suppressed because it is too large
@ -7,6 +7,7 @@
|
|||
#define __INTEL_DDI_H__
|
||||
|
||||
#include "intel_display.h"
|
||||
#include "i915_reg.h"
|
||||
|
||||
struct drm_connector_state;
|
||||
struct drm_i915_private;
|
||||
|
@ -18,6 +19,10 @@ struct intel_dpll_hw_state;
|
|||
struct intel_encoder;
|
||||
enum transcoder;
|
||||
|
||||
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
|
||||
struct intel_encoder *intel_encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
|
@ -41,8 +46,10 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
|
|||
bool state);
|
||||
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
u32 bxt_signal_levels(struct intel_dp *intel_dp);
|
||||
u32 ddi_signal_levels(struct intel_dp *intel_dp);
|
||||
u32 bxt_signal_levels(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
u32 ddi_signal_levels(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
|
||||
enum transcoder cpu_transcoder,
|
||||
bool enable);
|
||||
|
|
|
@ -154,7 +154,7 @@ static void ilk_pch_clock_get(struct intel_crtc *crtc,
|
|||
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
|
||||
struct drm_i915_gem_object *obj,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd);
|
||||
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
|
||||
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
|
||||
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
|
||||
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_link_m_n *m_n,
|
||||
|
@ -1808,6 +1808,17 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
|
|||
static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
u32 mode_flags = crtc->mode_flags;
|
||||
|
||||
/*
|
||||
* From gen11, in case of DSI command mode, the frame counter wouldn't
* have updated at the beginning of TE. If we want to use the hw
* counter, we would find it updated only at the next TE, hence
* switch to the sw counter.
*/
|
||||
if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* On i965gm the hardware frame counter reads
|
||||
|
@ -1990,13 +2001,17 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
|
|||
return ccs_plane - fb->format->num_planes / 2;
|
||||
}
|
||||
|
||||
/* Return either the main plane's CCS or - if not a CCS FB - UV plane */
|
||||
int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(fb->dev);
|
||||
|
||||
if (is_ccs_modifier(fb->modifier))
|
||||
return main_to_ccs_plane(fb, main_plane);
|
||||
|
||||
return 1;
|
||||
else if (INTEL_GEN(i915) < 11 &&
|
||||
intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -3930,7 +3945,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
|
|||
* main surface offset, and it must be non-negative. Make
|
||||
* sure that is what we will get.
|
||||
*/
|
||||
if (offset > aux_offset)
|
||||
if (aux_plane && offset > aux_offset)
|
||||
offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
|
||||
offset, aux_offset & ~(alignment - 1));
|
||||
|
||||
|
@ -4019,8 +4034,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
|
|||
|
||||
if (is_ccs_modifier(fb->modifier)) {
|
||||
int ccs_plane = main_to_ccs_plane(fb, uv_plane);
|
||||
int aux_offset = plane_state->color_plane[ccs_plane].offset;
|
||||
int alignment = intel_surf_alignment(fb, uv_plane);
|
||||
u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
|
||||
u32 alignment = intel_surf_alignment(fb, uv_plane);
|
||||
|
||||
if (offset > aux_offset)
|
||||
offset = intel_plane_adjust_aligned_offset(&x, &y,
|
||||
|
@ -4128,7 +4143,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
|
|||
}
|
||||
|
||||
for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
|
||||
plane_state->color_plane[i].offset = ~0xfff;
|
||||
plane_state->color_plane[i].offset = 0;
|
||||
plane_state->color_plane[i].x = 0;
|
||||
plane_state->color_plane[i].y = 0;
|
||||
}
|
||||
|
@ -4786,6 +4801,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
|
|||
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
||||
u32 plane_ctl = 0;
|
||||
|
||||
if (crtc_state->uapi.async_flip)
|
||||
plane_ctl |= PLANE_CTL_ASYNC_FLIP;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
|
||||
return plane_ctl;
|
||||
|
||||
|
@ -5023,18 +5041,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
|
|||
intel_pps_unlock_regs_wa(dev_priv);
|
||||
intel_modeset_init_hw(dev_priv);
|
||||
intel_init_clock_gating(dev_priv);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->display.hpd_irq_setup)
|
||||
dev_priv->display.hpd_irq_setup(dev_priv);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
intel_hpd_init(dev_priv);
|
||||
|
||||
ret = __intel_display_resume(dev, state, ctx);
|
||||
if (ret)
|
||||
drm_err(&dev_priv->drm,
|
||||
"Restoring old state failed with %i\n", ret);
|
||||
|
||||
intel_hpd_init(dev_priv);
|
||||
intel_hpd_poll_disable(dev_priv);
|
||||
}
|
||||
|
||||
drm_atomic_state_put(state);
|
||||
|
@ -6275,6 +6289,105 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
|
|||
skl_detach_scaler(crtc, i);
|
||||
}
|
||||
|
||||
static int cnl_coef_tap(int i)
|
||||
{
|
||||
return i % 7;
|
||||
}
|
||||
|
||||
static u16 cnl_nearest_filter_coef(int t)
|
||||
{
|
||||
return t == 3 ? 0x0800 : 0x3000;
|
||||
}
|
||||
|
||||
/*
|
||||
* Theory behind setting nearest-neighbor integer scaling:
|
||||
*
|
||||
* 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
|
||||
* The letter represents the filter tap (D is the center tap) and the number
|
||||
* represents the coefficient set for a phase (0-16).
|
||||
*
|
||||
* +------------+------------------------+------------------------+
|
||||
* |Index value | Data value coefficient 1 | Data value coefficient 2 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 00h | B0 | A0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 01h | D0 | C0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 02h | F0 | E0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 03h | A1 | G0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 04h | C1 | B1 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | ... | ... | ... |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 38h | B16 | A16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 39h | D16 | C16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 3Ah | F16 | E16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 3Bh | Reserved | G16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
*
|
||||
* To enable nearest-neighbor scaling: program scaler coefficients with
|
||||
* the center tap (Dxx) values set to 1 and all other values set to 0 as per
|
||||
* SCALER_COEFFICIENT_FORMAT
|
||||
*
|
||||
*/
|
||||
|
||||
static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, int id, int set)
|
||||
{
|
||||
int i;
|
||||
|
||||
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
|
||||
PS_COEE_INDEX_AUTO_INC);
|
||||
|
||||
for (i = 0; i < 17 * 7; i += 2) {
|
||||
u32 tmp;
|
||||
int t;
|
||||
|
||||
t = cnl_coef_tap(i);
|
||||
tmp = cnl_nearest_filter_coef(t);
|
||||
|
||||
t = cnl_coef_tap(i + 1);
|
||||
tmp |= cnl_nearest_filter_coef(t) << 16;
|
||||
|
||||
intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
|
||||
tmp);
|
||||
}
|
||||
|
||||
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
|
||||
}
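Given cnl_coef_tap() and cnl_nearest_filter_coef() above, every phase is programmed as a unit impulse on the center tap (index 3, the "D" tap in the table); per the comment, 0x0800 encodes a coefficient of 1 and 0x3000 encodes 0 in SCALER_COEFFICIENT_FORMAT:

/*
 * Per-phase coefficient vector written by the loop above
 * (taps A..G, center tap D at index 3):
 *   value:   0      0      0      1      0      0      0
 *   encoded: 0x3000 0x3000 0x3000 0x0800 0x3000 0x3000 0x3000
 */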
|
||||
|
||||
inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
|
||||
{
|
||||
if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
|
||||
return (PS_FILTER_PROGRAMMED |
|
||||
PS_Y_VERT_FILTER_SELECT(set) |
|
||||
PS_Y_HORZ_FILTER_SELECT(set) |
|
||||
PS_UV_VERT_FILTER_SELECT(set) |
|
||||
PS_UV_HORZ_FILTER_SELECT(set));
|
||||
}
|
||||
|
||||
return PS_FILTER_MEDIUM;
|
||||
}
|
||||
|
||||
void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
int id, int set, enum drm_scaling_filter filter)
|
||||
{
|
||||
switch (filter) {
|
||||
case DRM_SCALING_FILTER_DEFAULT:
|
||||
break;
|
||||
case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
|
||||
cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(filter);
|
||||
}
|
||||
}
|
||||
|
||||
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
|
@ -6295,6 +6408,7 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
|
|||
int hscale, vscale;
|
||||
unsigned long irqflags;
|
||||
int id;
|
||||
u32 ps_ctrl;
|
||||
|
||||
if (!crtc_state->pch_pfit.enabled)
|
||||
return;
|
||||
|
@ -6311,10 +6425,16 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
|
|||
|
||||
id = scaler_state->scaler_id;
|
||||
|
||||
ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
|
||||
ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
|
||||
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
|
||||
skl_scaler_setup_filter(dev_priv, pipe, id, 0,
|
||||
crtc_state->hw.scaling_filter);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
|
||||
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
|
||||
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
|
||||
|
@ -6560,6 +6680,43 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
|
|||
icl_wa_scalerclkgating(dev_priv, pipe, false);
|
||||
}
|
||||
|
||||
static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_plane *plane;
|
||||
struct intel_plane_state *new_plane_state;
|
||||
int i;
|
||||
|
||||
for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
|
||||
u32 update_mask = new_crtc_state->update_planes;
|
||||
u32 plane_ctl, surf_addr;
|
||||
enum plane_id plane_id;
|
||||
unsigned long irqflags;
|
||||
enum pipe pipe;
|
||||
|
||||
if (crtc->pipe != plane->pipe ||
|
||||
!(update_mask & BIT(plane->id)))
|
||||
continue;
|
||||
|
||||
plane_id = plane->id;
|
||||
pipe = plane->pipe;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
|
||||
surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
|
||||
|
||||
plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
|
||||
|
||||
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
|
||||
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
}
|
||||
|
||||
intel_wait_for_vblank(dev_priv, crtc->pipe);
|
||||
}
|
||||
|
||||
static void intel_pre_plane_update(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
|
@ -6645,6 +6802,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
|
|||
*/
|
||||
if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
|
||||
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
|
||||
|
||||
/*
|
||||
* WA for platforms where async address update enable bit
|
||||
* is double buffered and only latched at start of vblank.
|
||||
*/
|
||||
if (old_crtc_state->uapi.async_flip &&
|
||||
!new_crtc_state->uapi.async_flip &&
|
||||
IS_GEN_RANGE(dev_priv, 9, 10))
|
||||
skl_disable_async_flip_wa(state, crtc, new_crtc_state);
|
||||
}
|
||||
|
||||
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
|
||||
|
@ -6944,7 +7110,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
|
|||
if (intel_crtc_has_dp_encoder(new_crtc_state))
|
||||
intel_dp_set_m_n(new_crtc_state, M1_N1);
|
||||
|
||||
intel_set_pipe_timings(new_crtc_state);
|
||||
intel_set_transcoder_timings(new_crtc_state);
|
||||
intel_set_pipe_src_size(new_crtc_state);
|
||||
|
||||
if (new_crtc_state->has_pch_encoder)
|
||||
|
@ -7089,7 +7255,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
|
|||
intel_encoders_pre_enable(state, crtc);
|
||||
|
||||
if (!transcoder_is_dsi(cpu_transcoder))
|
||||
intel_set_pipe_timings(new_crtc_state);
|
||||
intel_set_transcoder_timings(new_crtc_state);
|
||||
|
||||
intel_set_pipe_src_size(new_crtc_state);
|
||||
|
||||
|
@ -7275,7 +7441,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
|
|||
return false;
|
||||
else if (IS_ROCKETLAKE(dev_priv))
|
||||
return phy <= PHY_D;
|
||||
else if (IS_ELKHARTLAKE(dev_priv))
|
||||
else if (IS_JSL_EHL(dev_priv))
|
||||
return phy <= PHY_C;
|
||||
else if (INTEL_GEN(dev_priv) >= 11)
|
||||
return phy <= PHY_B;
|
||||
|
@ -7289,7 +7455,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
|
|||
return false;
|
||||
else if (INTEL_GEN(dev_priv) >= 12)
|
||||
return phy >= PHY_D && phy <= PHY_I;
|
||||
else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
|
||||
else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv))
|
||||
return phy >= PHY_C && phy <= PHY_F;
|
||||
else
|
||||
return false;
|
||||
|
@ -7297,23 +7463,23 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
|
|||
|
||||
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
|
||||
{
|
||||
if (IS_ROCKETLAKE(i915) && port >= PORT_D)
|
||||
return (enum phy)port - 1;
|
||||
else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
|
||||
if (IS_ROCKETLAKE(i915) && port >= PORT_TC1)
|
||||
return PHY_C + port - PORT_TC1;
|
||||
else if (IS_JSL_EHL(i915) && port == PORT_D)
|
||||
return PHY_A;
|
||||
|
||||
return (enum phy)port;
|
||||
return PHY_A + port - PORT_A;
|
||||
}
|
||||
|
||||
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
|
||||
return PORT_TC_NONE;
|
||||
return TC_PORT_NONE;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12)
|
||||
return port - PORT_D;
|
||||
|
||||
return port - PORT_C;
|
||||
return TC_PORT_1 + port - PORT_TC1;
|
||||
else
|
||||
return TC_PORT_1 + port - PORT_C;
|
||||
}
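For reference, a self-contained sketch of the same mapping arithmetic, using the PORT_TC*/TC_PORT_* aliases this series adds to intel_display.h further down. The cut-down enums here are illustrative only, not the driver's definitions.

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E,
	    PORT_TC1 = PORT_D, PORT_TC2 = PORT_E };
enum phy { PHY_A, PHY_B, PHY_C, PHY_D, PHY_E };
enum tc_port { TC_PORT_NONE = -1, TC_PORT_1 = 0, TC_PORT_2 };

/* RKL routes its TC-capable DDIs through combo PHYs starting at PHY_C. */
static enum phy rkl_port_to_phy(enum port port)
{
	if (port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	return PHY_A + port - PORT_A;
}

/* TGL+ TC ports start at PORT_TC1 (== PORT_D) and map 1:1 onto TC_PORT_1.. */
static enum tc_port tgl_port_to_tc(enum port port)
{
	if (port < PORT_TC1)
		return TC_PORT_NONE;
	return TC_PORT_1 + port - PORT_TC1;
}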
|
||||
|
||||
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
|
||||
|
@ -7484,7 +7650,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
|
|||
if (intel_crtc_has_dp_encoder(new_crtc_state))
|
||||
intel_dp_set_m_n(new_crtc_state, M1_N1);
|
||||
|
||||
intel_set_pipe_timings(new_crtc_state);
|
||||
intel_set_transcoder_timings(new_crtc_state);
|
||||
intel_set_pipe_src_size(new_crtc_state);
|
||||
|
||||
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
|
||||
|
@ -7552,7 +7718,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
|
|||
if (intel_crtc_has_dp_encoder(new_crtc_state))
|
||||
intel_dp_set_m_n(new_crtc_state, M1_N1);
|
||||
|
||||
intel_set_pipe_timings(new_crtc_state);
|
||||
intel_set_transcoder_timings(new_crtc_state);
|
||||
intel_set_pipe_src_size(new_crtc_state);
|
||||
|
||||
i9xx_set_pipeconf(new_crtc_state);
|
||||
|
@ -8806,7 +8972,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
|
|||
crtc_state->dpll_hw_state.dpll = dpll;
|
||||
}
|
||||
|
||||
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
|
||||
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
|
@ -8892,8 +9058,8 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
|
|||
return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
|
||||
}
|
||||
|
||||
static void intel_get_pipe_timings(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
@ -9516,7 +9682,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
|
|||
if (INTEL_GEN(dev_priv) < 4)
|
||||
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
|
||||
|
||||
intel_get_pipe_timings(crtc, pipe_config);
|
||||
intel_get_transcoder_timings(crtc, pipe_config);
|
||||
intel_get_pipe_src_size(crtc, pipe_config);
|
||||
|
||||
i9xx_get_pfit_config(pipe_config);
|
||||
|
@ -10801,7 +10967,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
|
|||
pipe_config->pixel_multiplier = 1;
|
||||
}
|
||||
|
||||
intel_get_pipe_timings(crtc, pipe_config);
|
||||
intel_get_transcoder_timings(crtc, pipe_config);
|
||||
intel_get_pipe_src_size(crtc, pipe_config);
|
||||
|
||||
ilk_get_pfit_config(pipe_config);
|
||||
|
@ -11218,7 +11384,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
|
|||
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
|
||||
INTEL_GEN(dev_priv) >= 11) {
|
||||
hsw_get_ddi_port_state(crtc, pipe_config);
|
||||
intel_get_pipe_timings(crtc, pipe_config);
|
||||
intel_get_transcoder_timings(crtc, pipe_config);
|
||||
}
|
||||
|
||||
intel_get_pipe_src_size(crtc, pipe_config);
|
||||
|
@ -11234,18 +11400,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
|
|||
} else {
|
||||
pipe_config->output_format =
|
||||
bdw_get_pipemisc_output_format(crtc);
|
||||
|
||||
/*
|
||||
* Currently there is no interface defined to
|
||||
* check user preference between RGB/YCBCR444
|
||||
* or YCBCR420. So the only possible case for
|
||||
* YCBCR444 usage is driving YCBCR420 output
|
||||
* with LSPCON, when pipe is configured for
|
||||
* YCBCR444 output and LSPCON takes care of
|
||||
* downsampling it.
|
||||
*/
|
||||
pipe_config->lspcon_downsampling =
|
||||
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
|
||||
}
|
||||
|
||||
pipe_config->gamma_mode = intel_de_read(dev_priv,
|
||||
|
@ -11817,6 +11971,9 @@ static void i9xx_update_cursor(struct intel_plane *plane,
|
|||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
skl_write_cursor_wm(plane, crtc_state);
|
||||
|
||||
if (!needs_modeset(crtc_state))
|
||||
intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
|
||||
|
||||
if (plane->cursor.base != base ||
|
||||
plane->cursor.size != fbc_ctl ||
|
||||
plane->cursor.cntl != cntl) {
|
||||
|
@ -12828,8 +12985,11 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
|
|||
|
||||
}
|
||||
|
||||
if (!mode_changed)
|
||||
intel_psr2_sel_fetch_update(state, crtc);
|
||||
if (!mode_changed) {
|
||||
ret = intel_psr2_sel_fetch_update(state, crtc);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -13102,6 +13262,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
|
|||
transcoder_name(pipe_config->cpu_transcoder),
|
||||
pipe_config->pipe_bpp, pipe_config->dither);
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
|
||||
transcoder_name(pipe_config->mst_master_transcoder));
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
|
||||
transcoder_name(pipe_config->master_transcoder),
|
||||
|
@ -13199,8 +13362,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
|
|||
pipe_config->csc_mode, pipe_config->gamma_mode,
|
||||
pipe_config->gamma_enable, pipe_config->csc_enable);
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
|
||||
transcoder_name(pipe_config->mst_master_transcoder));
|
||||
drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
|
||||
pipe_config->hw.degamma_lut ?
|
||||
drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
|
||||
pipe_config->hw.gamma_lut ?
|
||||
drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
|
||||
|
||||
dump_planes:
|
||||
if (!state)
|
||||
|
@ -13294,6 +13460,7 @@ intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
|
|||
crtc_state->hw.active = crtc_state->uapi.active;
|
||||
crtc_state->hw.mode = crtc_state->uapi.mode;
|
||||
crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
|
||||
crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
|
||||
intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
|
||||
}
|
||||
|
||||
|
@ -13305,6 +13472,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
|
|||
drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
|
||||
|
||||
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
|
||||
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
|
||||
|
||||
/* copy color blobs to uapi */
|
||||
drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
|
||||
|
@ -14852,8 +15020,10 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
|
|||
int i;
|
||||
|
||||
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
int ret = intel_crtc_atomic_check(state, crtc);
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
int ret;
|
||||
|
||||
ret = intel_crtc_atomic_check(state, crtc);
|
||||
if (ret) {
|
||||
drm_dbg_atomic(&i915->drm,
|
||||
"[CRTC:%d:%s] atomic driver check failed\n",
|
||||
|
@ -14882,6 +15052,139 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
 * DOC: asynchronous flip implementation
 *
 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
 * Correspondingly, support is currently added for the primary plane only.
 *
 * Async flip can only change the plane surface address, so anything else
 * changing is rejected by the intel_atomic_check_async() function.
 * Once this check is cleared, the flip done interrupt is enabled using
 * the skl_enable_flip_done() function.
 *
 * As soon as the surface address register is written, a flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
 * handler itself. The timestamp and sequence sent during the flip done event
 * correspond to the last vblank and have no relation to the actual time when
 * the flip done event was sent.
 */
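For context, this is how the path above is typically exercised from userspace; a minimal sketch assuming an already set up DRM fd, CRTC id and framebuffer id (none of this is part of the diff):

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/*
 * Request an asynchronous flip of the primary plane to fb_id. Completion is
 * reported through a page-flip event on the DRM fd, to be collected with
 * drmHandleEvent().
 */
static int request_async_flip(int drm_fd, uint32_t crtc_id, uint32_t fb_id,
			      void *user_data)
{
	int ret = drmModePageFlip(drm_fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT |
				  DRM_MODE_PAGE_FLIP_ASYNC,
				  user_data);

	if (ret)
		fprintf(stderr, "async page flip failed: %d\n", ret);

	return ret;
}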
|
||||
static int intel_atomic_check_async(struct intel_atomic_state *state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
|
||||
const struct intel_plane_state *new_plane_state, *old_plane_state;
|
||||
struct intel_crtc *crtc;
|
||||
struct intel_plane *plane;
|
||||
int i;
|
||||
|
||||
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
|
||||
new_crtc_state, i) {
|
||||
if (needs_modeset(new_crtc_state)) {
|
||||
drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!new_crtc_state->hw.active) {
|
||||
drm_dbg_kms(&i915->drm, "CRTC inactive\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Active planes cannot be changed during async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
|
||||
new_plane_state, i) {
|
||||
/*
|
||||
* TODO: Async flip is only supported through the page flip IOCTL
|
||||
* as of now. So support currently added for primary plane only.
|
||||
* Support for other planes on platforms that support
|
||||
* this (vlv/chv and icl+) should be added when async flip is
|
||||
* enabled in the atomic IOCTL path.
|
||||
*/
|
||||
if (plane->id != PLANE_PRIMARY)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* FIXME: This check is kept generic for all platforms.
|
||||
* Need to verify this for all gen9 and gen10 platforms to enable
|
||||
* this selectively if required.
|
||||
*/
|
||||
switch (new_plane_state->hw.fb->modifier) {
|
||||
case I915_FORMAT_MOD_X_TILED:
|
||||
case I915_FORMAT_MOD_Y_TILED:
|
||||
case I915_FORMAT_MOD_Yf_TILED:
|
||||
break;
|
||||
default:
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Linear memory/CCS does not support async flips\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->color_plane[0].stride !=
|
||||
new_plane_state->color_plane[0].stride) {
|
||||
drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->hw.fb->modifier !=
|
||||
new_plane_state->hw.fb->modifier) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Framebuffer modifiers cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->hw.fb->format !=
|
||||
new_plane_state->hw.fb->format) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Framebuffer format cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->hw.rotation !=
|
||||
new_plane_state->hw.rotation) {
|
||||
drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
|
||||
!drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Plane size/co-ordinates cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
|
||||
drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->hw.pixel_blend_mode !=
|
||||
new_plane_state->hw.pixel_blend_mode) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Pixel blend mode cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Color encoding cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
|
||||
drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_atomic_check - validate state object
|
||||
* @dev: drm device
|
||||
|
@ -15050,6 +15353,12 @@ static int intel_atomic_check(struct drm_device *dev,
|
|||
|
||||
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
|
||||
new_crtc_state, i) {
|
||||
if (new_crtc_state->uapi.async_flip) {
|
||||
ret = intel_atomic_check_async(state);
|
||||
if (ret)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (!needs_modeset(new_crtc_state) &&
|
||||
!new_crtc_state->update_pipe)
|
||||
continue;
|
||||
|
@ -15615,6 +15924,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
|
|||
|
||||
intel_dbuf_pre_plane_update(state);
|
||||
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
if (new_crtc_state->uapi.async_flip)
|
||||
skl_enable_flip_done(crtc);
|
||||
}
|
||||
|
||||
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
|
||||
dev_priv->display.commit_modeset_enables(state);
|
||||
|
||||
|
@ -15636,6 +15950,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
|
|||
drm_atomic_helper_wait_for_flip_done(dev, &state->base);
|
||||
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
if (new_crtc_state->uapi.async_flip)
|
||||
skl_disable_flip_done(crtc);
|
||||
|
||||
if (new_crtc_state->hw.active &&
|
||||
!needs_modeset(new_crtc_state) &&
|
||||
!new_crtc_state->preload_luts &&
|
||||
|
@ -16750,6 +17067,11 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
|
|||
dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 10)
|
||||
drm_crtc_create_scaling_filter_property(&crtc->base,
|
||||
BIT(DRM_SCALING_FILTER_DEFAULT) |
|
||||
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
|
||||
|
||||
intel_color_init(crtc);
|
||||
|
||||
intel_crtc_crc_init(crtc);
|
||||
|
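On the userspace side, the CRTC "SCALING_FILTER" property created above is driven through the regular atomic property interface. A hedged sketch, assuming the property id has already been looked up via drmModeObjectGetProperties() and that value 1 corresponds to the "Nearest Neighbor" enum entry:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative: select nearest-neighbor scaling on one CRTC. */
static int set_nearest_neighbor_scaling(int drm_fd, uint32_t crtc_id,
					uint32_t scaling_filter_prop_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* 1 corresponds to DRM_SCALING_FILTER_NEAREST_NEIGHBOR */
	drmModeAtomicAddProperty(req, crtc_id, scaling_filter_prop_id, 1);
	ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET,
				  NULL);
	drmModeAtomicFree(req);

	return ret;
}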
@ -16894,19 +17216,19 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
|
|||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
intel_ddi_init(dev_priv, PORT_A);
|
||||
intel_ddi_init(dev_priv, PORT_B);
|
||||
intel_ddi_init(dev_priv, PORT_D); /* DDI TC1 */
|
||||
intel_ddi_init(dev_priv, PORT_E); /* DDI TC2 */
|
||||
intel_ddi_init(dev_priv, PORT_TC1);
|
||||
intel_ddi_init(dev_priv, PORT_TC2);
|
||||
} else if (INTEL_GEN(dev_priv) >= 12) {
|
||||
intel_ddi_init(dev_priv, PORT_A);
|
||||
intel_ddi_init(dev_priv, PORT_B);
|
||||
intel_ddi_init(dev_priv, PORT_D);
|
||||
intel_ddi_init(dev_priv, PORT_E);
|
||||
intel_ddi_init(dev_priv, PORT_F);
|
||||
intel_ddi_init(dev_priv, PORT_G);
|
||||
intel_ddi_init(dev_priv, PORT_H);
|
||||
intel_ddi_init(dev_priv, PORT_I);
|
||||
intel_ddi_init(dev_priv, PORT_TC1);
|
||||
intel_ddi_init(dev_priv, PORT_TC2);
|
||||
intel_ddi_init(dev_priv, PORT_TC2);
|
||||
intel_ddi_init(dev_priv, PORT_TC4);
|
||||
intel_ddi_init(dev_priv, PORT_TC5);
|
||||
intel_ddi_init(dev_priv, PORT_TC6);
|
||||
icl_dsi_init(dev_priv);
|
||||
} else if (IS_ELKHARTLAKE(dev_priv)) {
|
||||
} else if (IS_JSL_EHL(dev_priv)) {
|
||||
intel_ddi_init(dev_priv, PORT_A);
|
||||
intel_ddi_init(dev_priv, PORT_B);
|
||||
intel_ddi_init(dev_priv, PORT_C);
|
||||
|
@ -17741,6 +18063,8 @@ retry:
|
|||
}
|
||||
|
||||
if (crtc_state->hw.active) {
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
/*
|
||||
* We've not yet detected sink capabilities
|
||||
* (audio,infoframes,etc.) and thus we don't want to
|
||||
|
@ -17762,22 +18086,15 @@ retry:
|
|||
*/
|
||||
crtc_state->uapi.color_mgmt_changed = true;
|
||||
|
||||
/*
|
||||
* FIXME hack to force full modeset when DSC is being
|
||||
* used.
|
||||
*
|
||||
* As long as we do not have full state readout and
|
||||
* config comparison of crtc_state->dsc, we have no way
|
||||
* to ensure reliable fastset. Remove once we have
|
||||
* readout for DSC.
|
||||
*/
|
||||
if (crtc_state->dsc.compression_enable) {
|
||||
ret = drm_atomic_add_affected_connectors(state,
|
||||
&crtc->base);
|
||||
if (ret)
|
||||
goto out;
|
||||
crtc_state->uapi.mode_changed = true;
|
||||
drm_dbg_kms(dev, "Force full modeset for DSC\n");
|
||||
for_each_intel_encoder_mask(dev, encoder,
|
||||
crtc_state->uapi.encoder_mask) {
|
||||
if (encoder->initial_fastset_check &&
|
||||
!encoder->initial_fastset_check(encoder, crtc_state)) {
|
||||
ret = drm_atomic_add_affected_connectors(state,
|
||||
&crtc->base);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -17816,6 +18133,9 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
|
|||
|
||||
mode_config->funcs = &intel_mode_funcs;
|
||||
|
||||
if (INTEL_GEN(i915) >= 9)
|
||||
mode_config->async_page_flip = true;
|
||||
|
||||
/*
|
||||
* Maximum framebuffer dimensions, chosen to match
|
||||
* the maximum render engine surface size on gen4+.
|
||||
|
@ -18049,6 +18369,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
|
|||
|
||||
/* Only enable hotplug handling once the fbdev is fully set up. */
|
||||
intel_hpd_init(i915);
|
||||
intel_hpd_poll_disable(i915);
|
||||
|
||||
intel_init_ipc(i915);
|
||||
|
||||
|
@ -18515,6 +18836,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
|
|||
|
||||
encoder->base.crtc = &crtc->base;
|
||||
encoder->get_config(encoder, crtc_state);
|
||||
if (encoder->sync_state)
|
||||
encoder->sync_state(encoder, crtc_state);
|
||||
} else {
|
||||
encoder->base.crtc = NULL;
|
||||
}
|
||||
|
@ -18685,6 +19008,15 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
|
|||
intel_de_write(dev_priv, CHICKEN_PAR1_1,
|
||||
intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
|
||||
}
|
||||
|
||||
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
|
||||
/* Display WA #1142:kbl,cfl,cml */
|
||||
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
|
||||
KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
|
||||
intel_de_rmw(dev_priv, CHICKEN_MISC_2,
|
||||
KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
|
||||
KBL_ARB_FILL_SPARE_14);
|
||||
}
|
||||
}
|
||||
|
||||
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <drm/drm_util.h>
|
||||
|
||||
enum link_m_n_set;
|
||||
enum drm_scaling_filter;
|
||||
struct dpll;
|
||||
struct drm_connector;
|
||||
struct drm_device;
|
||||
|
@ -207,6 +208,14 @@ enum port {
|
|||
PORT_H,
|
||||
PORT_I,
|
||||
|
||||
/* tgl+ */
|
||||
PORT_TC1 = PORT_D,
|
||||
PORT_TC2,
|
||||
PORT_TC3,
|
||||
PORT_TC4,
|
||||
PORT_TC5,
|
||||
PORT_TC6,
|
||||
|
||||
I915_MAX_PORTS
|
||||
};
|
||||
|
||||
|
@ -243,14 +252,14 @@ static inline const char *port_identifier(enum port port)
|
|||
}
|
||||
|
||||
enum tc_port {
|
||||
PORT_TC_NONE = -1,
|
||||
TC_PORT_NONE = -1,
|
||||
|
||||
PORT_TC1 = 0,
|
||||
PORT_TC2,
|
||||
PORT_TC3,
|
||||
PORT_TC4,
|
||||
PORT_TC5,
|
||||
PORT_TC6,
|
||||
TC_PORT_1 = 0,
|
||||
TC_PORT_2,
|
||||
TC_PORT_3,
|
||||
TC_PORT_4,
|
||||
TC_PORT_5,
|
||||
TC_PORT_6,
|
||||
|
||||
I915_MAX_TC_PORTS
|
||||
};
|
||||
|
@ -282,6 +291,14 @@ enum aux_ch {
|
|||
AUX_CH_G,
|
||||
AUX_CH_H,
|
||||
AUX_CH_I,
|
||||
|
||||
/* tgl+ */
|
||||
AUX_CH_USBC1 = AUX_CH_D,
|
||||
AUX_CH_USBC2,
|
||||
AUX_CH_USBC3,
|
||||
AUX_CH_USBC4,
|
||||
AUX_CH_USBC5,
|
||||
AUX_CH_USBC6,
|
||||
};
|
||||
|
||||
#define aux_ch_name(a) ((a) + 'A')
|
||||
|
@ -599,6 +616,9 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
|
|||
|
||||
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
|
||||
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
|
||||
u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set);
|
||||
void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
int id, int set, enum drm_scaling_filter filter);
|
||||
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
|
||||
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state);
|
||||
|
|
|
@ -518,8 +518,13 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
|
|||
CSR_VERSION_MINOR(csr->version));
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12) {
|
||||
dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
|
||||
dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
|
||||
if (IS_DGFX(dev_priv)) {
|
||||
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
|
||||
} else {
|
||||
dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
|
||||
dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
|
||||
}
|
||||
|
||||
/*
|
||||
* NOTE: DMC_DEBUG3 is a general purpose reg.
|
||||
* According to B.Specs:49196 DMC f/w reuses DC5/6 counter
|
||||
|
|
|
@ -1424,6 +1424,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
|
|||
return;
|
||||
|
||||
intel_hpd_init(dev_priv);
|
||||
intel_hpd_poll_disable(dev_priv);
|
||||
|
||||
/* Re-enable the ADPA, if we have one */
|
||||
for_each_intel_encoder(&dev_priv->drm, encoder) {
|
||||
|
@ -1449,7 +1450,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
|
|||
|
||||
/* Prevent us from re-enabling polling by accident in late suspend */
|
||||
if (!dev_priv->drm.dev->power.is_suspended)
|
||||
intel_hpd_poll_init(dev_priv);
|
||||
intel_hpd_poll_enable(dev_priv);
|
||||
}
|
||||
|
||||
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
|
@ -3650,7 +3651,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
|
|||
.name = "DDI F IO power well",
|
||||
.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
.id = CNL_DISP_PW_DDI_F_IO,
|
||||
{
|
||||
.hsw.regs = &hsw_power_well_regs,
|
||||
.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
|
||||
|
@ -3660,7 +3661,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
|
|||
.name = "AUX F",
|
||||
.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
.id = CNL_DISP_PW_DDI_F_AUX,
|
||||
{
|
||||
.hsw.regs = &hsw_power_well_regs,
|
||||
.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
|
||||
|
@ -4150,7 +4151,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
|||
.name = "TC cold off",
|
||||
.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
|
||||
.ops = &tgl_tc_cold_off_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
.id = TGL_DISP_PW_TC_COLD_OFF,
|
||||
},
|
||||
{
|
||||
.name = "AUX A",
|
||||
|
@ -4492,7 +4493,10 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
|
|||
int max_dc;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12) {
|
||||
max_dc = 4;
|
||||
if (IS_DG1(dev_priv))
|
||||
max_dc = 3;
|
||||
else
|
||||
max_dc = 4;
|
||||
/*
|
||||
* DC9 has a separate HW flow from the rest of the DC states,
|
||||
* not depending on the DMC firmware. It's needed by system
|
||||
|
@ -4554,13 +4558,18 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
|
|||
static int
|
||||
__set_power_wells(struct i915_power_domains *power_domains,
|
||||
const struct i915_power_well_desc *power_well_descs,
|
||||
int power_well_count)
|
||||
int power_well_descs_sz, u64 skip_mask)
|
||||
{
|
||||
struct drm_i915_private *i915 = container_of(power_domains,
|
||||
struct drm_i915_private,
|
||||
power_domains);
|
||||
u64 power_well_ids = 0;
|
||||
int i;
|
||||
int power_well_count = 0;
|
||||
int i, plt_idx = 0;
|
||||
|
||||
for (i = 0; i < power_well_descs_sz; i++)
|
||||
if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
|
||||
power_well_count++;
|
||||
|
||||
power_domains->power_well_count = power_well_count;
|
||||
power_domains->power_wells =
|
||||
|
@ -4570,10 +4579,14 @@ __set_power_wells(struct i915_power_domains *power_domains,
|
|||
if (!power_domains->power_wells)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < power_well_count; i++) {
|
||||
for (i = 0; i < power_well_descs_sz; i++) {
|
||||
enum i915_power_well_id id = power_well_descs[i].id;
|
||||
|
||||
power_domains->power_wells[i].desc = &power_well_descs[i];
|
||||
if (BIT_ULL(id) & skip_mask)
|
||||
continue;
|
||||
|
||||
power_domains->power_wells[plt_idx++].desc =
|
||||
&power_well_descs[i];
|
||||
|
||||
if (id == DISP_PW_ID_NONE)
|
||||
continue;
|
||||
|
@ -4586,9 +4599,12 @@ __set_power_wells(struct i915_power_domains *power_domains,
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define set_power_wells(power_domains, __power_well_descs) \
|
||||
#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
|
||||
__set_power_wells(power_domains, __power_well_descs, \
|
||||
ARRAY_SIZE(__power_well_descs))
|
||||
ARRAY_SIZE(__power_well_descs), skip_mask)
|
||||
|
||||
#define set_power_wells(power_domains, __power_well_descs) \
|
||||
set_power_wells_mask(power_domains, __power_well_descs, 0)
|
||||
|
||||
/**
|
||||
* intel_power_domains_init - initializes the power domain structures
|
||||
|
@ -4622,23 +4638,21 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
|
|||
* The enabling order will be from lower to higher indexed wells,
|
||||
* the disabling order is reversed.
|
||||
*/
|
||||
if (IS_ROCKETLAKE(dev_priv)) {
|
||||
if (IS_DG1(dev_priv)) {
|
||||
err = set_power_wells_mask(power_domains, tgl_power_wells,
|
||||
BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
|
||||
} else if (IS_ROCKETLAKE(dev_priv)) {
|
||||
err = set_power_wells(power_domains, rkl_power_wells);
|
||||
} else if (IS_GEN(dev_priv, 12)) {
|
||||
err = set_power_wells(power_domains, tgl_power_wells);
|
||||
} else if (IS_GEN(dev_priv, 11)) {
|
||||
err = set_power_wells(power_domains, icl_power_wells);
|
||||
} else if (IS_CANNONLAKE(dev_priv)) {
|
||||
} else if (IS_CNL_WITH_PORT_F(dev_priv)) {
|
||||
err = set_power_wells(power_domains, cnl_power_wells);
|
||||
|
||||
/*
|
||||
* DDI and Aux IO are getting enabled for all ports
|
||||
* regardless of their presence or use. So, in order to avoid
|
||||
* timeouts, let's remove them from the list
|
||||
* for the SKUs without port F.
|
||||
*/
|
||||
if (!IS_CNL_WITH_PORT_F(dev_priv))
|
||||
power_domains->power_well_count -= 2;
|
||||
} else if (IS_CANNONLAKE(dev_priv)) {
|
||||
err = set_power_wells_mask(power_domains, cnl_power_wells,
|
||||
BIT_ULL(CNL_DISP_PW_DDI_F_IO) |
|
||||
BIT_ULL(CNL_DISP_PW_DDI_F_AUX));
|
||||
} else if (IS_GEMINILAKE(dev_priv)) {
|
||||
err = set_power_wells(power_domains, glk_power_wells);
|
||||
} else if (IS_BROXTON(dev_priv)) {
|
||||
|
@ -4758,6 +4772,17 @@ static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
|
|||
gen9_dbuf_slices_update(dev_priv, 0);
|
||||
}
|
||||
|
||||
static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
const int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
|
||||
enum dbuf_slice slice;
|
||||
|
||||
for (slice = DBUF_S1; slice < (DBUF_S1 + num_slices); slice++)
|
||||
intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
|
||||
DBUF_TRACKER_STATE_SERVICE_MASK,
|
||||
DBUF_TRACKER_STATE_SERVICE(8));
|
||||
}
|
||||
|
||||
static void icl_mbus_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
|
||||
|
@ -5263,8 +5288,9 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
|
|||
unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
|
||||
int config, i;
|
||||
|
||||
if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
|
||||
/* Wa_1409767108: tgl */
|
||||
if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
|
||||
IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
|
||||
/* Wa_1409767108:tgl,dg1 */
|
||||
table = wa_1409767108_buddy_page_masks;
|
||||
else
|
||||
table = tgl_buddy_page_masks;
|
||||
|
@ -5326,6 +5352,9 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
|
|||
/* 4. Enable CDCLK. */
|
||||
intel_cdclk_init_hw(dev_priv);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12)
|
||||
gen12_dbuf_slices_config(dev_priv);
|
||||
|
||||
/* 5. Enable DBUF. */
|
||||
gen9_dbuf_enable(dev_priv);
|
||||
|
||||
|
|
|
@ -101,8 +101,11 @@ enum i915_power_well_id {
|
|||
SKL_DISP_PW_MISC_IO,
|
||||
SKL_DISP_PW_1,
|
||||
SKL_DISP_PW_2,
|
||||
CNL_DISP_PW_DDI_F_IO,
|
||||
CNL_DISP_PW_DDI_F_AUX,
|
||||
ICL_DISP_PW_3,
|
||||
SKL_DISP_DC_OFF,
|
||||
TGL_DISP_PW_TC_COLD_OFF,
|
||||
};
|
||||
|
||||
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
|
||||
|
|
|
@ -187,6 +187,21 @@ struct intel_encoder {
|
|||
* be set correctly before calling this function. */
|
||||
void (*get_config)(struct intel_encoder *,
|
||||
struct intel_crtc_state *pipe_config);
|
||||
|
||||
/*
|
||||
* Optional hook called during init/resume to sync any state
|
||||
* stored in the encoder (eg. DP link parameters) wrt. the HW state.
|
||||
*/
|
||||
void (*sync_state)(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
|
||||
/*
|
||||
* Optional hook, returning true if this encoder allows a fastset
|
||||
* during the initial commit, false otherwise.
|
||||
*/
|
||||
bool (*initial_fastset_check)(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
|
||||
/*
|
||||
* Acquires the power domains needed for an active encoder during
|
||||
* hardware state readout.
|
||||
|
@ -199,6 +214,11 @@ struct intel_encoder {
|
|||
* device interrupts are disabled.
|
||||
*/
|
||||
void (*suspend)(struct intel_encoder *);
|
||||
/*
|
||||
* Called during system reboot/shutdown after all the
|
||||
* encoders have been disabled and suspended.
|
||||
*/
|
||||
void (*shutdown)(struct intel_encoder *encoder);
|
||||
enum hpd_pin hpd_pin;
|
||||
enum intel_display_power_domain power_domain;
|
||||
/* for communication with audio component; protected by av_mutex */
|
||||
|
@ -515,6 +535,7 @@ struct intel_plane_state {
|
|||
unsigned int rotation;
|
||||
enum drm_color_encoding color_encoding;
|
||||
enum drm_color_range color_range;
|
||||
enum drm_scaling_filter scaling_filter;
|
||||
} hw;
|
||||
|
||||
struct i915_ggtt_view view;
|
||||
|
@ -805,6 +826,7 @@ struct intel_crtc_state {
|
|||
bool active, enable;
|
||||
struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
|
||||
struct drm_display_mode mode, adjusted_mode;
|
||||
enum drm_scaling_filter scaling_filter;
|
||||
} hw;
|
||||
|
||||
/**
|
||||
|
@ -1035,9 +1057,6 @@ struct intel_crtc_state {
|
|||
/* Output format RGB/YCBCR etc */
|
||||
enum intel_output_format output_format;
|
||||
|
||||
/* Output down scaling is done in LSPCON device */
|
||||
bool lspcon_downsampling;
|
||||
|
||||
/* enable pipe gamma? */
|
||||
bool gamma_enable;
|
||||
|
||||
|
@ -1183,6 +1202,9 @@ struct intel_plane {
|
|||
struct intel_plane_state *plane_state);
|
||||
int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state);
|
||||
void (*async_flip)(struct intel_plane *plane,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state);
|
||||
};
|
||||
|
||||
struct intel_watermark_params {
|
||||
|
@ -1270,7 +1292,6 @@ struct intel_dp {
|
|||
int link_rate;
|
||||
u8 lane_count;
|
||||
u8 sink_count;
|
||||
bool link_mst;
|
||||
bool link_trained;
|
||||
bool has_hdmi_sink;
|
||||
bool has_audio;
|
||||
|
@ -1280,6 +1301,8 @@ struct intel_dp {
|
|||
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
|
||||
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
|
||||
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
|
||||
u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
|
||||
u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
|
||||
u8 fec_capable;
|
||||
/* source rates */
|
||||
int num_source_rates;
|
||||
|
@ -1312,8 +1335,6 @@ struct intel_dp {
|
|||
unsigned long last_backlight_off;
|
||||
ktime_t panel_power_off_time;
|
||||
|
||||
struct notifier_block edp_notifier;
|
||||
|
||||
/*
|
||||
* Pipe whose power sequencer is currently locked into
|
||||
* this port. Only relevant on VLV/CHV.
|
||||
|
@ -1336,14 +1357,6 @@ struct intel_dp {
|
|||
bool is_mst;
|
||||
int active_mst_links;
|
||||
|
||||
/*
|
||||
* DP_TP_* registers may be either on port or transcoder register space.
|
||||
*/
|
||||
struct {
|
||||
i915_reg_t dp_tp_ctl;
|
||||
i915_reg_t dp_tp_status;
|
||||
} regs;
|
||||
|
||||
/* connector directly attached - won't be use for modeset in mst world */
|
||||
struct intel_connector *attached_connector;
|
||||
|
||||
|
@ -1363,13 +1376,19 @@ struct intel_dp {
|
|||
i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
|
||||
|
||||
/* This is called before link training is started */
|
||||
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
|
||||
void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat);
|
||||
void (*set_idle_link_train)(struct intel_dp *intel_dp);
|
||||
void (*set_signal_levels)(struct intel_dp *intel_dp);
|
||||
void (*prepare_link_retrain)(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void (*set_link_train)(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
u8 dp_train_pat);
|
||||
void (*set_idle_link_train)(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void (*set_signal_levels)(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
|
||||
u8 (*preemph_max)(struct intel_dp *intel_dp);
|
||||
u8 (*voltage_max)(struct intel_dp *intel_dp);
|
||||
u8 (*voltage_max)(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
|
||||
/* Displayport compliance testing */
|
||||
struct intel_dp_compliance compliance;
|
||||
|
|
File diff suppressed because it is too large
|
@ -10,6 +10,7 @@
|
|||
|
||||
#include "i915_reg.h"
|
||||
|
||||
enum intel_output_format;
|
||||
enum pipe;
|
||||
enum port;
|
||||
struct drm_connector_state;
|
||||
|
@ -35,7 +36,7 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
|
|||
struct link_config_limits *limits);
|
||||
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state);
|
||||
int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state);
|
||||
int intel_dp_min_bpp(enum intel_output_format output_format);
|
||||
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t dp_reg, enum port port,
|
||||
enum pipe *pipe);
|
||||
|
@ -44,19 +45,19 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
|
|||
bool intel_dp_init_connector(struct intel_digital_port *dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
void intel_dp_set_link_params(struct intel_dp *intel_dp,
|
||||
int link_rate, u8 lane_count,
|
||||
bool link_mst);
|
||||
int link_rate, int lane_count);
|
||||
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
|
||||
int link_rate, u8 lane_count);
|
||||
int intel_dp_retrain_link(struct intel_encoder *encoder,
|
||||
struct drm_modeset_acquire_ctx *ctx);
|
||||
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
|
||||
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
|
||||
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
|
||||
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
bool enable);
|
||||
void intel_dp_encoder_reset(struct drm_encoder *encoder);
|
||||
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
|
||||
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder);
|
||||
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
|
||||
int intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
|
@ -92,16 +93,15 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
|
|||
|
||||
void
|
||||
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
u8 dp_train_pat);
|
||||
void
|
||||
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
|
||||
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
|
||||
intel_dp_set_signal_levels(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
|
||||
u8 *link_bw, u8 *rate_select);
|
||||
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
|
||||
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
|
||||
bool
|
||||
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
|
||||
|
||||
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
|
||||
int intel_dp_link_required(int pixel_clock, int bpp);
|
||||
|
@ -122,7 +122,6 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
|
|||
struct intel_crtc_state *crtc_state,
|
||||
unsigned int type);
|
||||
bool intel_digital_port_connected(struct intel_encoder *encoder);
|
||||
void intel_dp_process_phy_request(struct intel_dp *intel_dp);
|
||||
|
||||
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
|
||||
{
|
||||
|
@ -139,4 +138,9 @@ void intel_ddi_update_pipe(struct intel_atomic_state *state,
|
|||
int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
|
||||
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
void intel_dp_sync_state(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
|
||||
#endif /* __INTEL_DP_H__ */
|
||||
|
|
|
@ -343,8 +343,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
|
|||
* the panel can support backlight control over the aux channel
|
||||
*/
|
||||
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
|
||||
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
|
||||
!(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
|
||||
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) {
|
||||
drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -34,6 +34,152 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
|
|||
link_status[3], link_status[4], link_status[5]);
|
||||
}
|
||||
|
||||
static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
|
||||
{
|
||||
int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
|
||||
|
||||
/*
|
||||
* Pretend no LTTPRs in case of LTTPR detection error, or
|
||||
* if too many (>8) LTTPRs are detected. This translates to link
|
||||
* training in transparent mode.
|
||||
*/
|
||||
return count <= 0 ? 0 : count;
|
||||
}
|
||||
|
||||
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
|
||||
{
|
||||
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
|
||||
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
|
||||
}
|
||||
|
||||
static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
|
||||
char *buf, size_t buf_size)
|
||||
{
|
||||
if (dp_phy == DP_PHY_DPRX)
|
||||
snprintf(buf, buf_size, "DPRX");
|
||||
else
|
||||
snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
|
||||
enum drm_dp_phy dp_phy)
|
||||
{
|
||||
return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
|
||||
}
|
||||
|
||||
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
|
||||
enum drm_dp_phy dp_phy)
|
||||
{
|
||||
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
|
||||
char phy_name[10];
|
||||
|
||||
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
|
||||
|
||||
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
|
||||
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
|
||||
"failed to read the PHY caps for %s\n",
|
||||
phy_name);
|
||||
return;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
|
||||
"%s PHY capabilities: %*ph\n",
|
||||
phy_name,
|
||||
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
|
||||
phy_caps);
|
||||
}
|
||||
|
||||
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
|
||||
{
|
||||
if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
|
||||
intel_dp->lttpr_common_caps) < 0) {
|
||||
memset(intel_dp->lttpr_common_caps, 0,
|
||||
sizeof(intel_dp->lttpr_common_caps));
|
||||
return false;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
|
||||
"LTTPR common capabilities: %*ph\n",
|
||||
(int)sizeof(intel_dp->lttpr_common_caps),
|
||||
intel_dp->lttpr_common_caps);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
|
||||
{
|
||||
u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
|
||||
DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
|
||||
|
||||
return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
|
||||
}
|
||||
|
||||
/**
 * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common capabilities, switch to non-transparent link training
 * mode if any LTTPRs are detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error, or if more LTTPRs
 * than are supported (8) are detected, fall back to the no-LTTPR,
 * transparent link training mode.
 *
 * Returns:
 * >0 if LTTPRs were detected and the non-transparent LT mode was set
 * 0 if no LTTPRs or more than 8 LTTPRs were detected, or in case of a
 * detection failure, and the transparent LT mode was set
 */
|
||||
int intel_dp_lttpr_init(struct intel_dp *intel_dp)
|
||||
{
|
||||
int lttpr_count;
|
||||
bool ret;
|
||||
int i;
|
||||
|
||||
if (intel_dp_is_edp(intel_dp))
|
||||
return 0;
|
||||
|
||||
ret = intel_dp_read_lttpr_common_caps(intel_dp);
|
||||
|
||||
/*
|
||||
* See DP Standard v2.0 3.6.6.1. about the explicit disabling of
|
||||
* non-transparent mode and the disable->enable non-transparent mode
|
||||
* sequence.
|
||||
*/
|
||||
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
|
||||
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
lttpr_count = intel_dp_lttpr_count(intel_dp);
|
||||
|
||||
/*
|
||||
* In case of an unsupported number of LTTPRs, or a failure to switch to
|
||||
* non-transparent mode, fall back to transparent link training mode,
|
||||
* still taking into account any LTTPR common lane count/rate limits.
|
||||
*/
|
||||
if (lttpr_count == 0)
|
||||
return 0;
|
||||
|
||||
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
|
||||
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
|
||||
"Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
|
||||
|
||||
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
|
||||
intel_dp_reset_lttpr_count(intel_dp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < lttpr_count; i++)
|
||||
intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));
|
||||
|
||||
return lttpr_count;
|
||||
}
|
||||
EXPORT_SYMBOL(intel_dp_lttpr_init);
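A hedged sketch of how the return value documented above feeds into the training sequence; first_phy_to_train() is a made-up helper, and the real driver iterates over all LTTPR PHYs before finally training the DPRX:

/*
 * Illustrative only: in non-transparent mode each LTTPR PHY is trained in
 * turn, starting with the one closest to the source, before the DPRX itself.
 */
static enum drm_dp_phy first_phy_to_train(struct intel_dp *intel_dp)
{
	int lttpr_count = intel_dp_lttpr_init(intel_dp);

	if (lttpr_count > 0)
		return DP_PHY_LTTPR(lttpr_count - 1);

	/* Transparent mode (or no LTTPRs): only the DPRX is trained. */
	return DP_PHY_DPRX;
}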
|
||||
|
||||
static u8 dp_voltage_max(u8 preemph)
|
||||
{
|
||||
switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
|
||||
|
@ -49,36 +195,109 @@ static u8 dp_voltage_max(u8 preemph)
|
|||
}
|
||||
}
|
||||
|
||||
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
|
||||
const u8 link_status[DP_LINK_STATUS_SIZE])
|
||||
static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
|
||||
enum drm_dp_phy dp_phy)
|
||||
{
|
||||
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
|
||||
|
||||
if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
|
||||
else
|
||||
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
}
|
||||
|
||||
static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
|
||||
enum drm_dp_phy dp_phy)
|
||||
{
|
||||
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
|
||||
|
||||
if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
|
||||
return DP_TRAIN_PRE_EMPH_LEVEL_3;
|
||||
else
|
||||
return DP_TRAIN_PRE_EMPH_LEVEL_2;
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
|
||||
enum drm_dp_phy dp_phy)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
int lttpr_count = intel_dp_lttpr_count(intel_dp);
|
||||
|
||||
drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);
|
||||
|
||||
return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
|
||||
}
|
||||
|
||||
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
enum drm_dp_phy dp_phy)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
u8 voltage_max;
|
||||
|
||||
/*
|
||||
* Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
|
||||
* the DPRX_PHY we train.
|
||||
*/
|
||||
if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
|
||||
voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
|
||||
else
|
||||
voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
|
||||
|
||||
drm_WARN_ON_ONCE(&i915->drm,
|
||||
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
|
||||
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
|
||||
|
||||
return voltage_max;
|
||||
}
|
||||
|
||||
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
|
||||
enum drm_dp_phy dp_phy)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
u8 preemph_max;
|
||||
|
||||
/*
|
||||
* Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
|
||||
* the DPRX_PHY we train.
|
||||
*/
|
||||
if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
|
||||
preemph_max = intel_dp->preemph_max(intel_dp);
|
||||
else
|
||||
preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
|
||||
|
||||
drm_WARN_ON_ONCE(&i915->drm,
|
||||
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
|
||||
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
|
||||
|
||||
return preemph_max;
|
||||
}
|
||||
|
||||
void
|
||||
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
enum drm_dp_phy dp_phy,
|
||||
const u8 link_status[DP_LINK_STATUS_SIZE])
|
||||
{
|
||||
u8 v = 0;
|
||||
u8 p = 0;
|
||||
int lane;
|
||||
u8 voltage_max;
|
||||
u8 preemph_max;
|
||||
|
||||
for (lane = 0; lane < intel_dp->lane_count; lane++) {
|
||||
for (lane = 0; lane < crtc_state->lane_count; lane++) {
|
||||
v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
|
||||
p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
}

preemph_max = intel_dp->preemph_max(intel_dp);
drm_WARN_ON_ONCE(&i915->drm,
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

v = min(v, dp_voltage_max(p));

voltage_max = intel_dp->voltage_max(intel_dp);
drm_WARN_ON_ONCE(&i915->drm,
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

@@ -86,59 +305,70 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
intel_dp->train_set[lane] = v | p;
}

static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
return dp_phy == DP_PHY_DPRX ?
DP_TRAINING_PATTERN_SET :
DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
u8 dp_train_pat)
{
int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
u8 buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
int len;

intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
dp_train_pat);

buf[0] = dp_train_pat;
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
DP_TRAINING_PATTERN_DISABLE) {
/* don't write DP_TRAINING_LANEx_SET on disable */
len = 1;
} else {
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
len = intel_dp->lane_count + 1;
}
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
len = crtc_state->lane_count + 1;

ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
buf, len);

return ret == len;
return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
return intel_dp_set_link_train(intel_dp, dp_train_pat);
intel_dp_set_signal_levels(intel_dp, crtc_state);
return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

static bool
intel_dp_update_link_train(struct intel_dp *intel_dp)
intel_dp_update_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
int reg = dp_phy == DP_PHY_DPRX ?
DP_TRAINING_LANE0_SET :
DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
int ret;

intel_dp_set_signal_levels(intel_dp);
intel_dp_set_signal_levels(intel_dp, crtc_state);

ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, intel_dp->lane_count);
ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
intel_dp->train_set, crtc_state->lane_count);

return ret == intel_dp->lane_count;
return ret == crtc_state->lane_count;
}

static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
int lane;

for (lane = 0; lane < intel_dp->lane_count; lane++)
for (lane = 0; lane < crtc_state->lane_count; lane++)
if ((intel_dp->train_set[lane] &
DP_TRAIN_MAX_SWING_REACHED) == 0)
return false;
@@ -146,21 +376,22 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
return true;
}

/* Enable corresponding port and start training pattern 1 */
/*
* Prepare link training by configuring the link parameters. On DDI platforms
* also enable the port here.
*/
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
u8 link_config[2];
u8 link_bw, rate_select;

if (intel_dp->prepare_link_retrain)
intel_dp->prepare_link_retrain(intel_dp);
intel_dp->prepare_link_retrain(intel_dp, crtc_state);

intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
&link_bw, &rate_select);

if (link_bw)
@@ -172,7 +403,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)

/* Write the link configuration data */
link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
link_config[1] = crtc_state->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
@@ -188,8 +419,34 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)

intel_dp->DP |= DP_PORT_EN;

return true;
}

static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX)
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
else
drm_dp_lttpr_link_train_clock_recovery_delay();
}

/*
* Perform the link training clock recovery phase on the given DP PHY using
* training pattern 1.
*/
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;

/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp,
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
drm_err(&i915->drm, "failed to enable link training\n");
@@ -213,14 +470,15 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
u8 link_status[DP_LINK_STATUS_SIZE];

drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

if (!intel_dp_get_link_status(intel_dp, link_status)) {
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm, "failed to get link status\n");
return false;
}

if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
drm_dbg_kms(&i915->drm, "clock recovery OK\n");
return true;
}
@@ -239,8 +497,9 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

/* Update training set as requested by target */
intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"failed to update link training\n");
return false;
@@ -252,7 +511,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
else
voltage_tries = 1;

if (intel_dp_link_max_vswing_reached(intel_dp))
if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
max_vswing_reached = true;

}
@@ -266,7 +525,9 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
* or for 1.4 devices that support it, training Pattern 3 for HBR2
* or 1.2 devices that support it, Training Pattern 2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
bool source_tps3, sink_tps3, source_tps4, sink_tps4;

@@ -275,12 +536,14 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
* for all downstream devices that support HBR3. There are no known eDP
* panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
* specification.
* LTTPRs must support TPS4.
*/
source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd);
sink_tps4 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps4_supported(intel_dp->dpcd);
if (source_tps4 && sink_tps4) {
return DP_TRAINING_PATTERN_4;
} else if (intel_dp->link_rate == 810000) {
} else if (crtc_state->port_clock == 810000) {
if (!source_tps4)
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"8.1 Gbps link rate without source HBR3/TPS4 support\n");
@@ -294,10 +557,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
* all sinks follow the spec.
*/
source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
sink_tps3 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
return DP_TRAINING_PATTERN_3;
} else if (intel_dp->link_rate >= 540000) {
} else if (crtc_state->port_clock >= 540000) {
if (!source_tps3)
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
@@ -309,8 +573,28 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return DP_TRAINING_PATTERN_2;
}

static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX) {
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
} else {
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
}
}

/*
* Perform the link training channel equalization phase on the given DP PHY
* using one of training pattern 2, 3 or 4 depending on the source and
* sink capabilities.
*/
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int tries;
@@ -318,22 +602,23 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;

training_pattern = intel_dp_training_pattern(intel_dp);
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
if (training_pattern != DP_TRAINING_PATTERN_4)
training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
training_pattern)) {
drm_err(&i915->drm, "failed to start channel equalization\n");
return false;
}

for (tries = 0; tries < 5; tries++) {

drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_training_channel_equalization_delay(intel_dp,
dp_phy);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm,
"failed to get link status\n");
break;
@@ -341,7 +626,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)

/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
crtc_state->lane_count)) {
intel_dp_dump_link_status(link_status);
drm_dbg_kms(&i915->drm,
"Clock recovery check failed, cannot "
@@ -350,7 +635,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
}

if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
crtc_state->lane_count)) {
channel_eq = true;
drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
"successful\n");
@@ -358,8 +643,9 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
}

/* Update training set as requested by target */
intel_dp_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"failed to update link training\n");
break;
@@ -373,54 +659,142 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
"Channel equalization failed 5 times\n");
}

intel_dp_set_idle_link_train(intel_dp);

return channel_eq;

}

void intel_dp_stop_link_train(struct intel_dp *intel_dp)
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
u8 val = DP_TRAINING_PATTERN_DISABLE;

return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}

/**
* intel_dp_stop_link_train - stop link training
* @intel_dp: DP struct
* @crtc_state: state for CRTC attached to the encoder
*
* Stop the link training of the @intel_dp port, disabling the test pattern
* symbol generation on the port and disabling the training pattern in
* the sink's DPCD.
*
* What symbols are output on the port after this point is
* platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
* with the pipe being disabled, on older platforms it's HW specific if/how an
* idle pattern is generated, as the pipe is already enabled here for those.
*
* This function must be called after intel_dp_start_link_train().
*/
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
intel_dp->link_trained = true;

intel_dp_set_link_train(intel_dp,
DP_TRAINING_PATTERN_DISABLE);
intel_dp_program_link_training_pattern(intel_dp,
crtc_state,
DP_TRAINING_PATTERN_DISABLE);
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
}

void
intel_dp_start_link_train(struct intel_dp *intel_dp)
static bool
intel_dp_link_train_phy(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
char phy_name[10];
bool ret = false;

if (!intel_dp_link_training_clock_recovery(intel_dp))
goto failure_handling;
if (!intel_dp_link_training_channel_equalization(intel_dp))
goto failure_handling;
if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
goto out;

if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
goto out;

ret = true;

out:
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
"[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s",
intel_connector->base.base.id,
intel_connector->base.name,
intel_dp->link_rate, intel_dp->lane_count);
return;
ret ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count,
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

failure_handling:
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
intel_connector->base.base.id,
intel_connector->base.name,
intel_dp->link_rate, intel_dp->lane_count);
return ret;
}

static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;

if (intel_dp->hobl_active) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"Link Training failed with HOBL active, not enabling it from now on");
intel_dp->hobl_failed = true;
} else if (intel_dp_get_link_train_fallback_values(intel_dp,
intel_dp->link_rate,
intel_dp->lane_count)) {
crtc_state->port_clock,
crtc_state->lane_count)) {
return;
}

/* Schedule a Hotplug Uevent to userspace to start modeset */
schedule_work(&intel_connector->modeset_retry_work);
}

/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
int lttpr_count)
{
bool ret = true;
int i;

intel_dp_prepare_link_train(intel_dp, crtc_state);

for (i = lttpr_count - 1; i >= 0; i--) {
enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

if (!ret)
break;
}

if (ret)
intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);

return ret;
}

/**
* intel_dp_start_link_train - start link training
* @intel_dp: DP struct
* @crtc_state: state for CRTC attached to the encoder
*
* Start the link training of the @intel_dp port, scheduling a fallback
* retraining with reduced link rate/lane parameters if the link training
* fails.
* After calling this function intel_dp_stop_link_train() must be called.
*/
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
/*
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
*/
int lttpr_count = intel_dp_lttpr_init(intel_dp);

if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
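Note on the hunks above: link training is now driven per DP PHY, where a PHY is either an LTTPR or the DPRX itself. A minimal sketch of the resulting call order, using only the helpers introduced in these hunks (illustration only, not code from the commit; error handling and the fallback path are omitted):

/* Illustrative only: mirrors the per-PHY ordering described above. */
static bool sketch_train_all_phys(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  int lttpr_count)
{
	int i;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	/* LTTPRs are trained first, starting with the one closest to the source. */
	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		if (!intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy))
			return false;
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
	}

	/* The DPRX (the sink itself) is trained last. */
	return intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
}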
@@ -8,11 +8,24 @@

#include <drm/drm_dp_helper.h>

struct intel_crtc_state;
struct intel_dp;

int intel_dp_lttpr_init(struct intel_dp *intel_dp);

void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);

/* Get the TPSx symbol type of the value programmed to DP_TRAINING_PATTERN_SET */
static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
{
return pattern & ~DP_LINK_SCRAMBLING_DISABLE;
}

#endif /* __INTEL_DP_LINK_TRAINING_H__ */
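With the new prototypes above, both entry points take the CRTC state, and per the kerneldoc added earlier every intel_dp_start_link_train() call is expected to be followed by intel_dp_stop_link_train(). A hypothetical, simplified caller (names and exact ordering are assumptions for illustration, not taken from the commit):

/* Hypothetical caller, for illustration only. */
static void sketch_enable_dp(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	/* Train LTTPRs and the sink for the configured rate/lane count. */
	intel_dp_start_link_train(intel_dp, crtc_state);

	/* ... platform-specific pipe/transcoder enabling happens here ... */

	/* Must follow intel_dp_start_link_train(); disables the training pattern. */
	intel_dp_stop_link_train(intel_dp, crtc_state);
}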
@@ -130,7 +130,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
limits.min_lane_count =
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

limits.min_bpp = intel_dp_min_bpp(pipe_config);
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
/*
* FIXME: If all the streams can't fit into the link with
* their current pipe_bpp we should reduce pipe_bpp across
@@ -318,19 +318,23 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return ret;
}

static void clear_act_sent(struct intel_dp *intel_dp)
static void clear_act_sent(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);

intel_de_write(i915, intel_dp->regs.dp_tp_status,
intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_ACT_SENT);
}

static void wait_for_act_sent(struct intel_dp *intel_dp)
static void wait_for_act_sent(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;

if (intel_de_wait_for_set(i915, intel_dp->regs.dp_tp_status,
if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_ACT_SENT, 1))
drm_err(&i915->drm, "Timed out waiting for ACT sent\n");

@@ -392,7 +396,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,

drm_dp_update_payload_part2(&intel_dp->mst_mgr);

clear_act_sent(intel_dp);
clear_act_sent(encoder, old_crtc_state);

val = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
@@ -401,7 +405,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
val);

wait_for_act_sent(intel_dp);
wait_for_act_sent(encoder, old_crtc_state);

drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);

@@ -488,7 +492,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp->active_mst_links);

if (first_mst_stream)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);

drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);

@@ -535,7 +539,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,

drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);

clear_act_sent(intel_dp);
clear_act_sent(encoder, pipe_config);

intel_ddi_enable_transcoder_func(encoder, pipe_config);

@@ -549,7 +553,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
intel_dp->active_mst_links);

wait_for_act_sent(intel_dp);
wait_for_act_sent(encoder, pipe_config);

drm_dp_update_payload_part2(&intel_dp->mst_mgr);

@@ -587,6 +591,15 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(&dig_port->base, pipe_config);
}

static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;

return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
}

static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -893,6 +906,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
intel_encoder->get_config = intel_dp_mst_enc_get_config;
intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;

return intel_mst;
@@ -644,16 +644,16 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
return mask;
}


void chv_set_phy_signal_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;
u32 val;
int i;

@@ -666,7 +666,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

if (intel_crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
@@ -679,7 +679,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

if (intel_crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
@@ -687,7 +687,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
}

/* Program swing deemph */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
for (i = 0; i < crtc_state->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
@@ -695,7 +695,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
}

/* Program swing margin */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
for (i = 0; i < crtc_state->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

val &= ~DPIO_SWING_MARGIN000_MASK;
@@ -718,7 +718,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
* For now, for this unique transition scale selection, set bit
* 27 for ch0 and ch1.
*/
for (i = 0; i < intel_crtc->config->lane_count; i++) {
for (i = 0; i < crtc_state->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
@@ -732,7 +732,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

if (intel_crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
@@ -992,14 +992,15 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
}

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;

vlv_dpio_get(dev_priv);

@@ -32,6 +32,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
@@ -46,6 +47,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
@@ -151,14 +151,14 @@ static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
struct intel_shared_dpll *pll)
{

if (IS_ELKHARTLAKE(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
if (IS_DG1(i915))
return DG1_DPLL_ENABLE(pll->info->id);
else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
return MG_PLL_ENABLE(0);

return CNL_DPLL_ENABLE(pll->info->id);

}

/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
* @crtc_state: CRTC, and its state, which has a shared dpll
@@ -1602,9 +1602,19 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
case DPLL_CFGCR2_PDIV_3:
p0 = 3;
break;
case DPLL_CFGCR2_PDIV_7_INVALID:
/*
* Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
* handling it the same way as PDIV_7.
*/
drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
fallthrough;
case DPLL_CFGCR2_PDIV_7:
p0 = 7;
break;
default:
MISSING_CASE(p0);
return 0;
}

switch (p2) {
@@ -1620,6 +1630,9 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
case DPLL_CFGCR2_KDIV_1:
p2 = 1;
break;
default:
MISSING_CASE(p2);
return 0;
}

dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
@@ -2622,11 +2635,22 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}

/*
* Display WA #22010492432: tgl
* Program half of the nominal DCO divider fraction value.
*/
static bool
tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400;
}

static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
const struct intel_shared_dpll *pll,
int ref_clock)
{
const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
u32 dco_fraction;
u32 p0, p1, p2, dco_freq;

p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
@@ -2669,8 +2693,13 @@ static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
ref_clock;

dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT;

if (tgl_combo_pll_div_frac_wa_needed(dev_priv))
dco_fraction *= 2;

dco_freq += (dco_fraction * ref_clock) / 0x8000;

if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;
@@ -2948,16 +2977,6 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
/* the following params are unused */
};

/*
* Display WA #22010492432: tgl
* Divide the nominal .dco_fraction value by 2.
*/
static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = {
.dco_integer = 0x54, .dco_fraction = 0x1800,
/* the following params are unused */
.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
@@ -2991,14 +3010,12 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
*pll_params = tgl_tbt_pll_19_2MHz_values;
break;
case 24000:
*pll_params = tgl_tbt_pll_24MHz_values;
break;
case 38400:
*pll_params = tgl_tbt_pll_38_4MHz_values;
break;
}
} else {
switch (dev_priv->dpll.ref_clks.nssc) {
@@ -3065,9 +3082,14 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
const struct skl_wrpll_params *pll_params,
struct intel_dpll_hw_state *pll_state)
{
u32 dco_fraction = pll_params->dco_fraction;

memset(pll_state, 0, sizeof(*pll_state));

pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) |
if (tgl_combo_pll_div_frac_wa_needed(i915))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
pll_params->dco_integer;

pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
@@ -3524,12 +3546,22 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,

icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

if (IS_ROCKETLAKE(dev_priv)) {
if (IS_DG1(dev_priv)) {
if (port == PORT_D || port == PORT_E) {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL2) |
BIT(DPLL_ID_DG1_DPLL3);
} else {
dpll_mask =
BIT(DPLL_ID_DG1_DPLL0) |
BIT(DPLL_ID_DG1_DPLL1);
}
} else if (IS_ROCKETLAKE(dev_priv)) {
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
BIT(DPLL_ID_ICL_DPLL0);
} else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) {
} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
@@ -3820,7 +3852,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;

if (IS_ROCKETLAKE(dev_priv)) {
if (IS_DG1(dev_priv)) {
hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
} else if (IS_ROCKETLAKE(dev_priv)) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
RKL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
@@ -3831,7 +3866,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->cfgcr1 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR1(id));
} else {
if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
ICL_DPLL_CFGCR0(4));
hw_state->cfgcr1 = intel_de_read(dev_priv,
@@ -3873,14 +3908,17 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg;

if (IS_ROCKETLAKE(dev_priv)) {
if (IS_DG1(dev_priv)) {
cfgcr0_reg = DG1_DPLL_CFGCR0(id);
cfgcr1_reg = DG1_DPLL_CFGCR1(id);
} else if (IS_ROCKETLAKE(dev_priv)) {
cfgcr0_reg = RKL_DPLL_CFGCR0(id);
cfgcr1_reg = RKL_DPLL_CFGCR1(id);
} else if (INTEL_GEN(dev_priv) >= 12) {
cfgcr0_reg = TGL_DPLL_CFGCR0(id);
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
} else {
if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
cfgcr0_reg = ICL_DPLL_CFGCR0(4);
cfgcr1_reg = ICL_DPLL_CFGCR1(4);
} else {
@@ -4054,7 +4092,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

if (IS_ELKHARTLAKE(dev_priv) &&
if (IS_JSL_EHL(dev_priv) &&
pll->info->id == DPLL_ID_EHL_DPLL4) {

/*
@@ -4167,7 +4205,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv,

icl_pll_disable(dev_priv, pll, enable_reg);

if (IS_ELKHARTLAKE(dev_priv) &&
if (IS_JSL_EHL(dev_priv) &&
pll->info->id == DPLL_ID_EHL_DPLL4)
intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
pll->wakeref);
@@ -4317,6 +4355,22 @@ static const struct intel_dpll_mgr rkl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};

static const struct dpll_info dg1_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
.dpll_info = dg1_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};

/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -4330,11 +4384,13 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;

if (IS_ROCKETLAKE(dev_priv))
if (IS_DG1(dev_priv))
dpll_mgr = &dg1_pll_mgr;
else if (IS_ROCKETLAKE(dev_priv))
dpll_mgr = &rkl_pll_mgr;
else if (INTEL_GEN(dev_priv) >= 12)
dpll_mgr = &tgl_pll_mgr;
else if (IS_ELKHARTLAKE(dev_priv))
else if (IS_JSL_EHL(dev_priv))
dpll_mgr = &ehl_pll_mgr;
else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
@@ -4476,7 +4532,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
pll->on = pll->info->funcs->get_hw_state(i915, pll,
&pll->state.hw_state);

if (IS_ELKHARTLAKE(i915) && pll->on &&
if (IS_JSL_EHL(i915) && pll->on &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
pll->wakeref = intel_display_power_get(i915,
POWER_DOMAIN_DPLL_DC_OFF);
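One way to read the Wa_22010492432 hunks above: on TGL with a 38.4 MHz non-SSC reference, CFGCR0 is programmed with half of the nominal 15-bit DCO fraction, so the readout path doubles it back before computing the DCO frequency. A rough sketch of that calculation (illustration only; the function name is made up, and units follow whatever ref_clock uses in the driver):

/* Sketch of the readout math shown above, with the TGL 38.4 MHz WA applied. */
static unsigned int sketch_dco_freq(unsigned int dco_integer,
				    unsigned int dco_fraction,
				    unsigned int ref_clock,
				    bool div_frac_wa_needed)
{
	if (div_frac_wa_needed)
		dco_fraction *= 2;	/* the register holds half the nominal value */

	/* dco_fraction is a binary fraction with 15 fractional bits. */
	return dco_integer * ref_clock + dco_fraction * ref_clock / 0x8000;
}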
@@ -154,6 +154,23 @@ enum intel_dpll_id {
* @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
*/
DPLL_ID_TGL_MGPLL6 = 8,

/**
* @DPLL_ID_DG1_DPLL0: DG1 combo PHY DPLL0
*/
DPLL_ID_DG1_DPLL0 = 0,
/**
* @DPLL_ID_DG1_DPLL1: DG1 combo PHY DPLL1
*/
DPLL_ID_DG1_DPLL1 = 1,
/**
* @DPLL_ID_DG1_DPLL2: DG1 combo PHY DPLL2
*/
DPLL_ID_DG1_DPLL2 = 2,
/**
* @DPLL_ID_DG1_DPLL3: DG1 combo PHY DPLL3
*/
DPLL_ID_DG1_DPLL3 = 3,
};

#define I915_NUM_PLLS 9
@@ -167,6 +167,7 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)

/* icl_dsi.c */
void icl_dsi_init(struct drm_i915_private *dev_priv);
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);

/* intel_dsi.c */
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
@@ -907,6 +907,13 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}

/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
if (INTEL_GEN(dev_priv) >= 11 &&
(cache->plane.src_h + cache->plane.adjusted_y) % 4) {
fbc->no_fbc_reason = "plane height + offset is non-modulo of 4";
return false;
}

return true;
}

@@ -90,11 +90,20 @@ static const struct gmbus_pin gmbus_pins_icp[] = {
[GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO },
};

static const struct gmbus_pin gmbus_pins_dg1[] = {
[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
[GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
[GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
};

/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
return &gmbus_pins_dg1[pin];
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
@@ -113,7 +122,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;

if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
size = ARRAY_SIZE(gmbus_pins_dg1);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
@@ -1445,10 +1445,9 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
}

static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1457,6 +1456,9 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
const struct intel_hdcp_shim *shim = hdcp->shim;
int ret;

if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
return -ERANGE;

/* Prepare RepeaterAuth_Stream_Manage msg */
msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
@@ -1472,28 +1474,21 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
sizeof(msgs.stream_manage));
if (ret < 0)
return ret;
goto out;

ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
&msgs.stream_ready, sizeof(msgs.stream_ready));
if (ret < 0)
return ret;
goto out;

hdcp->port_data.seq_num_m = hdcp->seq_num_m;
hdcp->port_data.streams[0].stream_type = hdcp->content_type;

ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
if (ret < 0)
return ret;

out:
hdcp->seq_num_m++;

if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
return -1;
}

return 0;
return ret;
}

static
@@ -1564,17 +1559,6 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
return 0;
}

static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
int ret;

ret = hdcp2_authenticate_repeater_topology(connector);
if (ret < 0)
return ret;

return hdcp2_propagate_stream_management_info(connector);
}

static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1611,7 +1595,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
}

if (hdcp->is_repeater) {
ret = hdcp2_authenticate_repeater(connector);
ret = hdcp2_authenticate_repeater_topology(connector);
if (ret < 0) {
drm_dbg_kms(&i915->drm,
"Repeater Auth Failed. Err: %d\n", ret);
@@ -1619,11 +1603,6 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
}
}

hdcp->port_data.streams[0].stream_type = hdcp->content_type;
ret = hdcp2_authenticate_port(connector);
if (ret < 0)
return ret;

return ret;
}
@@ -1704,15 +1683,59 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
return ret;
}

static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
int i, tries = 3, ret;

if (!connector->hdcp.is_repeater)
return 0;

for (i = 0; i < tries; i++) {
ret = _hdcp2_propagate_stream_management_info(connector);
if (!ret)
break;

/* Lets restart the auth incase of seq_num_m roll over */
if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
drm_dbg_kms(&i915->drm,
"seq_num_m roll over.(%d)\n", ret);
break;
}

drm_dbg_kms(&i915->drm,
"HDCP2 stream management %d of %d Failed.(%d)\n",
i + 1, tries, ret);
}

return ret;
}

static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret, i, tries = 3;

for (i = 0; i < tries; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret)
break;
if (!ret) {
ret = hdcp2_propagate_stream_management_info(connector);
if (ret) {
drm_dbg_kms(&i915->drm,
"Stream management failed.(%d)\n",
ret);
break;
}
hdcp->port_data.streams[0].stream_type =
hdcp->content_type;
ret = hdcp2_authenticate_port(connector);
if (!ret)
break;
drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
ret);
}

/* Clearing the mei hdcp session */
drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
@@ -1721,7 +1744,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}

if (i != tries) {
if (!ret) {
/*
* Ensuring the required 200mSec min time interval between
* Session Key Exchange and encryption.
@@ -2775,8 +2775,9 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
vlv_phy_pre_encoder_enable(encoder, pipe_config);

/* HDMI 1.0V-2dB */
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
vlv_set_phy_signal_level(encoder, pipe_config,
0x2b245f5f, 0x00002000,
0x5578b83a, 0x2b247878);

dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
@@ -2853,7 +2854,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,

/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
chv_set_phy_signal_level(encoder, pipe_config, 128, 102, false);

dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
@@ -3139,6 +3140,11 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return GMBUS_PIN_1_BXT + phy;
}

static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
return intel_port_to_phy(dev_priv, port) + 1;
}

static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -3176,7 +3182,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}

if (IS_ROCKETLAKE(dev_priv))
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
else if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
@@ -3214,7 +3222,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
if (dig_port->lspcon.active) {
if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
dig_port->set_infoframes = lspcon_set_infoframes;
@@ -213,6 +213,12 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
}
}

static void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
if (i915->display_irqs_enabled && i915->display.hpd_irq_setup)
i915->display.hpd_irq_setup(i915);
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -248,8 +254,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
}

if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
intel_hpd_irq_setup(dev_priv);

spin_unlock_irq(&dev_priv->irq_lock);

@@ -556,8 +561,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* Disable any IRQs that storms were detected on. Polling enablement
* happens later in our hotplug work.
*/
if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
if (storm_detected)
intel_hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);

/*
@@ -584,7 +589,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* This is a separate step from interrupt enabling to simplify the locking rules
* in the driver load and resume code.
*
* Also see: intel_hpd_poll_init(), which enables connector polling
* Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
*/
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
@@ -595,19 +600,13 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
dev_priv->hotplug.stats[i].state = HPD_ENABLED;
}

WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
schedule_work(&dev_priv->hotplug.poll_init_work);

/*
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
spin_lock_irq(&dev_priv->irq_lock);
intel_hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}

static void i915_hpd_poll_init_work(struct work_struct *work)
@@ -654,12 +653,12 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}

/**
* intel_hpd_poll_init - enables/disables polling for connectors with hpd
* intel_hpd_poll_enable - enable polling for connectors with hpd
* @dev_priv: i915 device instance
*
* This function enables polling for all connectors, regardless of whether or
* not they support hotplug detection. Under certain conditions HPD may not be
* functional. On most Intel GPUs, this happens when we enter runtime suspend.
* This function enables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
* this happens when we enter runtime suspend.
* On Valleyview and Cherryview systems, this also happens when we shut off all
* of the powerwells.
*
@@ -667,9 +666,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
* dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
* worker.
*
* Also see: intel_hpd_init(), which restores hpd handling.
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
*/
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

@@ -682,6 +681,31 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
schedule_work(&dev_priv->hotplug.poll_init_work);
}

/**
* intel_hpd_poll_disable - disable polling for connectors with hpd
* @dev_priv: i915 device instance
*
* This function disables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
* this happens when we enter runtime suspend.
* On Valleyview and Cherryview systems, this also happens when we shut off all
* of the powerwells.
*
* Since this function can get called in contexts where we're already holding
* dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
* worker.
*
* Also used during driver init to initialize connector->polled
* appropriately for all connectors.
*
* Also see: intel_hpd_init() and intel_hpd_poll_enable().
*/
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
schedule_work(&dev_priv->hotplug.poll_init_work);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
@@ -14,7 +14,8 @@ struct intel_digital_port;
struct intel_encoder;
enum port;

void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
@ -184,21 +184,6 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
|
|||
return true;
|
||||
}
|
||||
|
||||
void lspcon_ycbcr420_config(struct drm_connector *connector,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
const struct drm_display_info *info = &connector->display_info;
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->hw.adjusted_mode;
|
||||
|
||||
if (drm_mode_is_420_only(info, adjusted_mode) &&
|
||||
connector->ycbcr_420_allowed) {
|
||||
crtc_state->port_clock /= 2;
|
||||
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
|
||||
crtc_state->lspcon_downsampling = true;
|
||||
}
|
||||
}
|
||||
|
||||
static bool lspcon_probe(struct intel_lspcon *lspcon)
|
||||
{
|
||||
int retry;
|
||||
|
@ -492,14 +477,19 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
|
|||
return;
|
||||
}
|
||||
|
||||
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
|
||||
if (crtc_state->lspcon_downsampling)
|
||||
frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
|
||||
else
|
||||
frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
|
||||
} else {
|
||||
/*
|
||||
* Currently there is no interface defined to
|
||||
* check user preference between RGB/YCBCR444
|
||||
* or YCBCR420. So the only possible case for
|
||||
* YCBCR444 usage is driving YCBCR420 output
|
||||
* with LSPCON, when pipe is configured for
|
||||
* YCBCR444 output and LSPCON takes care of
|
||||
* downsampling it.
|
||||
*/
|
||||
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
|
||||
frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
|
||||
else
|
||||
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
|
||||
}
|
||||
|
||||
drm_hdmi_avi_infoframe_quant_range(&frame.avi,
|
||||
conn_state->connector,
|
||||
|
@@ -525,44 +515,17 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
	return 0;
}

void lspcon_resume(struct intel_lspcon *lspcon)
{
	enum drm_lspcon_mode expected_mode;

	if (lspcon_wake_native_aux_ch(lspcon)) {
		expected_mode = DRM_LSPCON_MODE_PCON;
		lspcon_resume_in_pcon_wa(lspcon);
	} else {
		expected_mode = DRM_LSPCON_MODE_LS;
	}

	if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON)
		return;

	if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
		DRM_ERROR("LSPCON resume failed\n");
	else
		DRM_DEBUG_KMS("LSPCON resume success\n");
}

void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
{
	lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
}

bool lspcon_init(struct intel_digital_port *dig_port)
static bool lspcon_init(struct intel_digital_port *dig_port)
{
	struct intel_dp *dp = &dig_port->dp;
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_connector *connector = &dp->attached_connector->base;

	if (!HAS_LSPCON(dev_priv)) {
		DRM_ERROR("LSPCON is not supported on this platform\n");
		return false;
	}

	lspcon->active = false;
	lspcon->mode = DRM_LSPCON_MODE_INVALID;

@@ -586,3 +549,37 @@ bool lspcon_init(struct intel_digital_port *dig_port)
	DRM_DEBUG_KMS("Success: LSPCON init\n");
	return true;
}

void lspcon_resume(struct intel_digital_port *dig_port)
{
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum drm_lspcon_mode expected_mode;

	if (!intel_bios_is_lspcon_present(dev_priv, dig_port->base.port))
		return;

	if (!lspcon->active) {
		if (!lspcon_init(dig_port)) {
			DRM_ERROR("LSPCON init failed on port %c\n",
				  port_name(dig_port->base.port));
			return;
		}
	}

	if (lspcon_wake_native_aux_ch(lspcon)) {
		expected_mode = DRM_LSPCON_MODE_PCON;
		lspcon_resume_in_pcon_wa(lspcon);
	} else {
		expected_mode = DRM_LSPCON_MODE_LS;
	}

	if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON)
		return;

	if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
		DRM_ERROR("LSPCON resume failed\n");
	else
		DRM_DEBUG_KMS("LSPCON resume success\n");
}

@@ -15,8 +15,7 @@ struct intel_digital_port;
struct intel_encoder;
struct intel_lspcon;

bool lspcon_init(struct intel_digital_port *dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_resume(struct intel_digital_port *dig_port);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
@@ -32,7 +31,5 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
			   const struct drm_connector_state *conn_state);
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config);
void lspcon_ycbcr420_config(struct drm_connector *connector,
			    struct intel_crtc_state *crtc_state);

#endif /* __INTEL_LSPCON_H__ */

@@ -371,6 +371,15 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state,
	intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}

static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_CYCLE_DELAY_ACTIVE, 5000))
		drm_err(&dev_priv->drm,
			"timed out waiting for panel power cycle delay\n");
}

static enum drm_mode_status
intel_lvds_mode_valid(struct drm_connector *connector,
		      struct drm_display_mode *mode)
@@ -897,6 +906,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
	intel_encoder->get_config = intel_lvds_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->shutdown = intel_lvds_shutdown;
	intel_connector->get_hw_state = intel_connector_get_hw_state;

	intel_connector_attach_encoder(intel_connector, intel_encoder);

@@ -1007,12 +1007,8 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
	int ret;

	ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get panel details from OpRegion (%d)\n",
			    ret);
	if (ret)
		return ret;
	}

	ret = (panel_details >> 8) & 0xff;
	if (ret > 0x10) {

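The simplified error path above returns early on SWSCI failure and otherwise takes the panel type from bits 15:8 of the returned panel details word. A tiny standalone example of that extraction; the sample value is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t panel_details = 0x00000204;            /* made-up SWSCI return value */
	uint32_t panel_type = (panel_details >> 8) & 0xff; /* bits 15:8 */

	/* 0x204 >> 8 == 0x2, so this prints 2; values above 0x10 are rejected
	 * by the check that follows in the driver. */
	printf("panel type: %u\n", panel_type);
	return 0;
}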
@@ -91,19 +91,14 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable &&
		    crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
		return true;
	}
}

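The rewritten helper above gates PSR2 purely on the debug flags rather than on a particular CRTC state. A minimal standalone sketch of that gating; the enum values are invented stand-ins for the real I915_PSR_DEBUG_* flags, only the relationship mirrors the switch shown.

#include <stdbool.h>
#include <stdio.h>

enum psr_debug_mode { PSR_DEBUG_DEFAULT, PSR_DEBUG_DISABLE, PSR_DEBUG_FORCE_PSR1 };

/* Mirrors psr2_global_enabled(): "disable" and "force PSR1" both veto PSR2,
 * any other debug mode leaves it allowed. */
static bool psr2_allowed(enum psr_debug_mode mode)
{
	switch (mode) {
	case PSR_DEBUG_DISABLE:
	case PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return true;
	}
}

int main(void)
{
	const char *name[] = { "default", "disable", "force-psr1" };

	for (int m = PSR_DEBUG_DEFAULT; m <= PSR_DEBUG_FORCE_PSR1; m++)
		printf("%-10s -> PSR2 %s\n", name[m],
		       psr2_allowed(m) ? "allowed" : "blocked");
	return 0;
}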
@@ -729,6 +724,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
		return false;
	}

	if (!psr2_global_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
@@ -817,8 +817,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
	if (intel_dp != dev_priv->psr.dp)
		return;

	if (!psr_global_enabled(dev_priv))
	if (!psr_global_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder but
@@ -942,7 +945,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
	}

	if (HAS_PSR_HW_TRACKING(dev_priv))
	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     dev_priv->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);
@@ -959,7 +962,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,

	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
@@ -1029,15 +1032,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state, conn_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

@@ -1152,7 +1147,21 @@ void intel_psr_disable(struct intel_dp *intel_dp,

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
	if (IS_TIGERLAKE(dev_priv))
		/*
		 * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
		 * visual glitches that are often reproduced when executing
		 * CPU intensive workloads while an eDP 4K panel is attached.
		 *
		 * Manually exiting PSR causes the frontbuffer to be updated
		 * without glitches and the IOMMU errors are also gone but
		 * this comes at the cost of less time with PSR active.
		 *
		 * So using this workaround until this issue is root caused
		 * and a better fix is found.
		 */
		intel_psr_exit(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
@@ -1171,6 +1180,38 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
		intel_psr_exit(dev_priv);
}

void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 val;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	val = plane_state ? plane_state->ctl : 0;
	val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
	if (!val || plane->id == PLANE_CURSOR)
		return;

	val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);

	val = plane_state->color_plane[color_plane].y << 16;
	val |= plane_state->color_plane[color_plane].x;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
			  val);

	/* Sizes are 0 based */
	val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16;
	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}
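The function above packs x/y pairs into single 32-bit register values (y in the high half, x in the low half) and programs the block size as "0 based", i.e. width-1 and height-1. A small standalone sketch of that packing with made-up plane coordinates; the layout is only what the code above implies, not taken from bspec.

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_xy(uint32_t y, uint32_t x)
{
	return (y << 16) | x;   /* y in bits 31:16, x in bits 15:0 */
}

int main(void)
{
	uint32_t dst_x = 128, dst_y = 64;     /* made-up plane position */
	uint32_t src_w = 1920, src_h = 1080;  /* made-up plane source size */

	uint32_t pos  = pack_xy(dst_y, dst_x);
	uint32_t size = pack_xy(src_h - 1, src_w - 1); /* "sizes are 0 based" */

	printf("SEL_FETCH_POS  = 0x%08x\n", pos);   /* 0x00400080 */
	printf("SEL_FETCH_SIZE = 0x%08x\n", size);  /* 0x0437077f */
	return 0;
}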

void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1185,16 +1226,91 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
		       crtc_state->psr2_man_track_ctl);
}

void intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  struct drm_rect *clip, bool full_update)
{
	u32 val = PSR2_MAN_TRK_CTL_ENABLE;

	if (full_update) {
		val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
		goto exit;
	}

	if (clip->y1 == -1)
		goto exit;

	val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
	val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
	val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
exit:
	crtc_state->psr2_man_track_ctl = val;
}

static void clip_area_update(struct drm_rect *overlap_damage_area,
			     struct drm_rect *damage_area)
{
	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;

	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}

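psr2_man_trk_ctl_calc() above converts a damage rectangle's scanline range into selective-update block addresses in 4-scanline units: start is y1/4 + 1 and end is DIV_ROUND_UP(y2, 4) + 1, with clip->y1 == -1 meaning "no damage yet"; clip_area_update() just grows a running y-range. A small worked example of both computations; the damage rectangles are made up.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct yrange { int y1, y2; };

/* Same union-of-y-ranges logic as clip_area_update() above. */
static void clip_area_update(struct yrange *overlap, const struct yrange *damage)
{
	if (overlap->y1 == -1) {
		*overlap = *damage;
		return;
	}
	if (damage->y1 < overlap->y1)
		overlap->y1 = damage->y1;
	if (damage->y2 > overlap->y2)
		overlap->y2 = damage->y2;
}

int main(void)
{
	struct yrange pipe_clip = { .y1 = -1, .y2 = -1 };
	struct yrange plane_a = { 100, 300 };   /* made-up damaged scanlines */
	struct yrange plane_b = {  40, 180 };

	clip_area_update(&pipe_clip, &plane_a);
	clip_area_update(&pipe_clip, &plane_b); /* union: 40..300 */

	/* 4-line SU blocks, +1 as in psr2_man_trk_ctl_calc() */
	int start_addr = pipe_clip.y1 / 4 + 1;              /* 40/4 + 1 = 11 */
	int end_addr   = DIV_ROUND_UP(pipe_clip.y2, 4) + 1; /* 75 + 1   = 76 */

	printf("SU region: start block %d, end block %d\n", start_addr, end_addr);
	return 0;
}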
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct drm_rect pipe_clip = { .y1 = -1 };
	struct intel_plane *plane;
	bool full_update = false;
	int i, ret;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;
		return 0;

	crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE |
					 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect temp;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
			continue;

		/*
		 * TODO: Not clear how to handle planes with negative position,
		 * also planes are not updated if they have a negative X
		 * position so for now doing a full update in these cases
		 */
		if (new_plane_state->uapi.dst.y1 < 0 ||
		    new_plane_state->uapi.dst.x1 < 0) {
			full_update = true;
			break;
		}

		if (!new_plane_state->uapi.visible)
			continue;

		/*
		 * For now doing a selective fetch in the whole plane area,
		 * optimizations will come in the future.
		 */
		temp.y1 = new_plane_state->uapi.dst.y1;
		temp.y2 = new_plane_state->uapi.dst.y2;
		clip_area_update(&pipe_clip, &temp);
	}

	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
	return 0;
}

/**
@@ -1222,8 +1338,8 @@ void intel_psr_update(struct intel_dp *intel_dp,

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(dev_priv);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
	enable = crtc_state->has_psr;
	psr2_enable = crtc_state->has_psr2;

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
@@ -1320,11 +1436,12 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct intel_crtc *crtc;
	int err;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
@@ -1334,25 +1451,38 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
	state->acquire_ctx = &ctx;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (crtc_state->hw.active && crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->uapi.mode_changed = true;
		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	err = drm_atomic_commit(state);
	if (err == 0)
		err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
@@ -1754,7 +1884,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
		return;

	intel_connector = to_intel_connector(connector);
	dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
	dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
	if (dev_priv->psr.dp != &dig_port->dp)
		return;

@@ -15,6 +15,8 @@ struct intel_crtc_state;
struct intel_dp;
struct intel_crtc;
struct intel_atomic_state;
struct intel_plane_state;
struct intel_plane;

#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
@@ -45,8 +47,12 @@ void intel_psr_atomic_check(struct drm_connector *connector,
			    struct drm_connector_state *old_state,
			    struct drm_connector_state *new_state);
void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
void intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				 struct intel_crtc *crtc);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane);

#endif /* __INTEL_PSR_H__ */

@ -47,6 +47,7 @@
|
|||
#include "intel_frontbuffer.h"
|
||||
#include "intel_pm.h"
|
||||
#include "intel_psr.h"
|
||||
#include "intel_dsi.h"
|
||||
#include "intel_sprite.h"
|
||||
|
||||
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
|
||||
|
@ -93,6 +94,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
|
|||
DEFINE_WAIT(wait);
|
||||
u32 psr_status;
|
||||
|
||||
if (new_crtc_state->uapi.async_flip)
|
||||
return;
|
||||
|
||||
vblank_start = adjusted_mode->crtc_vblank_start;
|
||||
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
|
||||
vblank_start = DIV_ROUND_UP(vblank_start, 2);
|
||||
|
@ -200,8 +204,19 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
|
|||
ktime_t end_vbl_time = ktime_get();
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
|
||||
if (new_crtc_state->uapi.async_flip)
|
||||
return;
|
||||
|
||||
trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
|
||||
|
||||
/*
|
||||
* Incase of mipi dsi command mode, we need to set frame update
|
||||
* request for every commit.
|
||||
*/
|
||||
if (INTEL_GEN(dev_priv) >= 11 &&
|
||||
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
|
||||
icl_dsi_frame_update(new_crtc_state);
|
||||
|
||||
/* We're still in the vblank-evade critical section, this can't race.
|
||||
* Would be slightly nice to just grab the vblank count and arm the
|
||||
* event outside of the critical section - the spinlock might spin for a
|
||||
|
@ -429,6 +444,7 @@ skl_program_scaler(struct intel_plane *plane,
|
|||
u16 y_hphase, uv_rgb_hphase;
|
||||
u16 y_vphase, uv_rgb_vphase;
|
||||
int hscale, vscale;
|
||||
u32 ps_ctrl;
|
||||
|
||||
hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
|
||||
&plane_state->uapi.dst,
|
||||
|
@ -455,8 +471,13 @@ skl_program_scaler(struct intel_plane *plane,
|
|||
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
|
||||
}
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id),
|
||||
PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
|
||||
ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
|
||||
ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
|
||||
|
||||
skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
|
||||
plane_state->hw.scaling_filter);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
|
||||
intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
|
||||
PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
|
||||
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
|
||||
|
@ -603,6 +624,29 @@ icl_program_input_csc(struct intel_plane *plane,
|
|||
PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
|
||||
}
|
||||
|
||||
static void
|
||||
skl_plane_async_flip(struct intel_plane *plane,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
|
||||
unsigned long irqflags;
|
||||
enum plane_id plane_id = plane->id;
|
||||
enum pipe pipe = plane->pipe;
|
||||
u32 surf_addr = plane_state->color_plane[0].offset;
|
||||
u32 plane_ctl = plane_state->ctl;
|
||||
|
||||
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
|
||||
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
|
||||
intel_plane_ggtt_offset(plane_state) + surf_addr);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
}
|
||||
|
||||
static void
|
||||
skl_program_plane(struct intel_plane *plane,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
|
@ -617,8 +661,6 @@ skl_program_plane(struct intel_plane *plane,
|
|||
u32 stride = skl_plane_stride(plane_state, color_plane);
|
||||
const struct drm_framebuffer *fb = plane_state->hw.fb;
|
||||
int aux_plane = intel_main_to_aux_plane(fb, color_plane);
|
||||
u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
|
||||
u32 aux_stride = skl_plane_stride(plane_state, aux_plane);
|
||||
int crtc_x = plane_state->uapi.dst.x1;
|
||||
int crtc_y = plane_state->uapi.dst.y1;
|
||||
u32 x = plane_state->color_plane[color_plane].x;
|
||||
|
@ -626,7 +668,7 @@ skl_program_plane(struct intel_plane *plane,
|
|||
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
|
||||
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
|
||||
u8 alpha = plane_state->hw.alpha >> 8;
|
||||
u32 plane_color_ctl = 0;
|
||||
u32 plane_color_ctl = 0, aux_dist = 0;
|
||||
unsigned long irqflags;
|
||||
u32 keymsk, keymax;
|
||||
u32 plane_ctl = plane_state->ctl;
|
||||
|
@ -653,6 +695,13 @@ skl_program_plane(struct intel_plane *plane,
|
|||
crtc_y = 0;
|
||||
}
|
||||
|
||||
if (aux_plane) {
|
||||
aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr;
|
||||
|
||||
if (INTEL_GEN(dev_priv) < 12)
|
||||
aux_dist |= skl_plane_stride(plane_state, aux_plane);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
|
||||
|
@ -661,8 +710,6 @@ skl_program_plane(struct intel_plane *plane,
|
|||
intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
|
||||
(src_h << 16) | src_w);
|
||||
|
||||
if (INTEL_GEN(dev_priv) < 12)
|
||||
aux_dist |= aux_stride;
|
||||
intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
|
||||
|
||||
if (icl_is_hdr_plane(dev_priv, plane_id))
|
||||
|
@ -690,6 +737,9 @@ skl_program_plane(struct intel_plane *plane,
|
|||
intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
|
||||
(plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
|
||||
|
||||
if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
|
||||
intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
|
||||
|
||||
/*
|
||||
* The control register self-arms if the plane was previously
|
||||
* disabled. Try to make the plane enable atomic by writing
|
||||
|
@ -2842,8 +2892,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
|
|||
static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
|
||||
enum plane_id plane_id)
|
||||
{
|
||||
/* Wa_14010477008:tgl[a0..c0],rkl[all] */
|
||||
if (IS_ROCKETLAKE(dev_priv) ||
|
||||
/* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
|
||||
if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
|
||||
IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
|
||||
return false;
|
||||
|
||||
|
@ -3089,6 +3139,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
|
|||
plane->get_hw_state = skl_plane_get_hw_state;
|
||||
plane->check_plane = skl_plane_check;
|
||||
plane->min_cdclk = skl_plane_min_cdclk;
|
||||
plane->async_flip = skl_plane_async_flip;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
formats = icl_get_plane_formats(dev_priv, pipe,
|
||||
|
@ -3160,6 +3211,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
|
|||
if (INTEL_GEN(dev_priv) >= 12)
|
||||
drm_plane_enable_fb_damage_clips(&plane->base);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 10)
|
||||
drm_plane_create_scaling_filter_property(&plane->base,
|
||||
BIT(DRM_SCALING_FILTER_DEFAULT) |
|
||||
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
|
||||
|
||||
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
|
||||
|
||||
return plane;
|
||||
|
|
|
@ -228,9 +228,9 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
|
|||
return;
|
||||
|
||||
/* If live status mismatches the VBT flag, trust the live status. */
|
||||
drm_err(&i915->drm,
|
||||
"Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
|
||||
dig_port->tc_port_name, live_status_mask);
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
|
||||
dig_port->tc_port_name, live_status_mask, valid_hpd_mask);
|
||||
|
||||
dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
|
||||
}
|
||||
|
@ -652,7 +652,7 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
|
|||
enum port port = dig_port->base.port;
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, port);
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
|
||||
if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
|
||||
return;
|
||||
|
||||
snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
|
||||
|
|
|
@ -782,7 +782,7 @@ struct lfp_backlight_data_entry {
|
|||
u8 active_low_pwm:1;
|
||||
u8 obsolete1:5;
|
||||
u16 pwm_freq_hz;
|
||||
u8 min_brightness;
|
||||
u8 min_brightness; /* Obsolete from 234+ */
|
||||
u8 obsolete2;
|
||||
u8 obsolete3;
|
||||
} __packed;
|
||||
|
@ -792,11 +792,19 @@ struct lfp_backlight_control_method {
|
|||
u8 controller:4;
|
||||
} __packed;
|
||||
|
||||
struct lfp_brightness_level {
|
||||
u16 level;
|
||||
u16 reserved;
|
||||
} __packed;
|
||||
|
||||
struct bdb_lfp_backlight_data {
|
||||
u8 entry_size;
|
||||
struct lfp_backlight_data_entry data[16];
|
||||
u8 level[16];
|
||||
u8 level[16]; /* Obsolete from 234+ */
|
||||
struct lfp_backlight_control_method backlight_control[16];
|
||||
struct lfp_brightness_level brightness_level[16]; /* 234+ */
|
||||
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
|
||||
u8 brightness_precision_bits[16]; /* 236+ */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
|
@ -827,6 +835,7 @@ struct bdb_lfp_power {
|
|||
u16 lace_enabled_status;
|
||||
struct agressiveness_profile_entry aggressivenes[16];
|
||||
u16 hobl; /* 232+ */
|
||||
u16 vrr_feature_enabled; /* 233+ */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
|
|
|
@ -985,6 +985,13 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
|
|||
intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
|
||||
}
|
||||
|
||||
static void intel_dsi_shutdown(struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
|
||||
intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
|
||||
}
|
||||
|
||||
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
|
||||
enum pipe *pipe)
|
||||
{
|
||||
|
@ -1843,6 +1850,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
|
|||
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
|
||||
intel_encoder->get_config = intel_dsi_get_config;
|
||||
intel_encoder->update_pipe = intel_panel_update_backlight;
|
||||
intel_encoder->shutdown = intel_dsi_shutdown;
|
||||
|
||||
intel_connector->get_hw_state = intel_connector_get_hw_state;
|
||||
|
||||
|
|
|
@ -835,7 +835,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
|
|||
u16 snb_gmch_ctl;
|
||||
|
||||
/* TODO: We're not aware of mappable constraints on gen8 yet */
|
||||
if (!IS_DGFX(i915)) {
|
||||
if (!HAS_LMEM(i915)) {
|
||||
ggtt->gmadr = pci_resource(pdev, 2);
|
||||
ggtt->mappable_end = resource_size(&ggtt->gmadr);
|
||||
}
|
||||
|
|
|
@ -109,7 +109,7 @@ struct drm_i915_mocs_table {
|
|||
* they will be initialized to PTE. Gen >= 12 onwards don't have a setting for
|
||||
* PTE and will be initialized to an invalid value.
|
||||
*
|
||||
* The last two entries are reserved by the hardware. For ICL+ they
|
||||
* The last few entries are reserved by the hardware. For ICL+ they
|
||||
* should be initialized according to bspec and never used, for older
|
||||
* platforms they should never be written to.
|
||||
*
|
||||
|
@ -286,6 +286,39 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
|
|||
GEN11_MOCS_ENTRIES
|
||||
};
|
||||
|
||||
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
|
||||
/* Error */
|
||||
MOCS_ENTRY(0, 0, L3_0_DIRECT),
|
||||
|
||||
/* UC */
|
||||
MOCS_ENTRY(1, 0, L3_1_UC),
|
||||
|
||||
/* Reserved */
|
||||
MOCS_ENTRY(2, 0, L3_0_DIRECT),
|
||||
MOCS_ENTRY(3, 0, L3_0_DIRECT),
|
||||
MOCS_ENTRY(4, 0, L3_0_DIRECT),
|
||||
|
||||
/* WB - L3 */
|
||||
MOCS_ENTRY(5, 0, L3_3_WB),
|
||||
/* WB - L3 50% */
|
||||
MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
|
||||
/* WB - L3 25% */
|
||||
MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
|
||||
/* WB - L3 12.5% */
|
||||
MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),
|
||||
|
||||
/* HDC:L1 + L3 */
|
||||
MOCS_ENTRY(48, 0, L3_3_WB),
|
||||
/* HDC:L1 */
|
||||
MOCS_ENTRY(49, 0, L3_1_UC),
|
||||
|
||||
/* HW Reserved */
|
||||
MOCS_ENTRY(60, 0, L3_1_UC),
|
||||
MOCS_ENTRY(61, 0, L3_1_UC),
|
||||
MOCS_ENTRY(62, 0, L3_1_UC),
|
||||
MOCS_ENTRY(63, 0, L3_1_UC),
|
||||
};
|
||||
|
||||
enum {
|
||||
HAS_GLOBAL_MOCS = BIT(0),
|
||||
HAS_ENGINE_MOCS = BIT(1),
|
||||
|
@ -312,7 +345,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
|
|||
{
|
||||
unsigned int flags;
|
||||
|
||||
if (INTEL_GEN(i915) >= 12) {
|
||||
if (IS_DG1(i915)) {
|
||||
table->size = ARRAY_SIZE(dg1_mocs_table);
|
||||
table->table = dg1_mocs_table;
|
||||
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
|
||||
} else if (INTEL_GEN(i915) >= 12) {
|
||||
table->size = ARRAY_SIZE(tgl_mocs_table);
|
||||
table->table = tgl_mocs_table;
|
||||
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
|
||||
|
|
|
@ -390,6 +390,16 @@ static void gen5_rps_update(struct intel_rps *rps)
|
|||
spin_unlock_irq(&mchdev_lock);
|
||||
}
|
||||
|
||||
static unsigned int gen5_invert_freq(struct intel_rps *rps,
|
||||
unsigned int val)
|
||||
{
|
||||
/* Invert the frequency bin into an ips delay */
|
||||
val = rps->max_freq - val;
|
||||
val = rps->min_freq + val;
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static bool gen5_rps_set(struct intel_rps *rps, u8 val)
|
||||
{
|
||||
struct intel_uncore *uncore = rps_to_uncore(rps);
|
||||
|
@ -404,8 +414,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val)
|
|||
}
|
||||
|
||||
/* Invert the frequency bin into an ips delay */
|
||||
val = rps->max_freq - val;
|
||||
val = rps->min_freq + val;
|
||||
val = gen5_invert_freq(rps, val);
|
||||
|
||||
rgvswctl =
|
||||
(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
|
||||
|
@ -500,6 +509,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
|
|||
|
||||
static bool gen5_rps_enable(struct intel_rps *rps)
|
||||
{
|
||||
struct drm_i915_private *i915 = rps_to_i915(rps);
|
||||
struct intel_uncore *uncore = rps_to_uncore(rps);
|
||||
u8 fstart, vstart;
|
||||
u32 rgvmodectl;
|
||||
|
@ -557,6 +567,10 @@ static bool gen5_rps_enable(struct intel_rps *rps)
|
|||
rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
|
||||
rps->ips.last_time2 = ktime_get_raw_ns();
|
||||
|
||||
spin_lock(&i915->irq_lock);
|
||||
ilk_enable_display_irq(i915, DE_PCU_EVENT);
|
||||
spin_unlock(&i915->irq_lock);
|
||||
|
||||
spin_unlock_irq(&mchdev_lock);
|
||||
|
||||
rps->ips.corr = init_emon(uncore);
|
||||
|
@ -566,11 +580,16 @@ static bool gen5_rps_enable(struct intel_rps *rps)
|
|||
|
||||
static void gen5_rps_disable(struct intel_rps *rps)
|
||||
{
|
||||
struct drm_i915_private *i915 = rps_to_i915(rps);
|
||||
struct intel_uncore *uncore = rps_to_uncore(rps);
|
||||
u16 rgvswctl;
|
||||
|
||||
spin_lock_irq(&mchdev_lock);
|
||||
|
||||
spin_lock(&i915->irq_lock);
|
||||
ilk_disable_display_irq(i915, DE_PCU_EVENT);
|
||||
spin_unlock(&i915->irq_lock);
|
||||
|
||||
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
|
||||
|
||||
/* Ack interrupts, disable EFC interrupt */
|
||||
|
@ -578,11 +597,6 @@ static void gen5_rps_disable(struct intel_rps *rps)
|
|||
intel_uncore_read(uncore, MEMINTREN) &
|
||||
~MEMINT_EVAL_CHG_EN);
|
||||
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
|
||||
intel_uncore_write(uncore, DEIER,
|
||||
intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
|
||||
intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
|
||||
intel_uncore_write(uncore, DEIMR,
|
||||
intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
|
||||
|
||||
/* Go back to the starting frequency */
|
||||
gen5_rps_set(rps, rps->idle_freq);
|
||||
|
@ -1272,8 +1286,9 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips)
|
|||
{
|
||||
struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
|
||||
struct intel_uncore *uncore = rps_to_uncore(rps);
|
||||
unsigned long t, corr, state1, corr2, state2;
|
||||
unsigned int t, state1, state2;
|
||||
u32 pxvid, ext_v;
|
||||
u64 corr, corr2;
|
||||
|
||||
lockdep_assert_held(&mchdev_lock);
|
||||
|
||||
|
@ -1294,11 +1309,10 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips)
|
|||
else /* < 50 */
|
||||
corr = t * 301 + 1004;
|
||||
|
||||
corr = corr * 150142 * state1 / 10000 - 78642;
|
||||
corr /= 100000;
|
||||
corr2 = corr * ips->corr;
|
||||
corr = div_u64(corr * 150142 * state1, 10000) - 78642;
|
||||
corr2 = div_u64(corr, 100000) * ips->corr;
|
||||
|
||||
state2 = corr2 * state1 / 10000;
|
||||
state2 = div_u64(corr2 * state1, 10000);
|
||||
state2 /= 100; /* convert to mW */
|
||||
|
||||
__gen5_ips_update(ips);
|
||||
|
@ -1432,8 +1446,10 @@ int intel_gpu_freq(struct intel_rps *rps, int val)
|
|||
return chv_gpu_freq(rps, val);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
return byt_gpu_freq(rps, val);
|
||||
else
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
return val * GT_FREQUENCY_MULTIPLIER;
|
||||
else
|
||||
return val;
|
||||
}
|
||||
|
||||
int intel_freq_opcode(struct intel_rps *rps, int val)
|
||||
|
@ -1447,8 +1463,10 @@ int intel_freq_opcode(struct intel_rps *rps, int val)
|
|||
return chv_freq_opcode(rps, val);
|
||||
else if (IS_VALLEYVIEW(i915))
|
||||
return byt_freq_opcode(rps, val);
|
||||
else
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
|
||||
else
|
||||
return val;
|
||||
}
|
||||
|
||||
static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
|
||||
|
@ -1864,8 +1882,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
|
|||
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
|
||||
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
|
||||
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
|
||||
else
|
||||
else if (INTEL_GEN(i915) >= 6)
|
||||
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
|
||||
else
|
||||
cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
|
||||
MEMSTAT_PSTATE_SHIFT);
|
||||
|
||||
return cagf;
|
||||
}
|
||||
|
@ -1873,14 +1894,17 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
|
|||
static u32 read_cagf(struct intel_rps *rps)
|
||||
{
|
||||
struct drm_i915_private *i915 = rps_to_i915(rps);
|
||||
struct intel_uncore *uncore = rps_to_uncore(rps);
|
||||
u32 freq;
|
||||
|
||||
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
|
||||
vlv_punit_get(i915);
|
||||
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
|
||||
vlv_punit_put(i915);
|
||||
} else if (INTEL_GEN(i915) >= 6) {
|
||||
freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
|
||||
} else {
|
||||
freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
|
||||
freq = intel_uncore_read(uncore, MEMSTAT_ILK);
|
||||
}
|
||||
|
||||
return intel_rps_get_cagf(rps, freq);
|
||||
|
|
|
@ -169,7 +169,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
|
|||
u8 eu_en;
|
||||
u8 s_en;
|
||||
|
||||
if (IS_ELKHARTLAKE(gt->i915))
|
||||
if (IS_JSL_EHL(gt->i915))
|
||||
intel_sseu_set_info(sseu, 1, 4, 8);
|
||||
else
|
||||
intel_sseu_set_info(sseu, 1, 8, 8);
|
||||
|
|
|
@ -672,6 +672,20 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
|
|||
0);
|
||||
}
|
||||
|
||||
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
|
||||
struct i915_wa_list *wal)
|
||||
{
|
||||
gen12_ctx_workarounds_init(engine, wal);
|
||||
|
||||
/* Wa_1409044764 */
|
||||
WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
|
||||
DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
|
||||
|
||||
/* Wa_22010493298 */
|
||||
WA_SET_BIT_MASKED(HIZ_CHICKEN,
|
||||
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
|
||||
}
|
||||
|
||||
static void
|
||||
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
|
||||
struct i915_wa_list *wal,
|
||||
|
@ -684,7 +698,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
|
|||
|
||||
wa_init_start(wal, name, engine->name);
|
||||
|
||||
if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
|
||||
if (IS_DG1(i915))
|
||||
dg1_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
|
||||
tgl_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 12))
|
||||
gen12_ctx_workarounds_init(engine, wal);
|
||||
|
@ -1212,7 +1228,7 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
|
||||
/* Wa_1607087056:icl,ehl,jsl */
|
||||
if (IS_ICELAKE(i915) ||
|
||||
IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
|
||||
IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) {
|
||||
wa_write_or(wal,
|
||||
SLICE_UNIT_LEVEL_CLKGATE,
|
||||
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
|
||||
|
@ -1244,10 +1260,36 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
|||
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
|
||||
}
|
||||
|
||||
static void
|
||||
dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
||||
{
|
||||
gen12_gt_workarounds_init(i915, wal);
|
||||
|
||||
/* Wa_1607087056:dg1 */
|
||||
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0))
|
||||
wa_write_or(wal,
|
||||
SLICE_UNIT_LEVEL_CLKGATE,
|
||||
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1409420604:dg1 */
|
||||
if (IS_DG1(i915))
|
||||
wa_write_or(wal,
|
||||
SUBSLICE_UNIT_LEVEL_CLKGATE2,
|
||||
CPSSUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1408615072:dg1 */
|
||||
/* Empirical testing shows this register is unaffected by engine reset. */
|
||||
if (IS_DG1(i915))
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
|
||||
VSUNIT_CLKGATE_DIS_TGL);
|
||||
}
|
||||
|
||||
static void
|
||||
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
||||
{
|
||||
if (IS_TIGERLAKE(i915))
|
||||
if (IS_DG1(i915))
|
||||
dg1_gt_workarounds_init(i915, wal);
|
||||
else if (IS_TIGERLAKE(i915))
|
||||
tgl_gt_workarounds_init(i915, wal);
|
||||
else if (IS_GEN(i915, 12))
|
||||
gen12_gt_workarounds_init(i915, wal);
|
||||
|
@ -1612,6 +1654,20 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
|
|||
}
|
||||
}
|
||||
|
||||
static void dg1_whitelist_build(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct i915_wa_list *w = &engine->whitelist;
|
||||
|
||||
tgl_whitelist_build(engine);
|
||||
|
||||
/* GEN:BUG:1409280441:dg1 */
|
||||
if (IS_DG1_REVID(engine->i915, DG1_REVID_A0, DG1_REVID_A0) &&
|
||||
(engine->class == RENDER_CLASS ||
|
||||
engine->class == COPY_ENGINE_CLASS))
|
||||
whitelist_reg_ext(w, RING_ID(engine->mmio_base),
|
||||
RING_FORCE_TO_NONPRIV_ACCESS_RD);
|
||||
}
|
||||
|
||||
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
@ -1619,7 +1675,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
|
|||
|
||||
wa_init_start(w, "whitelist", engine->name);
|
||||
|
||||
if (IS_GEN(i915, 12))
|
||||
if (IS_DG1(i915))
|
||||
dg1_whitelist_build(engine);
|
||||
else if (IS_GEN(i915, 12))
|
||||
tgl_whitelist_build(engine);
|
||||
else if (IS_GEN(i915, 11))
|
||||
icl_whitelist_build(engine);
|
||||
|
@ -1673,15 +1731,18 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
{
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
|
||||
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
||||
IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
|
||||
/*
|
||||
* Wa_1607138336:tgl
|
||||
* Wa_1607063988:tgl
|
||||
* Wa_1607138336:tgl[a0],dg1[a0]
|
||||
* Wa_1607063988:tgl[a0],dg1[a0]
|
||||
*/
|
||||
wa_write_or(wal,
|
||||
GEN9_CTX_PREEMPT_REG,
|
||||
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
|
||||
}
|
||||
|
||||
if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
|
||||
/*
|
||||
* Wa_1606679103:tgl
|
||||
* (see also Wa_1606682166:icl)
|
||||
|
@ -1695,35 +1756,41 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
VSUNIT_CLKGATE_DIS_TGL);
|
||||
}
|
||||
|
||||
if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1606931601:tgl,rkl */
|
||||
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1606931601:tgl,rkl,dg1 */
|
||||
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
|
||||
|
||||
/* Wa_1409804808:tgl,rkl */
|
||||
/*
|
||||
* Wa_1407928979:tgl A*
|
||||
* Wa_18011464164:tgl[B0+],dg1[B0+]
|
||||
* Wa_22010931296:tgl[B0+],dg1[B0+]
|
||||
* Wa_14010919138:rkl, dg1
|
||||
*/
|
||||
wa_write_or(wal, GEN7_FF_THREAD_MODE,
|
||||
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
|
||||
}
|
||||
|
||||
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
||||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1409804808:tgl,rkl,dg1[a0] */
|
||||
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
|
||||
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
|
||||
|
||||
/*
|
||||
* Wa_1409085225:tgl
|
||||
* Wa_14010229206:tgl,rkl
|
||||
* Wa_14010229206:tgl,rkl,dg1[a0]
|
||||
*/
|
||||
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
|
||||
|
||||
/*
|
||||
* Wa_1407928979:tgl A*
|
||||
* Wa_18011464164:tgl B0+
|
||||
* Wa_22010931296:tgl B0+
|
||||
* Wa_14010919138:rkl,tgl
|
||||
*/
|
||||
wa_write_or(wal, GEN7_FF_THREAD_MODE,
|
||||
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
|
||||
|
||||
/*
|
||||
* Wa_1607030317:tgl
|
||||
* Wa_1607186500:tgl
|
||||
* Wa_1607297627:tgl,rkl there are multiple entries for this
|
||||
* WA in the BSpec; some indicate this is an A0-only WA,
|
||||
* others indicate it applies to all steppings.
|
||||
* Wa_1607297627:tgl,rkl,dg1[a0]
|
||||
*
|
||||
* On TGL and RKL there are multiple entries for this WA in the
|
||||
* BSpec; some indicate this is an A0-only WA, others indicate
|
||||
* it applies to all steppings so we trust the "all steppings."
|
||||
* For DG1 this only applies to A0.
|
||||
*/
|
||||
wa_masked_en(wal,
|
||||
GEN6_RC_SLEEP_PSMI_CONTROL,
|
||||
|
@ -1839,7 +1906,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
|||
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
|
||||
|
||||
/* Wa_22010271021:ehl */
|
||||
if (IS_ELKHARTLAKE(i915))
|
||||
if (IS_JSL_EHL(i915))
|
||||
wa_masked_en(wal,
|
||||
GEN9_CS_DEBUG_MODE1,
|
||||
FF_DOP_CLOCK_GATE_DISABLE);
|
||||
|
@ -2031,10 +2098,12 @@ err_obj:
|
|||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static const struct {
|
||||
struct mcr_range {
|
||||
u32 start;
|
||||
u32 end;
|
||||
} mcr_ranges_gen8[] = {
|
||||
};
|
||||
|
||||
static const struct mcr_range mcr_ranges_gen8[] = {
|
||||
{ .start = 0x5500, .end = 0x55ff },
|
||||
{ .start = 0x7000, .end = 0x7fff },
|
||||
{ .start = 0x9400, .end = 0x97ff },
|
||||
|
@ -2043,11 +2112,25 @@ static const struct {
|
|||
{},
|
||||
};
|
||||
|
||||
static const struct mcr_range mcr_ranges_gen12[] = {
|
||||
{ .start = 0x8150, .end = 0x815f },
|
||||
{ .start = 0x9520, .end = 0x955f },
|
||||
{ .start = 0xb100, .end = 0xb3ff },
|
||||
{ .start = 0xde80, .end = 0xe8ff },
|
||||
{ .start = 0x24a00, .end = 0x24a7f },
|
||||
{},
|
||||
};
|
||||
|
||||
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
|
||||
{
|
||||
const struct mcr_range *mcr_ranges;
|
||||
int i;
|
||||
|
||||
if (INTEL_GEN(i915) < 8)
|
||||
if (INTEL_GEN(i915) >= 12)
|
||||
mcr_ranges = mcr_ranges_gen12;
|
||||
else if (INTEL_GEN(i915) >= 8)
|
||||
mcr_ranges = mcr_ranges_gen8;
|
||||
else
|
||||
return false;
|
||||
|
||||
/*
|
||||
|
@ -2055,9 +2138,9 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
|
|||
* which only controls CPU initiated MMIO. Routing does not
|
||||
* work for CS access so we cannot verify them on this path.
|
||||
*/
|
||||
for (i = 0; mcr_ranges_gen8[i].start; i++)
|
||||
if (offset >= mcr_ranges_gen8[i].start &&
|
||||
offset <= mcr_ranges_gen8[i].end)
|
||||
for (i = 0; mcr_ranges[i].start; i++)
|
||||
if (offset >= mcr_ranges[i].start &&
|
||||
offset <= mcr_ranges[i].end)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
|
|
@ -312,18 +312,18 @@ void intel_guc_write_params(struct intel_guc *guc)
|
|||
int i;
|
||||
|
||||
/*
|
||||
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
|
||||
* All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
|
||||
* they are power context saved so it's ok to release forcewake
|
||||
* when we are done here and take it again at xfer time.
|
||||
*/
|
||||
intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);
|
||||
intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);
|
||||
|
||||
intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
|
||||
|
||||
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
|
||||
intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
|
||||
|
||||
intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
|
||||
intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
|
||||
}
|
||||
|
||||
int intel_guc_init(struct intel_guc *guc)
|
||||
|
|
|
@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
|
|||
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
|
||||
fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \
|
||||
fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \
|
||||
fw_def(JASPERLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
|
||||
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
|
||||
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
|
||||
fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
|
||||
|
|
|
@ -173,23 +173,24 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
|
|||
int pipe;
|
||||
|
||||
if (IS_BROXTON(dev_priv)) {
|
||||
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
|
||||
BXT_DE_PORT_HP_DDIB |
|
||||
BXT_DE_PORT_HP_DDIC);
|
||||
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
|
||||
~(GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) |
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) |
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
|
||||
|
||||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
|
||||
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
|
||||
BXT_DE_PORT_HP_DDIA;
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
|
||||
}
|
||||
|
||||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
|
||||
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
|
||||
BXT_DE_PORT_HP_DDIB;
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
|
||||
}
|
||||
|
||||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
|
||||
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
|
||||
BXT_DE_PORT_HP_DDIC;
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
|
||||
}
|
||||
|
||||
return;
|
||||
|
@ -327,7 +328,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
|
|||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
|
||||
if (IS_BROADWELL(dev_priv))
|
||||
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
|
||||
GEN8_PORT_DP_A_HOTPLUG;
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
|
||||
else
|
||||
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
|
||||
|
||||
|
|
|
@ -290,8 +290,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
|
|||
case FORCEWAKE_RENDER_GEN9_REG:
|
||||
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
|
||||
break;
|
||||
case FORCEWAKE_BLITTER_GEN9_REG:
|
||||
ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
|
||||
case FORCEWAKE_GT_GEN9_REG:
|
||||
ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
|
||||
break;
|
||||
case FORCEWAKE_MEDIA_GEN9_REG:
|
||||
ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
|
||||
|
@ -2209,9 +2209,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
|||
MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
|
||||
MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
|
||||
|
||||
MMIO_D(WM0_PIPEA_ILK, D_ALL);
|
||||
MMIO_D(WM0_PIPEB_ILK, D_ALL);
|
||||
MMIO_D(WM0_PIPEC_IVB, D_ALL);
|
||||
MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL);
|
||||
MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL);
|
||||
MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL);
|
||||
MMIO_D(WM1_LP_ILK, D_ALL);
|
||||
MMIO_D(WM2_LP_ILK, D_ALL);
|
||||
MMIO_D(WM3_LP_ILK, D_ALL);
|
||||
|
@ -2901,8 +2901,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
|||
|
||||
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
|
||||
MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
|
||||
MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
|
||||
MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
|
||||
MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
|
||||
MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
|
||||
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
|
||||
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
|
||||
|
||||
|
|
|
@ -101,8 +101,8 @@
|
|||
|
||||
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
|
||||
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
|
||||
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
|
||||
#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
|
||||
#define FORCEWAKE_GT_GEN9_REG 0xa188
|
||||
#define FORCEWAKE_ACK_GT_GEN9_REG 0x130044
|
||||
#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
|
||||
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
|
||||
#define FORCEWAKE_ACK_HSW_REG 0x130044
|
||||
|
|
|
@ -786,7 +786,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
|||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
struct intel_rps *rps = &dev_priv->gt.rps;
|
||||
intel_wakeref_t wakeref;
|
||||
int ret = 0;
|
||||
|
||||
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
|
||||
|
||||
|
@ -1009,7 +1008,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
|||
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
|
||||
|
||||
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
||||
|
|
|
@ -84,6 +84,7 @@
|
|||
#include "intel_gvt.h"
|
||||
#include "intel_memory_region.h"
|
||||
#include "intel_pm.h"
|
||||
#include "intel_sideband.h"
|
||||
#include "vlv_suspend.h"
|
||||
|
||||
static struct drm_driver driver;
|
||||
|
@ -616,6 +617,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
intel_dram_detect(dev_priv);
|
||||
|
||||
intel_pcode_init(dev_priv);
|
||||
|
||||
intel_bw_init_hw(dev_priv);
|
||||
|
||||
return 0;
|
||||
|
@ -668,7 +671,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
|
|||
/* Reveal our presence to userspace */
|
||||
if (drm_dev_register(dev, 0) == 0) {
|
||||
i915_debugfs_register(dev_priv);
|
||||
intel_display_debugfs_register(dev_priv);
|
||||
if (HAS_DISPLAY(dev_priv))
|
||||
intel_display_debugfs_register(dev_priv);
|
||||
i915_setup_sysfs(dev_priv);
|
||||
|
||||
/* Depends on sysfs having been initialized */
|
||||
|
@ -840,9 +844,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
i915->params.fake_lmem_start) {
|
||||
mkwrite_device_info(i915)->memory_regions =
|
||||
REGION_SMEM | REGION_LMEM | REGION_STOLEN;
|
||||
mkwrite_device_info(i915)->is_dgfx = true;
|
||||
		GEM_BUG_ON(!HAS_LMEM(i915));
		GEM_BUG_ON(!IS_DGFX(i915));
	}
}
#endif

@ -1036,6 +1038,35 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
	drm_modeset_unlock_all(dev);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(dev);
}

void i915_driver_shutdown(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	drm_kms_helper_poll_disable(&i915->drm);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);
}

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)

@ -1089,7 +1120,7 @@ static int i915_drm_suspend(struct drm_device *dev)

	i915_ggtt_suspend(&dev_priv->ggtt);

	i915_save_state(dev_priv);
	i915_save_display(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

@ -1202,7 +1233,7 @@ static int i915_drm_resume(struct drm_device *dev)

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

@ -1225,26 +1256,15 @@ static int i915_drm_resume(struct drm_device *dev)

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);
	intel_hpd_init(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	intel_hpd_poll_disable(dev_priv);
	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 * */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

@ -1556,7 +1576,7 @@ static int intel_runtime_suspend(struct device *kdev)
	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);
		intel_hpd_poll_enable(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
	return 0;

@ -1601,8 +1621,10 @@ static int intel_runtime_resume(struct device *kdev)
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	intel_enable_ipc(dev_priv);

@ -110,8 +110,8 @@

#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20200917"
#define DRIVER_TIMESTAMP 1600375437
#define DRIVER_DATE "20201103"
#define DRIVER_TIMESTAMP 1604406085

struct drm_i915_gem_object;

@ -1419,7 +1419,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
				IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)

@ -1560,8 +1561,8 @@ extern const struct i915_rev_steppings kbl_revids[];

#define EHL_REVID_A0 0x0

#define IS_EHL_REVID(p, since, until) \
	(IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
#define IS_JSL_EHL_REVID(p, since, until) \
	(IS_JSL_EHL(p) && IS_REVID(p, since, until))

enum {
	TGL_REVID_A0,

@ -1783,6 +1784,7 @@ extern const struct dev_pm_ops i915_pm_ops;

int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);
void i915_driver_shutdown(struct drm_i915_private *i915);

int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);

File diff suppressed because it is too large
@ -118,6 +118,9 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);

void skl_enable_flip_done(struct intel_crtc *crtc);
void skl_disable_flip_done(struct intel_crtc *crtc);

void gen2_irq_reset(struct intel_uncore *uncore);
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier);

@ -847,6 +847,14 @@ static const struct intel_device_info ehl_info = {
	.ppgtt_size = 36,
};

static const struct intel_device_info jsl_info = {
	GEN11_FEATURES,
	PLATFORM(INTEL_JASPERLAKE),
	.require_force_probe = 1,
	.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
	.ppgtt_size = 36,
};

#define GEN12_FEATURES \
	GEN11_FEATURES, \
	GEN(12), \

@ -901,6 +909,8 @@ static const struct intel_device_info rkl_info = {
	GEN12_FEATURES, \
	.memory_regions = REGION_SMEM | REGION_LMEM, \
	.has_master_unit_irq = 1, \
	.has_llc = 0, \
	.has_snoop = 1, \
	.is_dgfx = 1

static const struct intel_device_info dg1_info __maybe_unused = {

@ -911,6 +921,8 @@ static const struct intel_device_info dg1_info __maybe_unused = {
	.platform_engine_mask =
		BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
		BIT(VCS0) | BIT(VCS2),
	/* Wa_16011227922 */
	.ppgtt_size = 47,
};

#undef GEN

@ -986,6 +998,7 @@ static const struct pci_device_id pciidlist[] = {
	INTEL_CNL_IDS(&cnl_info),
	INTEL_ICL_11_IDS(&icl_info),
	INTEL_EHL_IDS(&ehl_info),
	INTEL_JSL_IDS(&jsl_info),
	INTEL_TGL_12_IDS(&tgl_info),
	INTEL_RKL_IDS(&rkl_info),
	{0, 0, 0}

@ -1091,11 +1104,19 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	return 0;
}

static void i915_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_i915_private *i915 = pci_get_drvdata(pdev);

	i915_driver_shutdown(i915);
}

static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.shutdown = i915_pci_shutdown,
	.driver.pm = &i915_pm_ops,
};

@ -242,7 +242,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
#define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c))
#define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__))


/*
 * Device info offset array based helpers for groups of registers with unevenly
@ -2527,6 +2528,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
|||
#define RING_PSMI_CTL(base) _MMIO((base) + 0x50)
|
||||
#define RING_MAX_IDLE(base) _MMIO((base) + 0x54)
|
||||
#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
|
||||
#define RING_ID(base) _MMIO((base) + 0x8c)
|
||||
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
|
||||
#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
|
||||
#define RESET_CTL_CAT_ERROR REG_BIT(2)
|
||||
|
@ -4146,6 +4148,7 @@ enum {
|
|||
|
||||
#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
|
||||
#define TGL_VRH_GATING_DIS REG_BIT(31)
|
||||
#define DPT_GATING_DIS REG_BIT(22)
|
||||
|
||||
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
|
||||
#define BXT_GMBUS_GATING_DIS (1 << 14)
|
||||
|
@ -4618,6 +4621,110 @@ enum {
|
|||
#define PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(2)
|
||||
#define PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(1)
|
||||
|
||||
/* Icelake DSC Rate Control Range Parameter Registers */
|
||||
#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
|
||||
#define RC_BPG_OFFSET_SHIFT 10
|
||||
#define RC_MAX_QP_SHIFT 5
|
||||
#define RC_MIN_QP_SHIFT 0
|
||||
|
||||
#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
|
||||
|
||||
#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
|
||||
|
||||
#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
|
||||
|
||||
/* VGA port control */
|
||||
#define ADPA _MMIO(0x61100)
|
||||
#define PCH_ADPA _MMIO(0xe1100)
|
||||
|
@ -6327,15 +6434,16 @@ enum {
|
|||
_MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))
|
||||
|
||||
/* define the Watermark register on Ironlake */
|
||||
#define WM0_PIPEA_ILK _MMIO(0x45100)
|
||||
#define _WM0_PIPEA_ILK 0x45100
|
||||
#define _WM0_PIPEB_ILK 0x45104
|
||||
#define _WM0_PIPEC_IVB 0x45200
|
||||
#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \
|
||||
_WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
|
||||
#define WM0_PIPE_PLANE_MASK (0xffff << 16)
|
||||
#define WM0_PIPE_PLANE_SHIFT 16
|
||||
#define WM0_PIPE_SPRITE_MASK (0xff << 8)
|
||||
#define WM0_PIPE_SPRITE_SHIFT 8
|
||||
#define WM0_PIPE_CURSOR_MASK (0xff)
|
||||
|
||||
#define WM0_PIPEB_ILK _MMIO(0x45104)
|
||||
#define WM0_PIPEC_IVB _MMIO(0x45200)
|
||||
#define WM1_LP_ILK _MMIO(0x45108)
|
||||
#define WM1_LP_SR_EN (1 << 31)
|
||||
#define WM1_LP_LATENCY_SHIFT 24
|
||||
|
@ -6923,6 +7031,7 @@ enum {
|
|||
#define PLANE_CTL_TILED_X (1 << 10)
|
||||
#define PLANE_CTL_TILED_Y (4 << 10)
|
||||
#define PLANE_CTL_TILED_YF (5 << 10)
|
||||
#define PLANE_CTL_ASYNC_FLIP (1 << 9)
|
||||
#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8)
|
||||
#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */
|
||||
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
|
||||
|
@ -7379,6 +7488,7 @@ enum {
|
|||
#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
|
||||
#define PS_FILTER_MASK (3 << 23)
|
||||
#define PS_FILTER_MEDIUM (0 << 23)
|
||||
#define PS_FILTER_PROGRAMMED (1 << 23)
|
||||
#define PS_FILTER_EDGE_ENHANCE (2 << 23)
|
||||
#define PS_FILTER_BILINEAR (3 << 23)
|
||||
#define PS_VERT3TAP (1 << 21)
|
||||
|
@ -7393,6 +7503,10 @@ enum {
|
|||
#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
|
||||
#define PS_PLANE_Y_SEL_MASK (7 << 5)
|
||||
#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
|
||||
#define PS_Y_VERT_FILTER_SELECT(set) ((set) << 4)
|
||||
#define PS_Y_HORZ_FILTER_SELECT(set) ((set) << 3)
|
||||
#define PS_UV_VERT_FILTER_SELECT(set) ((set) << 2)
|
||||
#define PS_UV_HORZ_FILTER_SELECT(set) ((set) << 1)
|
||||
|
||||
#define _PS_PWR_GATE_1A 0x68160
|
||||
#define _PS_PWR_GATE_2A 0x68260
|
||||
|
@ -7455,6 +7569,17 @@ enum {
|
|||
#define _PS_ECC_STAT_2B 0x68AD0
|
||||
#define _PS_ECC_STAT_1C 0x691D0
|
||||
|
||||
#define _PS_COEF_SET0_INDEX_1A 0x68198
|
||||
#define _PS_COEF_SET0_INDEX_2A 0x68298
|
||||
#define _PS_COEF_SET0_INDEX_1B 0x68998
|
||||
#define _PS_COEF_SET0_INDEX_2B 0x68A98
|
||||
#define PS_COEE_INDEX_AUTO_INC (1 << 10)
|
||||
|
||||
#define _PS_COEF_SET0_DATA_1A 0x6819C
|
||||
#define _PS_COEF_SET0_DATA_2A 0x6829C
|
||||
#define _PS_COEF_SET0_DATA_1B 0x6899C
|
||||
#define _PS_COEF_SET0_DATA_2B 0x68A9C
|
||||
|
||||
#define _ID(id, a, b) _PICK_EVEN(id, a, b)
|
||||
#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
|
||||
_ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
|
||||
|
@ -7483,7 +7608,13 @@ enum {
|
|||
#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
|
||||
_ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
|
||||
_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
|
||||
#define CNL_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
|
||||
_ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
|
||||
_ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
|
||||
|
||||
#define CNL_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
|
||||
_ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
|
||||
_ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
|
||||
/* legacy palette */
|
||||
#define _LGC_PALETTE_A 0x4a000
|
||||
#define _LGC_PALETTE_B 0x4a800
|
||||
|
@ -7532,6 +7663,7 @@ enum {
|
|||
#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
|
||||
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
|
||||
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
|
||||
#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
|
||||
|
||||
#define DMC_DEBUG3 _MMIO(0x101090)
|
||||
|
||||
|
@ -7681,6 +7813,9 @@ enum {
|
|||
(GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
|
||||
GEN11_PIPE_PLANE5_FAULT)
|
||||
|
||||
#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
|
||||
#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
|
||||
|
||||
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
|
||||
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
|
||||
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
|
||||
|
@ -7694,13 +7829,11 @@ enum {
|
|||
#define GEN9_AUX_CHANNEL_B (1 << 25)
|
||||
#define DSI1_TE (1 << 24)
|
||||
#define DSI0_TE (1 << 23)
|
||||
#define BXT_DE_PORT_HP_DDIC (1 << 5)
|
||||
#define BXT_DE_PORT_HP_DDIB (1 << 4)
|
||||
#define BXT_DE_PORT_HP_DDIA (1 << 3)
|
||||
#define BXT_DE_PORT_HOTPLUG_MASK (BXT_DE_PORT_HP_DDIA | \
|
||||
BXT_DE_PORT_HP_DDIB | \
|
||||
BXT_DE_PORT_HP_DDIC)
|
||||
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
|
||||
#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
|
||||
#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
|
||||
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
|
||||
#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
|
||||
#define BXT_DE_PORT_GMBUS (1 << 1)
|
||||
#define GEN8_AUX_CHANNEL_A (1 << 0)
|
||||
#define TGL_DE_PORT_AUX_USBC6 (1 << 13)
|
||||
|
@ -7759,27 +7892,27 @@ enum {
|
|||
#define GEN11_DE_HPD_IMR _MMIO(0x44474)
|
||||
#define GEN11_DE_HPD_IIR _MMIO(0x44478)
|
||||
#define GEN11_DE_HPD_IER _MMIO(0x4447c)
|
||||
#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16))
|
||||
#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(PORT_TC6) | \
|
||||
GEN11_TC_HOTPLUG(PORT_TC5) | \
|
||||
GEN11_TC_HOTPLUG(PORT_TC4) | \
|
||||
GEN11_TC_HOTPLUG(PORT_TC3) | \
|
||||
GEN11_TC_HOTPLUG(PORT_TC2) | \
|
||||
GEN11_TC_HOTPLUG(PORT_TC1))
|
||||
#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port))
|
||||
#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(PORT_TC6) | \
|
||||
GEN11_TBT_HOTPLUG(PORT_TC5) | \
|
||||
GEN11_TBT_HOTPLUG(PORT_TC4) | \
|
||||
GEN11_TBT_HOTPLUG(PORT_TC3) | \
|
||||
GEN11_TBT_HOTPLUG(PORT_TC2) | \
|
||||
GEN11_TBT_HOTPLUG(PORT_TC1))
|
||||
#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
|
||||
#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
|
||||
GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
|
||||
GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
|
||||
GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
|
||||
GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
|
||||
GEN11_TC_HOTPLUG(HPD_PORT_TC1))
|
||||
#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
|
||||
#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
|
||||
GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
|
||||
GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
|
||||
GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
|
||||
GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
|
||||
GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
|
||||
|
||||
#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
|
||||
#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
|
||||
#define GEN11_HOTPLUG_CTL_ENABLE(tc_port) (8 << (tc_port) * 4)
|
||||
#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
|
||||
#define GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
|
||||
#define GEN11_HOTPLUG_CTL_NO_DETECT(tc_port) (0 << (tc_port) * 4)
|
||||
#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
|
||||
#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
|
||||
#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
|
||||
#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
|
||||
|
||||
#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
|
||||
#define GEN11_CSME (31)
|
||||
|
@ -7865,6 +7998,7 @@ enum {
|
|||
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
|
||||
|
||||
#define CHICKEN_PAR1_1 _MMIO(0x42080)
|
||||
#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
|
||||
#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16)
|
||||
#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
|
||||
#define DPA_MASK_VBLANK_SRD (1 << 15)
|
||||
|
@ -7877,6 +8011,8 @@ enum {
|
|||
|
||||
#define CHICKEN_MISC_2 _MMIO(0x42084)
|
||||
#define CNL_COMP_PWR_DOWN (1 << 23)
|
||||
#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
|
||||
#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
|
||||
#define GLK_CL2_PWR_DOWN (1 << 12)
|
||||
#define GLK_CL1_PWR_DOWN (1 << 11)
|
||||
#define GLK_CL0_PWR_DOWN (1 << 10)
|
||||
|
@ -7919,11 +8055,15 @@ enum {
|
|||
#define DISP_ARB_CTL2 _MMIO(0x45004)
|
||||
#define DISP_DATA_PARTITION_5_6 (1 << 6)
|
||||
#define DISP_IPC_ENABLE (1 << 3)
|
||||
#define _DBUF_CTL_S1 0x45008
|
||||
#define _DBUF_CTL_S2 0x44FE8
|
||||
#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
|
||||
#define DBUF_POWER_REQUEST (1 << 31)
|
||||
#define DBUF_POWER_STATE (1 << 30)
|
||||
|
||||
#define _DBUF_CTL_S1 0x45008
|
||||
#define _DBUF_CTL_S2 0x44FE8
|
||||
#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
|
||||
#define DBUF_POWER_REQUEST REG_BIT(31)
|
||||
#define DBUF_POWER_STATE REG_BIT(30)
|
||||
#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
|
||||
#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
|
||||
|
||||
#define GEN7_MSG_CTL _MMIO(0x45010)
|
||||
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
|
||||
#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
|
||||
|
@ -8014,13 +8154,15 @@ enum {
|
|||
#define GEN8_L3CNTLREG _MMIO(0x7034)
|
||||
#define GEN8_ERRDETBCTRL (1 << 9)
|
||||
|
||||
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
|
||||
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
|
||||
#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9)
|
||||
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
|
||||
#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
|
||||
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
|
||||
#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
|
||||
|
||||
#define HIZ_CHICKEN _MMIO(0x7018)
|
||||
# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
|
||||
# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1 << 3)
|
||||
# define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15)
|
||||
# define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
|
||||
# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
|
||||
|
||||
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
|
||||
#define DISABLE_PIXEL_MASK_CAMMING (1 << 14)
|
||||
|
@ -8208,23 +8350,18 @@ enum {
|
|||
|
||||
/* south display engine interrupt: ICP/TGP */
|
||||
#define SDE_GMBUS_ICP (1 << 23)
|
||||
#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
|
||||
#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16))
|
||||
#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \
|
||||
SDE_DDI_HOTPLUG_ICP(PORT_A))
|
||||
#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC1))
|
||||
#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \
|
||||
SDE_DDI_HOTPLUG_ICP(PORT_B) | \
|
||||
SDE_DDI_HOTPLUG_ICP(PORT_A))
|
||||
#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC5) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
|
||||
SDE_TC_HOTPLUG_ICP(PORT_TC1))
|
||||
#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
|
||||
#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
|
||||
#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
|
||||
SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
|
||||
SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
|
||||
SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
|
||||
#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
|
||||
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
|
||||
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
|
||||
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
|
||||
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
|
||||
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
|
||||
|
||||
#define SDEISR _MMIO(0xc4000)
|
||||
#define SDEIMR _MMIO(0xc4004)
|
||||
|
@ -8292,139 +8429,21 @@ enum {
|
|||
*/
|
||||
|
||||
#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
|
||||
#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port)))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port)))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port)))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port)))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port)))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port)))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
|
||||
#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
|
||||
|
||||
#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
|
||||
#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
|
||||
#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
|
||||
#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4))
|
||||
#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
|
||||
|
||||
#define SHPD_FILTER_CNT _MMIO(0xc4038)
|
||||
#define SHPD_FILTER_CNT_500_ADJ 0x001D9
|
||||
|
||||
/* Icelake DSC Rate Control Range Parameter Registers */
|
||||
#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
|
||||
#define RC_BPG_OFFSET_SHIFT 10
|
||||
#define RC_MAX_QP_SHIFT 5
|
||||
#define RC_MIN_QP_SHIFT 0
|
||||
|
||||
#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
|
||||
|
||||
#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
|
||||
|
||||
#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258)
|
||||
#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58)
|
||||
#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420)
|
||||
#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520)
|
||||
#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
|
||||
#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
|
||||
_ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
|
||||
#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
|
||||
_ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
|
||||
|
||||
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
|
||||
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
|
||||
|
||||
#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
|
||||
SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
|
||||
#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \
|
||||
ICP_TC_HPD_ENABLE(PORT_TC3) | \
|
||||
ICP_TC_HPD_ENABLE(PORT_TC2) | \
|
||||
ICP_TC_HPD_ENABLE(PORT_TC1))
|
||||
#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \
|
||||
SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
|
||||
SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
|
||||
#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \
|
||||
ICP_TC_HPD_ENABLE(PORT_TC5) | \
|
||||
ICP_TC_HPD_ENABLE_MASK)
|
||||
|
||||
#define _PCH_DPLL_A 0xc6014
|
||||
#define _PCH_DPLL_B 0xc6018
|
||||
#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
|
||||
|
@ -8684,6 +8703,10 @@ enum {
|
|||
#define SOUTH_CHICKEN1 _MMIO(0xc2000)
|
||||
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
|
||||
#define FDIA_PHASE_SYNC_SHIFT_EN 18
|
||||
#define INVERT_DDID_HPD (1 << 18)
|
||||
#define INVERT_DDIC_HPD (1 << 17)
|
||||
#define INVERT_DDIB_HPD (1 << 16)
|
||||
#define INVERT_DDIA_HPD (1 << 15)
|
||||
#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
|
||||
#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
|
||||
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
|
||||
|
@ -8954,12 +8977,12 @@ enum {
|
|||
#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4)
|
||||
#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4)
|
||||
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
|
||||
#define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188)
|
||||
#define FORCEWAKE_GT_GEN9 _MMIO(0xa188)
|
||||
#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
|
||||
#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4)
|
||||
#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4)
|
||||
#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
|
||||
#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
|
||||
#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044)
|
||||
#define FORCEWAKE_KERNEL BIT(0)
|
||||
#define FORCEWAKE_USER BIT(1)
|
||||
#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
|
||||
|
@ -9223,6 +9246,9 @@ enum {
|
|||
#define GEN9_SAGV_DISABLE 0x0
|
||||
#define GEN9_SAGV_IS_DISABLED 0x1
|
||||
#define GEN9_SAGV_ENABLE 0x3
|
||||
#define DG1_PCODE_STATUS 0x7E
|
||||
#define DG1_UNCORE_GET_INIT_STATUS 0x0
|
||||
#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
|
||||
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
|
||||
#define GEN6_PCODE_DATA _MMIO(0x138128)
|
||||
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
|
||||
|
@ -10257,6 +10283,7 @@ enum skl_power_gate {
|
|||
#define DPLL_CFGCR2_PDIV_2 (1 << 2)
|
||||
#define DPLL_CFGCR2_PDIV_3 (2 << 2)
|
||||
#define DPLL_CFGCR2_PDIV_7 (4 << 2)
|
||||
#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
|
||||
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
|
||||
|
||||
#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
|
||||
|
@ -10276,9 +10303,9 @@ enum skl_power_gate {
|
|||
#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
|
||||
#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
|
||||
#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \
|
||||
#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
|
||||
(tc_port) + 12 : \
|
||||
(tc_port) - PORT_TC4 + 21))
|
||||
(tc_port) - TC_PORT_4 + 21))
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
|
||||
|
@ -10307,6 +10334,10 @@ enum skl_power_gate {
|
|||
#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
|
||||
_MG_PLL2_ENABLE)
|
||||
|
||||
/* DG1 PLL */
|
||||
#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
|
||||
_MG_PLL1_ENABLE, _MG_PLL2_ENABLE)
|
||||
|
||||
#define _MG_REFCLKIN_CTL_PORT1 0x16892C
|
||||
#define _MG_REFCLKIN_CTL_PORT2 0x16992C
|
||||
#define _MG_REFCLKIN_CTL_PORT3 0x16A92C
|
||||
|
@ -10523,6 +10554,20 @@ enum skl_power_gate {
|
|||
#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
|
||||
_TGL_DPLL1_CFGCR1)
|
||||
|
||||
#define _DG1_DPLL2_CFGCR0 0x16C284
|
||||
#define _DG1_DPLL3_CFGCR0 0x16C28C
|
||||
#define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
|
||||
_TGL_DPLL1_CFGCR0, \
|
||||
_DG1_DPLL2_CFGCR0, \
|
||||
_DG1_DPLL3_CFGCR0)
|
||||
|
||||
#define _DG1_DPLL2_CFGCR1 0x16C288
|
||||
#define _DG1_DPLL3_CFGCR1 0x16C290
|
||||
#define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
|
||||
_TGL_DPLL1_CFGCR1, \
|
||||
_DG1_DPLL2_CFGCR1, \
|
||||
_DG1_DPLL3_CFGCR1)
|
||||
|
||||
#define _DKL_PHY1_BASE 0x168000
|
||||
#define _DKL_PHY2_BASE 0x169000
|
||||
#define _DKL_PHY3_BASE 0x16A000
|
||||
|
@ -11003,14 +11048,17 @@ enum skl_power_gate {
|
|||
#define _CGM_PIPE_A_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6790C)
|
||||
#define _CGM_PIPE_A_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x67910)
|
||||
#define _CGM_PIPE_A_DEGAMMA (VLV_DISPLAY_BASE + 0x66000)
|
||||
#define CGM_PIPE_DEGAMMA_RED_MASK REG_GENMASK(13, 0)
|
||||
#define CGM_PIPE_DEGAMMA_GREEN_MASK REG_GENMASK(29, 16)
|
||||
#define CGM_PIPE_DEGAMMA_BLUE_MASK REG_GENMASK(13, 0)
|
||||
#define _CGM_PIPE_A_GAMMA (VLV_DISPLAY_BASE + 0x67000)
|
||||
#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0)
|
||||
#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16)
|
||||
#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0)
|
||||
#define _CGM_PIPE_A_MODE (VLV_DISPLAY_BASE + 0x67A00)
|
||||
#define CGM_PIPE_MODE_GAMMA (1 << 2)
|
||||
#define CGM_PIPE_MODE_CSC (1 << 1)
|
||||
#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
|
||||
#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0)
|
||||
#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16)
|
||||
#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0)
|
||||
|
||||
#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
|
||||
#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
|
||||
|
|
|
@ -32,7 +32,57 @@
#include "i915_reg.h"
#include "i915_suspend.h"

static void i915_save_display(struct drm_i915_private *dev_priv)
static void intel_save_swf(struct drm_i915_private *dev_priv)
{
	int i;

	/* Scratch space */
	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
		for (i = 0; i < 7; i++) {
			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
		}
		for (i = 0; i < 3; i++)
			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
	} else if (IS_GEN(dev_priv, 2)) {
		for (i = 0; i < 7; i++)
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
	} else if (HAS_GMCH(dev_priv)) {
		for (i = 0; i < 16; i++) {
			dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
			dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
		}
		for (i = 0; i < 3; i++)
			dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
	}
}

static void intel_restore_swf(struct drm_i915_private *dev_priv)
{
	int i;

	/* Scratch space */
	if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
		for (i = 0; i < 7; i++) {
			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
		}
		for (i = 0; i < 3; i++)
			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
	} else if (IS_GEN(dev_priv, 2)) {
		for (i = 0; i < 7; i++)
			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
	} else if (HAS_GMCH(dev_priv)) {
		for (i = 0; i < 16; i++) {
			I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
			I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
		}
		for (i = 0; i < 3; i++)
			I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
	}
}

void i915_save_display(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

@ -43,12 +93,16 @@ static void i915_save_display(struct drm_i915_private *dev_priv)
	if (IS_GEN(dev_priv, 4))
		pci_read_config_word(pdev, GCDGMBUS,
				     &dev_priv->regfile.saveGCDGMBUS);

	intel_save_swf(dev_priv);
}

static void i915_restore_display(struct drm_i915_private *dev_priv)
void i915_restore_display(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_restore_swf(dev_priv);

	if (IS_GEN(dev_priv, 4))
		pci_write_config_word(pdev, GCDGMBUS,
				      dev_priv->regfile.saveGCDGMBUS);
@ -64,61 +118,3 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
|
|||
|
||||
intel_gmbus_reset(dev_priv);
|
||||
}
|
||||
|
||||
int i915_save_state(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int i;
|
||||
|
||||
i915_save_display(dev_priv);
|
||||
|
||||
/* Scratch space */
|
||||
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
|
||||
for (i = 0; i < 7; i++) {
|
||||
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
|
||||
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
|
||||
} else if (IS_GEN(dev_priv, 2)) {
|
||||
for (i = 0; i < 7; i++)
|
||||
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
|
||||
} else if (HAS_GMCH(dev_priv)) {
|
||||
for (i = 0; i < 16; i++) {
|
||||
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
|
||||
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int i915_restore_state(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int i;
|
||||
|
||||
i915_restore_display(dev_priv);
|
||||
|
||||
/* Scratch space */
|
||||
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
|
||||
for (i = 0; i < 7; i++) {
|
||||
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
|
||||
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
|
||||
} else if (IS_GEN(dev_priv, 2)) {
|
||||
for (i = 0; i < 7; i++)
|
||||
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
|
||||
} else if (HAS_GMCH(dev_priv)) {
|
||||
for (i = 0; i < 16; i++) {
|
||||
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
|
||||
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
|
||||
}
|
||||
for (i = 0; i < 3; i++)
|
||||
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@

struct drm_i915_private;

int i915_save_state(struct drm_i915_private *i915);
int i915_restore_state(struct drm_i915_private *i915);
void i915_save_display(struct drm_i915_private *i915);
void i915_restore_display(struct drm_i915_private *i915);

#endif /* __I915_SUSPEND_H__ */

@ -62,6 +62,7 @@ static const char * const platform_names[] = {
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(JASPERLAKE),
	PLATFORM_NAME(TIGERLAKE),
	PLATFORM_NAME(ROCKETLAKE),
	PLATFORM_NAME(DG1),

@ -79,6 +79,7 @@ enum intel_platform {
	/* gen11 */
	INTEL_ICELAKE,
	INTEL_ELKHARTLAKE,
	INTEL_JASPERLAKE,
	/* gen12 */
	INTEL_TIGERLAKE,
	INTEL_ROCKETLAKE,

|
@ -7,7 +7,8 @@
|
|||
#include "intel_dram.h"
|
||||
|
||||
struct dram_dimm_info {
|
||||
u8 size, width, ranks;
|
||||
u16 size;
|
||||
u8 width, ranks;
|
||||
};
|
||||
|
||||
struct dram_channel_info {
|
||||
|
@ -41,10 +42,10 @@ static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
|
|||
return dimm->ranks * 64 / (dimm->width ?: 1);
|
||||
}
|
||||
|
||||
/* Returns total GB for the whole DIMM */
|
||||
/* Returns total Gb for the whole DIMM */
|
||||
static int skl_get_dimm_size(u16 val)
|
||||
{
|
||||
return val & SKL_DRAM_SIZE_MASK;
|
||||
return (val & SKL_DRAM_SIZE_MASK) * 8;
|
||||
}
|
||||
|
||||
static int skl_get_dimm_width(u16 val)
|
||||
|
@ -74,10 +75,10 @@ static int skl_get_dimm_ranks(u16 val)
|
|||
return val + 1;
|
||||
}
|
||||
|
||||
/* Returns total GB for the whole DIMM */
|
||||
/* Returns total Gb for the whole DIMM */
|
||||
static int cnl_get_dimm_size(u16 val)
|
||||
{
|
||||
return (val & CNL_DRAM_SIZE_MASK) / 2;
|
||||
return (val & CNL_DRAM_SIZE_MASK) * 8 / 2;
|
||||
}
|
||||
|
||||
static int cnl_get_dimm_width(u16 val)
|
||||
|
@ -110,8 +111,8 @@ static int cnl_get_dimm_ranks(u16 val)
|
|||
static bool
|
||||
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
|
||||
{
|
||||
/* Convert total GB to Gb per DRAM device */
|
||||
return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
|
||||
/* Convert total Gb to Gb per DRAM device */
|
||||
return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -130,7 +131,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
|
|||
}
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
|
||||
"CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
|
||||
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
|
||||
yesno(skl_is_16gb_dimm(dimm)));
|
||||
}
|
||||
|
@ -354,9 +355,9 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
|
|||
|
||||
/*
|
||||
* Size in register is Gb per DRAM device. Convert to total
|
||||
* GB to match the way we report this for non-LP platforms.
|
||||
* Gb to match the way we report this for non-LP platforms.
|
||||
*/
|
||||
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
|
||||
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
|
||||
}
|
||||
|
||||
static int bxt_get_dram_info(struct drm_i915_private *i915)
|
||||
|
@ -404,7 +405,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
|
|||
dram_info->type != type);
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
|
||||
"CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
|
||||
i - BXT_D_CR_DRP0_DUNIT_START,
|
||||
dimm.size, dimm.width, dimm.ranks,
|
||||
intel_dram_type_str(type));
|
||||
|
|
|
@ -115,7 +115,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
|
|||
return PCH_ICP;
|
||||
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
|
||||
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
|
||||
return PCH_MCC;
|
||||
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
|
||||
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
|
||||
|
@ -126,7 +126,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
|
|||
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
|
||||
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
|
||||
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
|
||||
return PCH_JSP;
|
||||
default:
|
||||
return PCH_NONE;
|
||||
|
@ -157,7 +157,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
|
|||
|
||||
if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
|
||||
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
|
||||
else if (IS_ELKHARTLAKE(dev_priv))
|
||||
else if (IS_JSL_EHL(dev_priv))
|
||||
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
|
||||
else if (IS_ICELAKE(dev_priv))
|
||||
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
|
||||
|
|
|
@ -3573,11 +3573,11 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
|
|||
_ilk_disable_lp_wm(dev_priv, dirty);
|
||||
|
||||
if (dirty & WM_DIRTY_PIPE(PIPE_A))
|
||||
I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
|
||||
I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
|
||||
if (dirty & WM_DIRTY_PIPE(PIPE_B))
|
||||
I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
|
||||
I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
|
||||
if (dirty & WM_DIRTY_PIPE(PIPE_C))
|
||||
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
|
||||
I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
|
||||
|
||||
if (dirty & WM_DIRTY_DDB) {
|
||||
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
|
||||
|
@ -3706,7 +3706,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
|
|||
* - All planes can enable watermarks for latencies >= SAGV engine block time
|
||||
* - We're not using an interlaced display configuration
|
||||
*/
|
||||
int
|
||||
static int
|
||||
intel_enable_sagv(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int ret;
|
||||
|
@ -3740,7 +3740,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
static int
|
||||
intel_disable_sagv(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int ret;
|
||||
|
@ -6287,13 +6287,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
|
|||
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
|
||||
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
static const i915_reg_t wm0_pipe_reg[] = {
|
||||
[PIPE_A] = WM0_PIPEA_ILK,
|
||||
[PIPE_B] = WM0_PIPEB_ILK,
|
||||
[PIPE_C] = WM0_PIPEC_IVB,
|
||||
};
|
||||
|
||||
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
|
||||
hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe));
|
||||
|
||||
memset(active, 0, sizeof(*active));
|
||||
|
||||
|
@ -7116,25 +7111,26 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
0, CNL_DELAY_PMRSP);
|
||||
}
|
||||
|
||||
static void gen12_init_clock_gating(struct drm_i915_private *i915)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
|
||||
for (i = 0; i < I915_MAX_VCS; i++)
|
||||
if (HAS_ENGINE(&i915->gt, _VCS(i)))
|
||||
intel_uncore_rmw(&i915->uncore, POWERGATE_ENABLE, 0,
|
||||
VDN_HCP_POWERGATE_ENABLE(i) |
|
||||
VDN_MFX_POWERGATE_ENABLE(i));
|
||||
}
|
||||
|
||||
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 vd_pg_enable = 0;
|
||||
unsigned int i;
|
||||
gen12_init_clock_gating(dev_priv);
|
||||
|
||||
/* Wa_1409120013:tgl */
|
||||
I915_WRITE(ILK_DPFC_CHICKEN,
|
||||
ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
|
||||
|
||||
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
|
||||
for (i = 0; i < I915_MAX_VCS; i++) {
|
||||
if (HAS_ENGINE(&dev_priv->gt, _VCS(i)))
|
||||
vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
|
||||
VDN_MFX_POWERGATE_ENABLE(i);
|
||||
}
|
||||
|
||||
I915_WRITE(POWERGATE_ENABLE,
|
||||
I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
|
||||
|
||||
/* Wa_1409825376:tgl (pre-prod)*/
|
||||
if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
|
||||
I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
|
||||
|
@ -7145,6 +7141,16 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
0, DFR_DISABLE);
|
||||
}
|
||||
|
||||
static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
gen12_init_clock_gating(dev_priv);
|
||||
|
||||
/* Wa_1409836686:dg1[a0] */
|
||||
if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
|
||||
I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
|
||||
DPT_GATING_DIS);
|
||||
}
|
||||
|
||||
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (!HAS_PCH_CNP(dev_priv))
|
||||
|
@ -7197,6 +7203,10 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
cnp_init_clock_gating(dev_priv);
|
||||
gen9_init_clock_gating(dev_priv);
|
||||
|
||||
/* WAC6entrylatency:cfl */
|
||||
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
|
||||
FBC_LLC_FULLY_OPEN);
|
||||
|
||||
/*
|
||||
* WaFbcTurnOffFbcWatermark:cfl
|
||||
* Display WA #0562: cfl
|
||||
|
@ -7216,6 +7226,10 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
gen9_init_clock_gating(dev_priv);
|
||||
|
||||
/* WAC6entrylatency:kbl */
|
||||
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
|
||||
FBC_LLC_FULLY_OPEN);
|
||||
|
||||
/* WaDisableSDEUnitClockGating:kbl */
|
||||
if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
|
||||
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
|
||||
|
@ -7590,7 +7604,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
|
|||
*/
|
||||
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_GEN(dev_priv, 12))
|
||||
if (IS_DG1(dev_priv))
|
||||
dev_priv->display.init_clock_gating = dg1_init_clock_gating;
|
||||
else if (IS_GEN(dev_priv, 12))
|
||||
dev_priv->display.init_clock_gating = tgl_init_clock_gating;
|
||||
else if (IS_GEN(dev_priv, 11))
|
||||
dev_priv->display.init_clock_gating = icl_init_clock_gating;
|
||||
|
|
|
@ -49,8 +49,6 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
|
|||
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
|
||||
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
|
||||
const struct intel_bw_state *bw_state);
|
||||
int intel_enable_sagv(struct drm_i915_private *dev_priv);
|
||||
int intel_disable_sagv(struct drm_i915_private *dev_priv);
|
||||
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
|
||||
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
|
||||
bool skl_wm_level_equals(const struct skl_wm_level *l1,
|
||||
|
|
|
@ -555,3 +555,18 @@ out:
	return ret ? ret : status;
#undef COND
}

void intel_pcode_init(struct drm_i915_private *i915)
{
	int ret;

	if (!IS_DGFX(i915))
		return;

	ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
				DG1_UNCORE_GET_INIT_STATUS,
				DG1_UNCORE_INIT_STATUS_COMPLETE,
				DG1_UNCORE_INIT_STATUS_COMPLETE, 50);
	if (ret)
		drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
}

@ -138,4 +138,6 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms);

void intel_pcode_init(struct drm_i915_private *i915);

#endif /* _INTEL_SIDEBAND_H */

|
@@ -1051,37 +1051,37 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
@@ -1089,33 +1089,33 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
@@ -1124,44 +1124,113 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
/*
 * *Must* be sorted by offset ranges! See intel_fw_table_check().
 *
 * Note that the spec lists several reserved/unused ranges that don't
 * actually contain any registers. In the table below we'll combine those
 * reserved ranges with either the preceding or following range to keep the
 * table small and lookups fast.
 */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		0x0 - 0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
		0xb400 - 0xbf7f: gt
		0xb480 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
		0xdc00 - 0xddff: render
		0xde00 - 0xde7f: reserved
		0xde80 - 0xe8ff: render
		0xe900 - 0xefff: reserved */
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
		0xf000 - 0xffff: gt
		0x10000 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
		0x14800 - 0x14fff: render
		0x15000 - 0x16dff: reserved
		0x16e00 - 0x1bfff: render
		0x1c000 - 0x1ffff: reserved */
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x255ff: reserved */
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25680 - 0x256ff: VD2
		0x25700 - 0x259ff: reserved */
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25a80 - 0x25aff: VD2
		0x25b00 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbeff: reserved
		0x1cbf00 - 0x1cbfff: VE0 */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1ccfff: VD0
		0x1cd000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
};

static void
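Editorial note: the comment above the new __gen12_fw_ranges[] table is the key constraint of this rework. Because each table is sorted by offset and the ranges never overlap, a register offset can be mapped to its forcewake domain with a binary search instead of a linear scan, and folding reserved holes into a neighbouring entry keeps the table short. The sketch below is illustrative only; the struct and function names are invented for the example and are not the driver's intel_fw_table_check()/lookup code.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical mirror of one forcewake range entry: [start, end] -> domain mask. */
struct fw_range {
	uint32_t start;
	uint32_t end;
	uint32_t domains;
};

/* Binary search over a table sorted by non-overlapping offset ranges. */
static uint32_t fw_range_lookup(const struct fw_range *table, size_t count,
				uint32_t offset)
{
	size_t lo = 0, hi = count;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (offset < table[mid].start)
			hi = mid;
		else if (offset > table[mid].end)
			lo = mid + 1;
		else
			return table[mid].domains;
	}
	return 0; /* no range claims this offset: no forcewake needed */
}

The fewer the entries, the fewer probes each lookup takes, which is exactly the trade-off the comment describes when it merges reserved ranges into their neighbours.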
@@ -1491,7 +1560,7 @@ static int __fw_domain_init(struct intel_uncore *uncore,
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
@@ -1560,9 +1629,9 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
@@ -1586,9 +1655,9 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
@@ -1723,11 +1792,15 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 * For dgfx chips register range is expanded to 4MB.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else if (IS_DGFX(i915))
		mmio_size = 4 * 1024 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
@@ -46,7 +46,7 @@ struct intel_uncore_mmio_debug {

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_GT, /* also includes blitter engine */
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
@@ -60,7 +60,7 @@ enum forcewake_domain_id {

enum forcewake_domains {
	FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_GT = BIT(FW_DOMAIN_ID_GT),
	FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
@@ -324,6 +324,13 @@ struct drm_crtc_state {
	 */
	bool self_refresh_active;

	/**
	 * @scaling_filter:
	 *
	 * Scaling filter to be applied
	 */
	enum drm_scaling_filter scaling_filter;

	/**
	 * @event:
	 *
@@ -1083,6 +1090,12 @@ struct drm_crtc {
	/** @properties: property tracking for this CRTC */
	struct drm_object_properties properties;

	/**
	 * @scaling_filter_property: property to apply a particular filter while
	 * scaling.
	 */
	struct drm_property *scaling_filter_property;

	/**
	 * @state:
	 *
@@ -1266,4 +1279,7 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
#define drm_for_each_crtc(crtc, dev) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
					    unsigned int supported_filters);

#endif /* __DRM_CRTC_H__ */
@@ -1118,15 +1118,58 @@ struct drm_device;
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */

enum drm_dp_phy {
	DP_PHY_DPRX,

	DP_PHY_LTTPR1,
	DP_PHY_LTTPR2,
	DP_PHY_LTTPR3,
	DP_PHY_LTTPR4,
	DP_PHY_LTTPR5,
	DP_PHY_LTTPR6,
	DP_PHY_LTTPR7,
	DP_PHY_LTTPR8,

	DP_MAX_LTTPR_COUNT = DP_PHY_LTTPR8,
};

#define DP_PHY_LTTPR(i) (DP_PHY_LTTPR1 + (i))

#define __DP_LTTPR1_BASE 0xf0010 /* 1.3 */
#define __DP_LTTPR2_BASE 0xf0060 /* 1.3 */
#define DP_LTTPR_BASE(dp_phy) \
	(__DP_LTTPR1_BASE + (__DP_LTTPR2_BASE - __DP_LTTPR1_BASE) * \
		((dp_phy) - DP_PHY_LTTPR1))

#define DP_LTTPR_REG(dp_phy, lttpr1_reg) \
	(DP_LTTPR_BASE(dp_phy) - DP_LTTPR_BASE(DP_PHY_LTTPR1) + (lttpr1_reg))

#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
#define DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy) \
	DP_LTTPR_REG(dp_phy, DP_TRAINING_PATTERN_SET_PHY_REPEATER1)

#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */
#define DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy) \
	DP_LTTPR_REG(dp_phy, DP_TRAINING_LANE0_SET_PHY_REPEATER1)

#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */
#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */
#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */
#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */
#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \
	DP_LTTPR_REG(dp_phy, DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)

#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */
# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0)
# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1)

#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \
	DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1)

#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */

#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */
#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */
#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */
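Editorial note: the DP_LTTPR_BASE()/DP_LTTPR_REG() macros above encode a fixed 0x50-byte stride between per-repeater DPCD register blocks (0xf0060 - 0xf0010). The following small stand-alone program re-checks that arithmetic; it duplicates only the two base constants from this hunk under its own names and is not part of the patch.

#include <stdio.h>

#define LTTPR1_BASE 0xf0010	/* __DP_LTTPR1_BASE from the hunk above */
#define LTTPR2_BASE 0xf0060	/* __DP_LTTPR2_BASE from the hunk above */

/* Same formula as DP_LTTPR_BASE()/DP_LTTPR_REG(), with the enum collapsed
 * to a plain zero-based index (0 = LTTPR1, 1 = LTTPR2, ...). */
static unsigned int lttpr_reg(unsigned int idx, unsigned int lttpr1_reg)
{
	unsigned int base = LTTPR1_BASE + (LTTPR2_BASE - LTTPR1_BASE) * idx;

	return base - LTTPR1_BASE + lttpr1_reg;
}

int main(void)
{
	/* TRAINING_PATTERN_SET sits at 0xf0010 for LTTPR1 ... */
	printf("LTTPR1: 0x%x\n", lttpr_reg(0, 0xf0010)); /* prints 0xf0010 */
	/* ... 0x50 higher for LTTPR2 ... */
	printf("LTTPR2: 0x%x\n", lttpr_reg(1, 0xf0010)); /* prints 0xf0060 */
	/* ... and 7 * 0x50 higher for LTTPR8. */
	printf("LTTPR8: 0x%x\n", lttpr_reg(7, 0xf0010)); /* prints 0xf0240 */
	return 0;
}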
@@ -1237,9 +1280,13 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ
#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
#define DP_LTTPR_COMMON_CAP_SIZE 8
#define DP_LTTPR_PHY_CAP_SIZE 3

void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_lttpr_link_train_clock_recovery_delay(void);
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_lttpr_link_train_channel_eq_delay(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);

u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -1698,6 +1745,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
				 u8 status[DP_LINK_STATUS_SIZE]);

int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
				     enum drm_dp_phy dp_phy,
				     u8 link_status[DP_LINK_STATUS_SIZE]);

bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
				    u8 real_edid_checksum);
@@ -1747,6 +1798,17 @@ bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
				const struct drm_dp_desc *desc);
int drm_dp_read_sink_count(struct drm_dp_aux *aux);

int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
				  u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
			       enum drm_dp_phy dp_phy,
			       u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);

void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
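Editorial note: taken together, the helpers added here give a driver the usual probe flow for non-transparent LTTPR link training: read the common caps once, derive the repeater count, and clamp the link parameters it will train with. A kernel-context sketch using only the functions declared above; the probe_lttprs() wrapper itself is hypothetical and not part of the patch.

#include <drm/drm_dp_helper.h>

/* Illustrative only: returns the number of usable LTTPRs, 0 if none,
 * or a negative error code. */
static int probe_lttprs(struct drm_dp_aux *aux, int *max_rate, int *max_lanes)
{
	u8 caps[DP_LTTPR_COMMON_CAP_SIZE];
	int count, ret;

	ret = drm_dp_read_lttpr_common_caps(aux, caps);
	if (ret < 0)
		return ret;

	count = drm_dp_lttpr_count(caps);
	if (count <= 0)
		return count;

	/* Every repeater on the path must support the trained rate/width. */
	*max_rate = drm_dp_lttpr_max_link_rate(caps);
	*max_lanes = drm_dp_lttpr_max_lane_count(caps);

	return count;
}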
@@ -35,6 +35,11 @@ struct drm_crtc;
struct drm_printer;
struct drm_modeset_acquire_ctx;

enum drm_scaling_filter {
	DRM_SCALING_FILTER_DEFAULT,
	DRM_SCALING_FILTER_NEAREST_NEIGHBOR,
};

/**
 * struct drm_plane_state - mutable plane state
 *
@@ -214,6 +219,13 @@ struct drm_plane_state {
	 */
	bool visible;

	/**
	 * @scaling_filter:
	 *
	 * Scaling filter to be applied
	 */
	enum drm_scaling_filter scaling_filter;

	/**
	 * @commit: Tracks the pending commit to prevent use-after-free conditions,
	 * and for async plane updates.
@@ -724,6 +736,12 @@ struct drm_plane {
	 * See drm_plane_create_color_properties().
	 */
	struct drm_property *color_range_property;

	/**
	 * @scaling_filter_property: property to apply a particular filter while
	 * scaling.
	 */
	struct drm_property *scaling_filter_property;
};

#define obj_to_plane(x) container_of(x, struct drm_plane, base)
@@ -862,4 +880,7 @@ drm_plane_get_damage_clips(const struct drm_plane_state *state)
				state->fb_damage_clips->data : NULL);
}

int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
					     unsigned int supported_filters);

#endif
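Editorial note: these drm_plane.h hunks (and the matching drm_crtc.h ones earlier) are the uapi surface of the integer scaling filter work: a driver advertises the filters it supports through @scaling_filter_property, and userspace's choice lands in @scaling_filter of the atomic state. A driver-side sketch follows; the BIT(DRM_SCALING_FILTER_*) bitmask convention for supported_filters and the my_*() names are assumptions for illustration, not taken from this patch.

#include <linux/bits.h>
#include <drm/drm_plane.h>

/* Illustrative only: expose nearest-neighbour scaling on a plane. */
static int my_plane_add_scaling_filter(struct drm_plane *plane)
{
	/* Assumed convention: supported_filters is a mask of BIT(enum) values. */
	return drm_plane_create_scaling_filter_property(plane,
			BIT(DRM_SCALING_FILTER_DEFAULT) |
			BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
}

/* Illustrative only: the atomic update path then keys off the chosen filter. */
static bool my_plane_wants_nearest_neighbor(const struct drm_plane_state *state)
{
	return state->scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR;
}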
@@ -170,9 +170,9 @@

#define INTEL_HSW_ULT_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
	INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
	INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
	INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
	INTEL_VGA_DEVICE(0x0A06, info) /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */

#define INTEL_HSW_ULX_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
@@ -181,26 +181,26 @@
	INTEL_HSW_ULT_GT1_IDS(info), \
	INTEL_HSW_ULX_GT1_IDS(info), \
	INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
	INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
	INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \
	INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
	INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
	INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
	INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
	INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
	INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
	INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
	INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
	INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
	INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
	INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
	INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
	INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
	INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */

#define INTEL_HSW_ULT_GT2_IDS(info) \
	INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
	INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
	INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
	INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
	INTEL_VGA_DEVICE(0x0A16, info) /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */ \

#define INTEL_HSW_ULX_GT2_IDS(info) \
	INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \
@@ -209,45 +209,45 @@
	INTEL_HSW_ULT_GT2_IDS(info), \
	INTEL_HSW_ULX_GT2_IDS(info), \
	INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
	INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
	INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \
	INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
	INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
	INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
	INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
	INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
	INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
	INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
	INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
	INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
	INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
	INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
	INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
	INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
	INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
	INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */

#define INTEL_HSW_ULT_GT3_IDS(info) \
	INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
	INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
	INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
	INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
	INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
	INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */

#define INTEL_HSW_GT3_IDS(info) \
	INTEL_HSW_ULT_GT3_IDS(info), \
	INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
	INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
	INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \
	INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \
	INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
	INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
	INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
	INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
	INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
	INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
	INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
	INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
	INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \
	INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
	INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
	INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
	INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
	INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
	INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */

#define INTEL_HSW_IDS(info) \
	INTEL_HSW_GT1_IDS(info), \
@@ -329,17 +329,20 @@
	INTEL_VGA_DEVICE(0x22b3, info)

#define INTEL_SKL_ULT_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x1906, info) /* ULT GT1 */
	INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
	INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */

#define INTEL_SKL_ULX_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x190E, info) /* ULX GT1 */
	INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
	INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */

#define INTEL_SKL_GT1_IDS(info) \
	INTEL_SKL_ULT_GT1_IDS(info), \
	INTEL_SKL_ULX_GT1_IDS(info), \
	INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
	INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
	INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
	INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
	INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */

#define INTEL_SKL_ULT_GT2_IDS(info) \
	INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
@@ -352,26 +355,26 @@
	INTEL_SKL_ULT_GT2_IDS(info), \
	INTEL_SKL_ULX_GT2_IDS(info), \
	INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
	INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
	INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
	INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
	INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */

#define INTEL_SKL_ULT_GT3_IDS(info) \
	INTEL_VGA_DEVICE(0x1926, info) /* ULT GT3 */
	INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \
	INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */

#define INTEL_SKL_GT3_IDS(info) \
	INTEL_SKL_ULT_GT3_IDS(info), \
	INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
	INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */
	INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
	INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \
	INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */

#define INTEL_SKL_GT4_IDS(info) \
	INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
	INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
	INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
	INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \
	INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */
	INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \
	INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \
	INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */

#define INTEL_SKL_IDS(info) \
	INTEL_SKL_GT1_IDS(info), \
@@ -403,8 +406,8 @@
	INTEL_KBL_ULX_GT1_IDS(info), \
	INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
	INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
	INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
	INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
	INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \
	INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */

#define INTEL_KBL_ULT_GT2_IDS(info) \
	INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
@@ -416,10 +419,10 @@
#define INTEL_KBL_GT2_IDS(info) \
	INTEL_KBL_ULT_GT2_IDS(info), \
	INTEL_KBL_ULX_GT2_IDS(info), \
	INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
	INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
	INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
	INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
	INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
	INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
	INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */

#define INTEL_KBL_ULT_GT3_IDS(info) \
@@ -444,10 +447,10 @@

/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x9BA5, info), \
	INTEL_VGA_DEVICE(0x9BA8, info), \
	INTEL_VGA_DEVICE(0x9BA2, info), \
	INTEL_VGA_DEVICE(0x9BA4, info), \
	INTEL_VGA_DEVICE(0x9BA2, info)
	INTEL_VGA_DEVICE(0x9BA5, info), \
	INTEL_VGA_DEVICE(0x9BA8, info)

#define INTEL_CML_U_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x9B21, info), \
@@ -456,11 +459,11 @@

/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
	INTEL_VGA_DEVICE(0x9BC5, info), \
	INTEL_VGA_DEVICE(0x9BC8, info), \
	INTEL_VGA_DEVICE(0x9BC4, info), \
	INTEL_VGA_DEVICE(0x9BC2, info), \
	INTEL_VGA_DEVICE(0x9BC4, info), \
	INTEL_VGA_DEVICE(0x9BC5, info), \
	INTEL_VGA_DEVICE(0x9BC6, info), \
	INTEL_VGA_DEVICE(0x9BC8, info), \
	INTEL_VGA_DEVICE(0x9BE6, info), \
	INTEL_VGA_DEVICE(0x9BF6, info)
@@ -494,8 +497,8 @@
	INTEL_VGA_DEVICE(0x3E9C, info)

#define INTEL_CFL_H_GT2_IDS(info) \
	INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
	INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
	INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \
	INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */

/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
@@ -540,54 +543,57 @@

/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
	INTEL_VGA_DEVICE(0x5A54, info), \
	INTEL_VGA_DEVICE(0x5A5C, info), \
	INTEL_VGA_DEVICE(0x5A44, info), \
	INTEL_VGA_DEVICE(0x5A4C, info)
	INTEL_VGA_DEVICE(0x5A4C, info), \
	INTEL_VGA_DEVICE(0x5A54, info), \
	INTEL_VGA_DEVICE(0x5A5C, info)

#define INTEL_CNL_IDS(info) \
	INTEL_CNL_PORT_F_IDS(info), \
	INTEL_VGA_DEVICE(0x5A51, info), \
	INTEL_VGA_DEVICE(0x5A59, info), \
	INTEL_VGA_DEVICE(0x5A40, info), \
	INTEL_VGA_DEVICE(0x5A41, info), \
	INTEL_VGA_DEVICE(0x5A49, info), \
	INTEL_VGA_DEVICE(0x5A52, info), \
	INTEL_VGA_DEVICE(0x5A5A, info), \
	INTEL_VGA_DEVICE(0x5A42, info), \
	INTEL_VGA_DEVICE(0x5A49, info), \
	INTEL_VGA_DEVICE(0x5A4A, info), \
	INTEL_VGA_DEVICE(0x5A50, info), \
	INTEL_VGA_DEVICE(0x5A40, info)
	INTEL_VGA_DEVICE(0x5A51, info), \
	INTEL_VGA_DEVICE(0x5A52, info), \
	INTEL_VGA_DEVICE(0x5A59, info), \
	INTEL_VGA_DEVICE(0x5A5A, info)

/* ICL */
#define INTEL_ICL_PORT_F_IDS(info) \
	INTEL_VGA_DEVICE(0x8A50, info), \
	INTEL_VGA_DEVICE(0x8A5C, info), \
	INTEL_VGA_DEVICE(0x8A59, info), \
	INTEL_VGA_DEVICE(0x8A58, info), \
	INTEL_VGA_DEVICE(0x8A52, info), \
	INTEL_VGA_DEVICE(0x8A53, info), \
	INTEL_VGA_DEVICE(0x8A54, info), \
	INTEL_VGA_DEVICE(0x8A56, info), \
	INTEL_VGA_DEVICE(0x8A57, info), \
	INTEL_VGA_DEVICE(0x8A58, info), \
	INTEL_VGA_DEVICE(0x8A59, info), \
	INTEL_VGA_DEVICE(0x8A5A, info), \
	INTEL_VGA_DEVICE(0x8A5B, info), \
	INTEL_VGA_DEVICE(0x8A57, info), \
	INTEL_VGA_DEVICE(0x8A56, info), \
	INTEL_VGA_DEVICE(0x8A71, info), \
	INTEL_VGA_DEVICE(0x8A5C, info), \
	INTEL_VGA_DEVICE(0x8A70, info), \
	INTEL_VGA_DEVICE(0x8A53, info), \
	INTEL_VGA_DEVICE(0x8A54, info)
	INTEL_VGA_DEVICE(0x8A71, info)

#define INTEL_ICL_11_IDS(info) \
	INTEL_ICL_PORT_F_IDS(info), \
	INTEL_VGA_DEVICE(0x8A51, info), \
	INTEL_VGA_DEVICE(0x8A5D, info)

/* EHL/JSL */
/* EHL */
#define INTEL_EHL_IDS(info) \
	INTEL_VGA_DEVICE(0x4500, info), \
	INTEL_VGA_DEVICE(0x4571, info), \
	INTEL_VGA_DEVICE(0x4551, info), \
	INTEL_VGA_DEVICE(0x4541, info), \
	INTEL_VGA_DEVICE(0x4E71, info), \
	INTEL_VGA_DEVICE(0x4557, info), \
	INTEL_VGA_DEVICE(0x4555, info), \
	INTEL_VGA_DEVICE(0x4555, info)

/* JSL */
#define INTEL_JSL_IDS(info) \
	INTEL_VGA_DEVICE(0x4E71, info), \
	INTEL_VGA_DEVICE(0x4E61, info), \
	INTEL_VGA_DEVICE(0x4E57, info), \
	INTEL_VGA_DEVICE(0x4E55, info), \
@@ -624,6 +630,9 @@

/* DG1 */
#define INTEL_DG1_IDS(info) \
	INTEL_VGA_DEVICE(0x4905, info)
	INTEL_VGA_DEVICE(0x4905, info), \
	INTEL_VGA_DEVICE(0x4906, info), \
	INTEL_VGA_DEVICE(0x4907, info), \
	INTEL_VGA_DEVICE(0x4908, info)

#endif /* _I915_PCIIDS_H */