Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
New feature pile for 3.12! Highlights:

- Stereo/3d support for hdmi from Damien, both the drm core bits and
  the i915 integration.
- Manual boost/deboost logic for gpu turbo (Chris).
- Fixed up clock readout support for vlv (Chris).
- Tons of little fixes and improvements for vlv in general (Chon Ming
  Lee and Jesse Barnes).
- Power well support for the legacy vga plane (Ville).
- DP improvements from Jani.
- Improvements to the Haswell modeset sequence (Ville+Paulo).
- Haswell DDI improvements, using the VBT for some tuning values and
  to check the configuration (Paulo).
- Tons of other small improvements and fixups.

* 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (92 commits)
  drm/i915: Use adjusted_mode in the fastboot hack to disable pfit
  drm/i915: Add a more detailed comment about the set_base() fastboot hack
  drm/i915/vlv: Turn off power gate for BIOS-less system.
  drm/i915/vlv: reset DPIO on load and resume v2
  drm/i915: Simplify PSR debugfs
  drm/i915: Tweak RPS thresholds to more aggressively downclock
  drm/i915: Boost RPS frequency for CPU stalls
  drm/i915: Fix __wait_seqno to use true infinite timeouts
  drm/i915: Add some missing steps to i915_driver_load error path
  drm/i915: Clean up the ring scaling calculations
  drm/i915: Don't populate pipe_src_{w,h} multiple times
  drm/i915: implement the Haswell mode set sequence workaround
  drm/i915: Disable/enable planes as the first/last thing during modeset on HSW
  i915/vlv: untangle integrated clock source handling v4
  drm/i915: fix typo s/PatherPoint/PantherPoint/
  drm/i915: Make intel_resume_power_well() static
  drm/i915: destroy connector sysfs files earlier
  drm/i915/dp: do not write DP_TRAINING_PATTERN_SET all the time
  drm/i915/dp: retry i2c-over-aux seven times on AUX DEFER
  drm/i915/vlv: reduce GT FIFO error info to a debug message
  ...
This commit is contained in: commit 5259c522a0
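As background for the stereo/3d work in this series: userspace has to opt in before the kernel reports stereo modes, using the new SET_CLIENT_CAP ioctl and the DRM_CLIENT_CAP_STEREO_3D capability introduced in the diff below. The following is a minimal, hypothetical C sketch of that opt-in from a client's point of view; it is not part of the commit, and it assumes uapi headers that ship struct drm_set_client_cap and the related macros (include paths vary between kernel and libdrm installs) plus an accessible /dev/dri/card0 node.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>	/* struct drm_set_client_cap, DRM_IOCTL_SET_CLIENT_CAP */

	int main(void)
	{
		struct drm_set_client_cap cap = {
			.capability = DRM_CLIENT_CAP_STEREO_3D,
			.value = 1,	/* drm_setclientcap() rejects values > 1 */
		};
		int fd = open("/dev/dri/card0", O_RDWR);

		if (fd < 0)
			return 1;

		/* Without this call, drm_mode_getconnector() filters out modes
		 * carrying DRM_MODE_FLAG_3D_* layout flags (see
		 * drm_mode_expose_to_userspace() below). */
		if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
			perror("DRM_IOCTL_SET_CLIENT_CAP");

		return 0;
	}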
@@ -1319,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
		return -ERANGE;

	if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
		return -EINVAL;

	out->clock = in->clock;
	out->hdisplay = in->hdisplay;
	out->hsync_start = in->hsync_start;

@@ -1581,6 +1584,19 @@ out:
	return ret;
}

static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
					 const struct drm_file *file_priv)
{
	/*
	 * If user-space hasn't configured the driver to expose the stereo 3D
	 * modes, don't expose them.
	 */
	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
		return false;

	return true;
}

/**
 * drm_mode_getconnector - get connector configuration
 * @dev: drm device for the ioctl

@@ -1646,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,

	/* delayed so we get modes regardless of pre-fill_modes state */
	list_for_each_entry(mode, &connector->modes, head)
		mode_count++;
		if (drm_mode_expose_to_userspace(mode, file_priv))
			mode_count++;

	out_resp->connector_id = connector->base.id;
	out_resp->connector_type = connector->connector_type;

@@ -1668,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
		copied = 0;
		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
		list_for_each_entry(mode, &connector->modes, head) {
			if (!drm_mode_expose_to_userspace(mode, file_priv))
				continue;

			drm_crtc_convert_to_umode(&u_mode, mode);
			if (copy_to_user(mode_ptr + copied,
					 &u_mode, sizeof(u_mode))) {

@@ -2042,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
}
EXPORT_SYMBOL(drm_mode_set_config_internal);

/*
 * Checks that the framebuffer is big enough for the CRTC viewport
 * (x, y, hdisplay, vdisplay)
 */
static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
				   int x, int y,
				   const struct drm_display_mode *mode,
				   const struct drm_framebuffer *fb)

{
	int hdisplay, vdisplay;

	hdisplay = mode->hdisplay;
	vdisplay = mode->vdisplay;

	if (drm_mode_is_stereo(mode)) {
		struct drm_display_mode adjusted = *mode;

		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
		hdisplay = adjusted.crtc_hdisplay;
		vdisplay = adjusted.crtc_vdisplay;
	}

	if (crtc->invert_dimensions)
		swap(hdisplay, vdisplay);

	if (hdisplay > fb->width ||
	    vdisplay > fb->height ||
	    x > fb->width - hdisplay ||
	    y > fb->height - vdisplay) {
		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
			      fb->width, fb->height, hdisplay, vdisplay, x, y,
			      crtc->invert_dimensions ? " (inverted)" : "");
		return -ENOSPC;
	}

	return 0;
}

/**
 * drm_mode_setcrtc - set CRTC configuration
 * @dev: drm device for the ioctl

@@ -2089,7 +2148,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);

	if (crtc_req->mode_valid) {
		int hdisplay, vdisplay;
		/* If we have a mode we need a framebuffer. */
		/* If we pass -1, set the mode with the currently bound fb */
		if (crtc_req->fb_id == -1) {

@@ -2125,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,

		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);

		hdisplay = mode->hdisplay;
		vdisplay = mode->vdisplay;

		if (crtc->invert_dimensions)
			swap(hdisplay, vdisplay);

		if (hdisplay > fb->width ||
		    vdisplay > fb->height ||
		    crtc_req->x > fb->width - hdisplay ||
		    crtc_req->y > fb->height - vdisplay) {
			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
				      fb->width, fb->height,
				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
				      crtc->invert_dimensions ? " (inverted)" : "");
			ret = -ENOSPC;
		ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
					      mode, fb);
		if (ret)
			goto out;
		}

	}

	if (crtc_req->count_connectors == 0 && mode) {

@@ -3558,7 +3604,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
	struct drm_pending_vblank_event *e = NULL;
	unsigned long flags;
	int hdisplay, vdisplay;
	int ret = -EINVAL;

	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||

@@ -3590,22 +3635,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
	if (!fb)
		goto out;

	hdisplay = crtc->mode.hdisplay;
	vdisplay = crtc->mode.vdisplay;

	if (crtc->invert_dimensions)
		swap(hdisplay, vdisplay);

	if (hdisplay > fb->width ||
	    vdisplay > fb->height ||
	    crtc->x > fb->width - hdisplay ||
	    crtc->y > fb->height - vdisplay) {
		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
			      crtc->invert_dimensions ? " (inverted)" : "");
		ret = -ENOSPC;
	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
	if (ret)
		goto out;
	}

	if (crtc->fb->pixel_format != fb->pixel_format) {
		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
@@ -76,7 +76,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
{
	struct drm_display_mode *mode;

	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
		      DRM_MODE_FLAG_3D_MASK))
		return;

	list_for_each_entry(mode, &connector->modes, head) {

@@ -86,6 +87,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
		    !(flags & DRM_MODE_FLAG_DBLSCAN))
			mode->status = MODE_NO_DBLESCAN;
		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
		    !(flags & DRM_MODE_FLAG_3D_MASK))
			mode->status = MODE_NO_STEREO;
	}

	return;

@@ -175,6 +179,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
		mode_flags |= DRM_MODE_FLAG_INTERLACE;
	if (connector->doublescan_allowed)
		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
	if (connector->stereo_allowed)
		mode_flags |= DRM_MODE_FLAG_3D_MASK;
	drm_mode_validate_flag(connector, mode_flags);

	list_for_each_entry(mode, &connector->modes, head) {
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -2416,7 +2416,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)

		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
		    drm_mode_equal_no_clocks(to_match, cea_mode))
		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
			return mode + 1;
	}
	return 0;

@@ -2465,7 +2465,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)

		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
			return mode + 1;
	}
	return 0;

@@ -2519,6 +2519,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
		if (!newmode)
			continue;

		/* Carry over the stereo flags */
		newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;

		/*
		 * The current mode could be either variant. Make
		 * sure to pick the "other" clock for the new mode.

@@ -2565,18 +2568,102 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
	return modes;
}

struct stereo_mandatory_mode {
	int width, height, vrefresh;
	unsigned int flags;
};

static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
	{ 1920, 1080, 50,
	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
	{ 1920, 1080, 60,
	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
	{ 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
	{ 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
	{ 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
	{ 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
};

static bool
stereo_match_mandatory(const struct drm_display_mode *mode,
		       const struct stereo_mandatory_mode *stereo_mode)
{
	unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;

	return mode->hdisplay == stereo_mode->width &&
	       mode->vdisplay == stereo_mode->height &&
	       interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	       drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
}

static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	const struct drm_display_mode *mode;
	struct list_head stereo_modes;
	int modes = 0, i;

	INIT_LIST_HEAD(&stereo_modes);

	list_for_each_entry(mode, &connector->probed_modes, head) {
		for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
			const struct stereo_mandatory_mode *mandatory;
			struct drm_display_mode *new_mode;

			if (!stereo_match_mandatory(mode,
						    &stereo_mandatory_modes[i]))
				continue;

			mandatory = &stereo_mandatory_modes[i];
			new_mode = drm_mode_duplicate(dev, mode);
			if (!new_mode)
				continue;

			new_mode->flags |= mandatory->flags;
			list_add_tail(&new_mode->head, &stereo_modes);
			modes++;
		}
	}

	list_splice_tail(&stereo_modes, &connector->probed_modes);

	return modes;
}

static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *newmode;

	vic--; /* VICs start at 1 */
	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
		return 0;
	}

	newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
	if (!newmode)
		return 0;

	drm_mode_probed_add(connector, newmode);

	return 1;
}

/*
 * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
 * @connector: connector corresponding to the HDMI sink
 * @db: start of the CEA vendor specific block
 * @len: length of the CEA block payload, ie. one can access up to db[len]
 *
 * Parses the HDMI VSDB looking for modes to add to @connector.
 * Parses the HDMI VSDB looking for modes to add to @connector. This function
 * also adds the stereo 3d modes when applicable.
 */
static int
do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
	struct drm_device *dev = connector->dev;
	int modes = 0, offset = 0, i;
	u8 vic_len;

@@ -2597,30 +2684,22 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)

	/* the declared length is not long enough for the 2 first bytes
	 * of additional video format capabilities */
	offset += 2;
	if (len < (8 + offset))
	if (len < (8 + offset + 2))
		goto out;

	/* 3D_Present */
	offset++;
	if (db[8 + offset] & (1 << 7))
		modes += add_hdmi_mandatory_stereo_modes(connector);

	offset++;
	vic_len = db[8 + offset] >> 5;

	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
		struct drm_display_mode *newmode;
		u8 vic;

		vic = db[9 + offset + i];

		vic--; /* VICs start at 1 */
		if (vic >= ARRAY_SIZE(edid_4k_modes)) {
			DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
			continue;
		}

		newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
		if (!newmode)
			continue;

		drm_mode_probed_add(connector, newmode);
		modes++;
		modes += add_hdmi_mode(connector, vic);
	}

out:

@@ -2680,8 +2759,8 @@ static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
	const u8 *cea = drm_find_cea_extension(edid);
	const u8 *db;
	u8 dbl;
	const u8 *db, *hdmi = NULL;
	u8 dbl, hdmi_len;
	int modes = 0;

	if (cea && cea_revision(cea) >= 3) {

@@ -2696,11 +2775,20 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)

			if (cea_db_tag(db) == VIDEO_BLOCK)
				modes += do_cea_modes(connector, db + 1, dbl);
			else if (cea_db_is_hdmi_vsdb(db))
				modes += do_hdmi_vsdb_modes(connector, db, dbl);
			else if (cea_db_is_hdmi_vsdb(db)) {
				hdmi = db;
				hdmi_len = dbl;
			}
		}
	}

	/*
	 * We parse the HDMI VSDB after having added the cea modes as we will
	 * be patching their flags when the sink supports stereo 3D.
	 */
	if (hdmi)
		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len);

	return modes;
}

@@ -3333,6 +3421,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);

static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
	u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;

	switch (layout) {
	case DRM_MODE_FLAG_3D_FRAME_PACKING:
		return HDMI_3D_STRUCTURE_FRAME_PACKING;
	case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
		return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
	case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
		return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
	case DRM_MODE_FLAG_3D_L_DEPTH:
		return HDMI_3D_STRUCTURE_L_DEPTH;
	case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
		return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
		return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
	default:
		return HDMI_3D_STRUCTURE_INVALID;
	}
}

/**
 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
 * data from a DRM display mode

@@ -3350,20 +3465,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
					    const struct drm_display_mode *mode)
{
	int err;
	u32 s3d_flags;
	u8 vic;

	if (!frame || !mode)
		return -EINVAL;

	vic = drm_match_hdmi_mode(mode);
	if (!vic)
	s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;

	if (!vic && !s3d_flags)
		return -EINVAL;

	if (vic && s3d_flags)
		return -EINVAL;

	err = hdmi_vendor_infoframe_init(frame);
	if (err < 0)
		return err;

	frame->vic = vic;
	if (vic)
		frame->vic = vic;
	else
		frame->s3d_struct = s3d_structure_from_display_mode(mode);

	return 0;
}
@@ -302,6 +302,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
	return 0;
}

/**
 * Set device/driver capabilities
 */
int
drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_set_client_cap *req = data;

	switch (req->capability) {
	case DRM_CLIENT_CAP_STEREO_3D:
		if (req->value > 1)
			return -EINVAL;
		file_priv->stereo_allowed = req->value;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * Setversion ioctl.
 *
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
/**
 * drm_mode_set_crtcinfo - set CRTC modesetting parameters
 * @p: mode
 * @adjust_flags: unused? (FIXME)
 * @adjust_flags: a combination of adjustment flags
 *
 * LOCKING:
 * None.
 *
 * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
 *
 * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
 *   interlaced modes.
 * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
 *   buffers containing two eyes (only adjust the timings when needed, eg. for
 *   "frame packing" or "side by side full").
 */
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
	if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
		return;

	p->crtc_clock = p->clock;
	p->crtc_hdisplay = p->hdisplay;
	p->crtc_hsync_start = p->hsync_start;
	p->crtc_hsync_end = p->hsync_end;

@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
		p->crtc_vtotal *= p->vscan;
	}

	if (adjust_flags & CRTC_STEREO_DOUBLE) {
		unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;

		switch (layout) {
		case DRM_MODE_FLAG_3D_FRAME_PACKING:
			p->crtc_clock *= 2;
			p->crtc_vdisplay += p->crtc_vtotal;
			p->crtc_vsync_start += p->crtc_vtotal;
			p->crtc_vsync_end += p->crtc_vtotal;
			p->crtc_vtotal += p->crtc_vtotal;
			break;
		}
	}

	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
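For a concrete sense of the CRTC_STEREO_DOUBLE adjustment above (a worked example, not part of the commit): assuming the standard 1920x1080p/24 CEA timing with a 74.25 MHz pixel clock and a vtotal of 1125 lines, frame packing doubles crtc_clock to 148.5 MHz and grows crtc_vdisplay to 1080 + 1125 = 2205 active lines, which is the mandatory HDMI 1.4a frame-packed 1080p/24 format.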
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
	} else if (mode1->clock != mode2->clock)
		return false;

	return drm_mode_equal_no_clocks(mode1, mode2);
	if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
	    (mode2->flags & DRM_MODE_FLAG_3D_MASK))
		return false;

	return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
}
EXPORT_SYMBOL(drm_mode_equal);

/**
 * drm_mode_equal_no_clocks - test modes for equality
 * drm_mode_equal_no_clocks_no_stereo - test modes for equality
 * @mode1: first mode
 * @mode2: second mode
 *

@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
 * None.
 *
 * Check to see if @mode1 and @mode2 are equivalent, but
 * don't check the pixel clocks.
 * don't check the pixel clocks nor the stereo layout.
 *
 * RETURNS:
 * True if the modes are equal, false otherwise.
 */
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
					const struct drm_display_mode *mode2)
{
	if (mode1->hdisplay == mode2->hdisplay &&
	    mode1->hsync_start == mode2->hsync_start &&

@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
	    mode1->vsync_end == mode2->vsync_end &&
	    mode1->vtotal == mode2->vtotal &&
	    mode1->vscan == mode2->vscan &&
	    mode1->flags == mode2->flags)
	    (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
	    (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
		return true;

	return false;
}
EXPORT_SYMBOL(drm_mode_equal_no_clocks);
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);

/**
 * drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1666,126 +1666,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrstat, psrperf;
	u32 psrperf = 0;
	bool enabled = false;

	if (!IS_HASWELL(dev)) {
		seq_puts(m, "PSR not supported on this platform\n");
	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
		seq_puts(m, "PSR enabled\n");
	} else {
		seq_puts(m, "PSR disabled: ");
		switch (dev_priv->no_psr_reason) {
		case PSR_NO_SOURCE:
			seq_puts(m, "not supported on this platform");
			break;
		case PSR_NO_SINK:
			seq_puts(m, "not supported by panel");
			break;
		case PSR_MODULE_PARAM:
			seq_puts(m, "disabled by flag");
			break;
		case PSR_CRTC_NOT_ACTIVE:
			seq_puts(m, "crtc not active");
			break;
		case PSR_PWR_WELL_ENABLED:
			seq_puts(m, "power well enabled");
			break;
		case PSR_NOT_TILED:
			seq_puts(m, "not tiled");
			break;
		case PSR_SPRITE_ENABLED:
			seq_puts(m, "sprite enabled");
			break;
		case PSR_S3D_ENABLED:
			seq_puts(m, "stereo 3d enabled");
			break;
		case PSR_INTERLACED_ENABLED:
			seq_puts(m, "interlaced enabled");
			break;
		case PSR_HSW_NOT_DDIA:
			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_puts(m, "\n");
		return 0;
	}
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));

	psrstat = I915_READ(EDP_PSR_STATUS_CTL);
	enabled = HAS_PSR(dev) &&
		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "Enabled: %s\n", yesno(enabled));

	seq_puts(m, "PSR Current State: ");
	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
	case EDP_PSR_STATUS_STATE_IDLE:
		seq_puts(m, "Reset state\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDONACK:
		seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDENT:
		seq_puts(m, "SRD entry\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFOFF:
		seq_puts(m, "Wait for buffer turn off\n");
		break;
	case EDP_PSR_STATUS_STATE_BUFON:
		seq_puts(m, "Wait for buffer turn on\n");
		break;
	case EDP_PSR_STATUS_STATE_AUXACK:
		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
		break;
	case EDP_PSR_STATUS_STATE_SRDOFFACK:
		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_puts(m, "Link Status: ");
	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
	case EDP_PSR_STATUS_LINK_FULL_OFF:
		seq_puts(m, "Link is fully off\n");
		break;
	case EDP_PSR_STATUS_LINK_FULL_ON:
		seq_puts(m, "Link is fully on\n");
		break;
	case EDP_PSR_STATUS_LINK_STANDBY:
		seq_puts(m, "Link is in standby\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "PSR Entry Count: %u\n",
		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
		   EDP_PSR_STATUS_COUNT_MASK);

	seq_printf(m, "Max Sleep Timer Counter: %u\n",
		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);

	seq_printf(m, "Had AUX error: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));

	seq_printf(m, "Sending AUX: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));

	seq_printf(m, "Sending Idle: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));

	seq_printf(m, "Sending TP2 TP3: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));

	seq_printf(m, "Sending TP1: %s\n",
		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));

	seq_printf(m, "Idle Count: %u\n",
		   psrstat & EDP_PSR_STATUS_IDLE_MASK);

	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance Counter: %u\n", psrperf);
	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);

	return 0;
}

@@ -1896,6 +1790,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4

@@ -2156,7 +2116,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;

@@ -2289,6 +2249,8 @@ static struct i915_debugfs_files {
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
				    sizeof(*cliprects), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;

@@ -1314,25 +1314,30 @@ static int i915_load_modeset_init(struct drm_device *dev)
	if (ret)
		goto cleanup_gem_stolen;

	intel_init_power_well(dev);

	/* Keep VGA alive until i915_disable_vga_mem() */
	intel_display_power_get(dev, POWER_DOMAIN_VGA);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;
		goto cleanup_power;

	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

	intel_init_power_well(dev);

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
	if (INTEL_INFO(dev)->num_pipes == 0) {
		intel_display_power_put(dev, POWER_DOMAIN_VGA);
		return 0;
	}

	ret = intel_fbdev_init(dev);
	if (ret)

@@ -1358,6 +1363,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
	 * vgacon_save_screen() works during the handover.
	 */
	i915_disable_vga_mem(dev);
	intel_display_power_put(dev, POWER_DOMAIN_VGA);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

@@ -1373,7 +1379,8 @@ cleanup_gem:
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
cleanup_power:
	intel_display_power_put(dev, POWER_DOMAIN_VGA);
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);

@@ -1473,7 +1480,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

@@ -1547,7 +1554,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);

@@ -1576,7 +1583,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_rmmap;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,

@@ -1650,7 +1657,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
			goto out_power_well;
		}
	} else {
		/* Start out suspended in ums mode. */

@@ -1670,6 +1677,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	return 0;

out_power_well:
	if (HAS_POWER_WELL(dev))
		i915_remove_power_well(dev);
	drm_vblank_cleanup(dev);
out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

@@ -1683,12 +1694,17 @@ out_gem_unload:
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	list_del(&dev_priv->gtt.base.global_link);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_rmmap:
out_regs:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}

@@ -1778,8 +1794,8 @@ int i915_driver_unload(struct drm_device *dev)
		list_del(&dev_priv->gtt.base.global_link);
		WARN_ON(!list_empty(&dev_priv->vm_list));
		drm_mm_takedown(&dev_priv->gtt.base.mm);
		if (dev_priv->regs != NULL)
			pci_iounmap(dev->pdev, dev_priv->regs);

	drm_vblank_cleanup(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

@@ -1789,6 +1805,10 @@ int i915_driver_unload(struct drm_device *dev)

	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

@@ -1800,19 +1820,11 @@ int i915_driver_unload(struct drm_device *dev)

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	idr_init(&file_priv->context_idr);
	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}
@@ -416,7 +416,7 @@ void intel_detect_pch(struct drm_device *dev)
		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
			/* PantherPoint is CPT compatible */
			dev_priv->pch_type = PCH_CPT;
			DRM_DEBUG_KMS("Found PatherPoint PCH\n");
			DRM_DEBUG_KMS("Found PantherPoint PCH\n");
			WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
			dev_priv->pch_type = PCH_LPT;

@@ -581,6 +581,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	intel_uncore_early_sanitize(dev);

	intel_uncore_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&

@@ -590,6 +592,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
		mutex_unlock(&dev->struct_mutex);
	}

	intel_init_power_well(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

@@ -605,8 +609,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_init_power_well(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
@@ -324,7 +324,7 @@ struct drm_i915_error_state {
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;

@@ -408,6 +408,8 @@ struct intel_uncore {

	unsigned fifo_count;
	unsigned forcewake_count;

	struct delayed_work force_wake_work;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \

@@ -642,17 +644,9 @@ struct i915_fbc {
	} no_fbc_reason;
};

enum no_psr_reason {
	PSR_NO_SOURCE, /* Not supported on platform */
	PSR_NO_SINK, /* Not supported by panel */
	PSR_MODULE_PARAM,
	PSR_CRTC_NOT_ACTIVE,
	PSR_PWR_WELL_ENABLED,
	PSR_NOT_TILED,
	PSR_SPRITE_ENABLED,
	PSR_S3D_ENABLED,
	PSR_INTERLACED_ENABLED,
	PSR_HSW_NOT_DDIA,
struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {

@@ -842,17 +836,19 @@ struct intel_gen6_power_mgmt {
	struct work_struct work;
	u32 pm_iir;

	/* On vlv we need to manually drop to Vmin with a delayed work. */
	struct delayed_work vlv_work;

	/* The below variables an all the rps hw state are protected by
	 * dev->struct mutext. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 rp1_delay;
	u8 rp0_delay;
	u8 hw_max;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	struct delayed_work delayed_resume_work;

	/*

@@ -962,6 +958,15 @@ struct i915_gem_mm {
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?

@@ -1011,6 +1016,9 @@ struct i915_gpu_error {
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long missed_irq_rings;

	/**
	 * State variable and reset counter controlling the reset flow
	 *

@@ -1049,6 +1057,9 @@ struct i915_gpu_error {

	/* For gpu hang simulation. */
	unsigned int stop_rings;

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};

enum modeset_restore {

@@ -1057,6 +1068,14 @@ enum modeset_restore {
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

@@ -1090,7 +1109,9 @@ struct intel_vbt_data {
	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {

@@ -1327,7 +1348,7 @@ typedef struct drm_i915_private {
	/* Haswell power well */
	struct i915_power_well power_well;

	enum no_psr_reason no_psr_reason;
	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

@@ -1579,13 +1600,17 @@ struct drm_i915_gem_request {
};

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;

	struct {
		spinlock_t lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	struct i915_ctx_hang_stats hang_stats;
	atomic_t rps_wait_boost;
};

#define INTEL_INFO(dev) (to_i915(dev)->info)

@@ -1662,7 +1687,6 @@ struct drm_i915_file_private {
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)

@@ -1675,6 +1699,7 @@ struct drm_i915_file_private {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev))

#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00

@@ -1791,6 +1816,7 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

@@ -1891,9 +1917,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring);

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);

@@ -1934,7 +1959,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
	}
}

void i915_gem_retire_requests(struct drm_device *dev);
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);

@@ -1985,6 +2010,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t

@@ -2016,6 +2042,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);

/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)

@@ -2052,7 +2081,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}
#undef obj_to_ggtt

/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
@@ -971,6 +971,25 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno

@@ -991,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible, struct timespec *timeout)
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time={1,0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	struct timespec before, now;
	DEFINE_WAIT(wait);
	long timeout_jiffies;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

@@ -1005,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);
	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
	    WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged * */
	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;
		unsigned long expire;

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);
		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
			end = -EAGAIN;
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truely gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
		 * gone. */
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);
		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout_jiffies <= 0) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
			mod_timer(&timer, expire);
		}

		schedule();

		if (timeout)
			timeout_jiffies = expire - jiffies;

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);

@@ -1058,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
		set_normalized_timespec(timeout, 0, 0);
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
	return ret;
}

/**

@@ -1096,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL);
			    interruptible, NULL, NULL);
}

static int

@@ -1146,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_file *file,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;

@@ -1172,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

@@ -1221,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
	if (ret)
		goto unref;

@@ -1917,7 +1956,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	return 0;
}

void
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
{

@@ -1956,6 +1995,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
	}
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	return i915_gem_object_move_to_active(vma->obj, ring);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{

@@ -2135,6 +2181,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
	i915_queue_hangcheck(ring->dev);

	if (was_empty) {
		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));

@@ -2156,10 +2203,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

@@ -2423,57 +2468,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
	WARN_ON(i915_verify_lists(ring->dev));
}

void
bool
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	bool idle = true;
	int i;

	for_each_ring(ring, dev_priv, i)
	for_each_ring(ring, dev_priv, i) {
		i915_gem_retire_requests_ring(ring);
		idle &= list_empty(&ring->request_list);
	}

	if (idle)
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->mm.idle_work,
				 msecs_to_jiffies(100));

	return idle;
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct intel_ring_buffer *ring;
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;
	bool idle;
	int i;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
	idle = false;
	if (mutex_trylock(&dev->struct_mutex)) {
		idle = i915_gem_retire_requests(dev);
		mutex_unlock(&dev->struct_mutex);
	}
	if (!idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
		return;
	}
}

	i915_gem_retire_requests(dev);
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.idle_work.work);

	/* Send a periodic flush down the ring so we don't hold onto GEM
	 * objects indefinitely.
	 */
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		if (ring->gpu_caches_dirty)
			i915_add_request(ring, NULL);

		idle &= list_empty(&ring->request_list);
	}

	if (!dev_priv->ums.mm_suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
	if (idle)
		intel_mark_idle(dev);

	mutex_unlock(&dev->struct_mutex);
	intel_mark_idle(dev_priv->dev);
}

/**

@@ -2571,7 +2612,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
	if (timeout)
		args->timeout_ns = timespec_to_ns(timeout);
	return ret;

@@ -2618,6 +2659,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
	if (ret)
		return ret;

	trace_i915_gem_ring_sync_to(from, to, seqno);
	ret = to->sync_to(to, from, seqno);
	if (!ret)
		/* We use last_read_seqno because sync_to()

@@ -3410,8 +3452,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj)) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
							   &dev_priv->gtt.base);
		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
		if (vma)
			list_move_tail(&vma->mm_list,
				       &dev_priv->gtt.base.inactive_list);

@@ -3782,7 +3823,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

@@ -4225,16 +4266,13 @@ i915_gem_idle(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
	if (dev_priv->ums.mm_suspended)
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;
	}

	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */

@@ -4248,6 +4286,7 @@ i915_gem_idle(struct drm_device *dev)

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	cancel_delayed_work_sync(&dev_priv->mm.idle_work);

	return 0;
||||
}
|
||||
|
@ -4581,6 +4620,8 @@ i915_gem_load(struct drm_device *dev)
|
|||
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
|
||||
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
|
||||
i915_gem_retire_work_handler);
|
||||
INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
|
||||
i915_gem_idle_work_handler);
|
||||
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
|
||||
|
||||
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
|
||||
|
@ -4631,7 +4672,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
|
|||
if (dev_priv->mm.phys_objs[id - 1] || !size)
|
||||
return 0;
|
||||
|
||||
phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
|
||||
phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
|
||||
if (!phys_obj)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -4805,6 +4846,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
|
|||
{
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
|
||||
cancel_delayed_work_sync(&file_priv->mm.idle_work);
|
||||
|
||||
/* Clean up our request list when the client is going away, so that
|
||||
* later retire_requests won't dereference our soon-to-be-gone
|
||||
* file_priv.
|
||||
|
@ -4822,6 +4865,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
|
|||
spin_unlock(&file_priv->mm.lock);
|
||||
}
|
||||
|
||||
static void
|
||||
i915_gem_file_idle_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv =
|
||||
container_of(work, typeof(*file_priv), mm.idle_work.work);
|
||||
|
||||
atomic_set(&file_priv->rps_wait_boost, false);
|
||||
}
|
||||
|
||||
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv;
|
||||
|
||||
DRM_DEBUG_DRIVER("\n");
|
||||
|
||||
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
|
||||
if (!file_priv)
|
||||
return -ENOMEM;
|
||||
|
||||
file->driver_priv = file_priv;
|
||||
file_priv->dev_priv = dev->dev_private;
|
||||
|
||||
spin_lock_init(&file_priv->mm.lock);
|
||||
INIT_LIST_HEAD(&file_priv->mm.request_list);
|
||||
INIT_DELAYED_WORK(&file_priv->mm.idle_work,
|
||||
i915_gem_file_idle_work_handler);
|
||||
|
||||
idr_init(&file_priv->context_idr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
|
||||
{
|
||||
if (!mutex_is_locked(mutex))
|
||||
|
@ -4968,3 +5043,17 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
|
|||
mutex_unlock(&dev->struct_mutex);
|
||||
return freed;
|
||||
}
|
||||
|
||||
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct i915_vma *vma;
|
||||
|
||||
if (WARN_ON(list_empty(&obj->vma_list)))
|
||||
return NULL;
|
||||
|
||||
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
|
||||
if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
|
||||
return NULL;
|
||||
|
||||
return vma;
|
||||
}
|
||||
|
|
|
@ -453,11 +453,8 @@ static int do_switch(struct i915_hw_context *to)
|
|||
* MI_SET_CONTEXT instead of when the next seqno has completed.
|
||||
*/
|
||||
if (from != NULL) {
|
||||
struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
|
||||
struct i915_address_space *ggtt = &dev_priv->gtt.base;
|
||||
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
|
||||
list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
|
||||
i915_gem_object_move_to_active(from->obj, ring);
|
||||
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
|
||||
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
|
||||
* whole damn pipeline, we don't need to explicitly mark the
|
||||
* object dirty. The only exception is that the context must be
|
||||
|
|
|
@ -175,6 +175,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
|
|||
struct i915_vma *vma, *next;
|
||||
int ret;
|
||||
|
||||
trace_i915_gem_evict_vm(vm);
|
||||
|
||||
if (do_idle) {
|
||||
ret = i915_gpu_idle(vm->dev);
|
||||
if (ret)
|
||||
|
|
|
@ -48,15 +48,15 @@ eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
|
|||
struct eb_vmas *eb = NULL;
|
||||
|
||||
if (args->flags & I915_EXEC_HANDLE_LUT) {
|
||||
int size = args->buffer_count;
|
||||
unsigned size = args->buffer_count;
|
||||
size *= sizeof(struct i915_vma *);
|
||||
size += sizeof(struct eb_vmas);
|
||||
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
|
||||
}
|
||||
|
||||
if (eb == NULL) {
|
||||
int size = args->buffer_count;
|
||||
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
|
||||
unsigned size = args->buffer_count;
|
||||
unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
|
||||
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
|
||||
while (count > 2*size)
|
||||
count >>= 1;
|
||||
|
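The int-to-unsigned widening in the eb_create() hunk above, together with the UINT_MAX-based relocs_max bound a few hunks further down, guards the size arithmetic against overflow when userspace passes an enormous buffer_count. A minimal userspace sketch of the same guard follows; alloc_array_checked is an illustrative name, not a driver function.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: refuse any count for which count * elem_size would
     * overflow, mirroring the unsigned/UINT_MAX checks in the hunks above. */
    static void *alloc_array_checked(unsigned count, size_t elem_size)
    {
            if (elem_size && count > UINT_MAX / elem_size)
                    return NULL;                    /* would overflow, refuse */
            return calloc(count, elem_size);        /* calloc re-checks internally */
    }

    int main(void)
    {
            void *small = alloc_array_checked(16, sizeof(void *));
            void *huge  = alloc_array_checked(0x80000000u, sizeof(void *));

            printf("small: %s, huge: %s\n",
                   small ? "allocated" : "rejected",
                   huge  ? "allocated" : "rejected");
            free(small);
            free(huge);
            return 0;
    }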
@ -667,7 +667,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
|||
bool need_relocs;
|
||||
int *reloc_offset;
|
||||
int i, total, ret;
|
||||
int count = args->buffer_count;
|
||||
unsigned count = args->buffer_count;
|
||||
|
||||
if (WARN_ON(list_empty(&eb->vmas)))
|
||||
return 0;
|
||||
|
@ -818,8 +818,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
|
|||
int count)
|
||||
{
|
||||
int i;
|
||||
int relocs_total = 0;
|
||||
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
|
||||
unsigned relocs_total = 0;
|
||||
unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
|
||||
|
@ -872,8 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
|
|||
obj->base.read_domains = obj->base.pending_read_domains;
|
||||
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
|
||||
|
||||
list_move_tail(&vma->mm_list, &vma->vm->active_list);
|
||||
i915_gem_object_move_to_active(obj, ring);
|
||||
i915_vma_move_to_active(vma, ring);
|
||||
if (obj->base.write_domain) {
|
||||
obj->dirty = 1;
|
||||
obj->last_write_seqno = intel_ring_get_seqno(ring);
|
||||
|
@ -1047,7 +1046,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
|
||||
cliprects = kcalloc(args->num_cliprects,
|
||||
sizeof(*cliprects),
|
||||
GFP_KERNEL);
|
||||
if (cliprects == NULL) {
|
||||
ret = -ENOMEM;
|
||||
|
|
|
@ -336,7 +336,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
|||
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
|
||||
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
|
||||
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
|
||||
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
|
||||
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
|
||||
GFP_KERNEL);
|
||||
if (!ppgtt->pt_pages)
|
||||
return -ENOMEM;
|
||||
|
@ -347,7 +347,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
|||
goto err_pt_alloc;
|
||||
}
|
||||
|
||||
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
|
||||
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
|
||||
GFP_KERNEL);
|
||||
if (!ppgtt->pt_dma_addr)
|
||||
goto err_pt_alloc;
|
||||
|
|
|
@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
|
|||
/* Try to preallocate memory required to save swizzling on put-pages */
|
||||
if (i915_gem_object_needs_bit17_swizzle(obj)) {
|
||||
if (obj->bit_17 == NULL) {
|
||||
obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
|
||||
obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
|
||||
sizeof(long), GFP_KERNEL);
|
||||
}
|
||||
} else {
|
||||
|
@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
|
|||
int i;
|
||||
|
||||
if (obj->bit_17 == NULL) {
|
||||
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
|
||||
sizeof(long), GFP_KERNEL);
|
||||
obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
|
||||
sizeof(long), GFP_KERNEL);
|
||||
if (obj->bit_17 == NULL) {
|
||||
DRM_ERROR("Failed to allocate memory for bit 17 "
|
||||
"record\n");
|
||||
|
|
|
@ -311,6 +311,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
|
||||
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
|
||||
err_printf(m, "CCID: 0x%08x\n", error->ccid);
|
||||
err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
|
||||
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++)
|
||||
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
|
||||
|
@ -793,7 +794,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
|
|||
|
||||
error->ring[i].num_requests = count;
|
||||
error->ring[i].requests =
|
||||
kmalloc(count*sizeof(struct drm_i915_error_request),
|
||||
kcalloc(count, sizeof(*error->ring[i].requests),
|
||||
GFP_ATOMIC);
|
||||
if (error->ring[i].requests == NULL) {
|
||||
error->ring[i].num_requests = 0;
|
||||
|
@ -835,7 +836,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
|
|||
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
|
||||
|
||||
if (i) {
|
||||
active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
|
||||
active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
|
||||
if (active_bo)
|
||||
pinned_bo = active_bo + error->active_bo_count[ndx];
|
||||
}
|
||||
|
@ -1012,6 +1013,7 @@ const char *i915_cache_level_str(int type)
|
|||
case I915_CACHE_NONE: return " uncached";
|
||||
case I915_CACHE_LLC: return " snooped or LLC";
|
||||
case I915_CACHE_L3_LLC: return " L3+LLC";
|
||||
case I915_CACHE_WT: return " WT";
|
||||
default: return "";
|
||||
}
|
||||
}
|
||||
|
|
|
@ -807,7 +807,7 @@ static void notify_ring(struct drm_device *dev,
|
|||
if (ring->obj == NULL)
|
||||
return;
|
||||
|
||||
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
|
||||
trace_i915_gem_request_complete(ring);
|
||||
|
||||
wake_up_all(&ring->irq_queue);
|
||||
i915_queue_hangcheck(dev);
|
||||
|
@ -818,7 +818,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
|
|||
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
|
||||
rps.work);
|
||||
u32 pm_iir;
|
||||
u8 new_delay;
|
||||
int new_delay, adj;
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
pm_iir = dev_priv->rps.pm_iir;
|
||||
|
@ -835,40 +835,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
|
|||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
||||
adj = dev_priv->rps.last_adj;
|
||||
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
|
||||
new_delay = dev_priv->rps.cur_delay + 1;
|
||||
if (adj > 0)
|
||||
adj *= 2;
|
||||
else
|
||||
adj = 1;
|
||||
new_delay = dev_priv->rps.cur_delay + adj;
|
||||
|
||||
/*
|
||||
* For better performance, jump directly
|
||||
* to RPe if we're below it.
|
||||
*/
|
||||
if (IS_VALLEYVIEW(dev_priv->dev) &&
|
||||
dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
|
||||
if (new_delay < dev_priv->rps.rpe_delay)
|
||||
new_delay = dev_priv->rps.rpe_delay;
|
||||
} else
|
||||
new_delay = dev_priv->rps.cur_delay - 1;
|
||||
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
|
||||
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
|
||||
new_delay = dev_priv->rps.rpe_delay;
|
||||
else
|
||||
new_delay = dev_priv->rps.min_delay;
|
||||
adj = 0;
|
||||
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
|
||||
if (adj < 0)
|
||||
adj *= 2;
|
||||
else
|
||||
adj = -1;
|
||||
new_delay = dev_priv->rps.cur_delay + adj;
|
||||
} else { /* unknown event */
|
||||
new_delay = dev_priv->rps.cur_delay;
|
||||
}
|
||||
|
||||
/* sysfs frequency interfaces may have snuck in while servicing the
|
||||
* interrupt
|
||||
*/
|
||||
if (new_delay >= dev_priv->rps.min_delay &&
|
||||
new_delay <= dev_priv->rps.max_delay) {
|
||||
if (IS_VALLEYVIEW(dev_priv->dev))
|
||||
valleyview_set_rps(dev_priv->dev, new_delay);
|
||||
else
|
||||
gen6_set_rps(dev_priv->dev, new_delay);
|
||||
}
|
||||
if (new_delay < (int)dev_priv->rps.min_delay)
|
||||
new_delay = dev_priv->rps.min_delay;
|
||||
if (new_delay > (int)dev_priv->rps.max_delay)
|
||||
new_delay = dev_priv->rps.max_delay;
|
||||
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
|
||||
|
||||
if (IS_VALLEYVIEW(dev_priv->dev)) {
|
||||
/*
|
||||
* On VLV, when we enter RC6 we may not be at the minimum
|
||||
* voltage level, so arm a timer to check. It should only
|
||||
* fire when there's activity or once after we've entered
|
||||
* RC6, and then won't be re-armed until the next RPS interrupt.
|
||||
*/
|
||||
mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
|
||||
msecs_to_jiffies(100));
|
||||
}
|
||||
if (IS_VALLEYVIEW(dev_priv->dev))
|
||||
valleyview_set_rps(dev_priv->dev, new_delay);
|
||||
else
|
||||
gen6_set_rps(dev_priv->dev, new_delay);
|
||||
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
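The reworked gen6_pm_rps_work() above replaces fixed one-step frequency changes with an adjustment that doubles while requests keep arriving in the same direction and is clamped to the sysfs min/max limits, with last_adj recomputed after clamping. A standalone sketch of just that ramp, with made-up limit values rather than anything read from the PCU:

    #include <stdio.h>

    /* Illustrative frequency limits; the real ones come from the hardware. */
    enum { MIN_DELAY = 3, MAX_DELAY = 22, RPE_DELAY = 10 };

    static int step(int cur, int last_adj, int up, int *new_adj)
    {
            int adj = last_adj;
            int next;

            if (up)
                    adj = adj > 0 ? adj * 2 : 1;    /* keep accelerating upward */
            else
                    adj = adj < 0 ? adj * 2 : -1;   /* keep accelerating downward */
            next = cur + adj;

            /* sysfs may have moved the limits; clamp as the handler does */
            if (next < MIN_DELAY)
                    next = MIN_DELAY;
            if (next > MAX_DELAY)
                    next = MAX_DELAY;
            *new_adj = next - cur;                  /* becomes rps.last_adj */
            return next;
    }

    int main(void)
    {
            int cur = RPE_DELAY, adj = 0;

            for (int i = 0; i < 5; i++) {           /* five up-threshold events */
                    cur = step(cur, adj, 1, &adj);
                    printf("delay=%d adj=%d\n", cur, adj);
            }
            return 0;
    }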
@ -2039,10 +2048,13 @@ static void i915_hangcheck_elapsed(unsigned long data)
|
|||
|
||||
if (waitqueue_active(&ring->irq_queue)) {
|
||||
/* Issue a wake-up to catch stuck h/w. */
|
||||
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
|
||||
ring->name);
|
||||
wake_up_all(&ring->irq_queue);
|
||||
ring->hangcheck.score += HUNG;
|
||||
if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
|
||||
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
|
||||
ring->name);
|
||||
wake_up_all(&ring->irq_queue);
|
||||
}
|
||||
/* Safeguard against driver failure */
|
||||
ring->hangcheck.score += BUSY;
|
||||
} else
|
||||
busy = false;
|
||||
} else {
|
||||
|
|
|
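The hangcheck change above prints the "timer elapsed" error only the first time a given ring misses an interrupt, by atomically setting a per-ring bit in missed_irq_rings. A userspace sketch of the same report-once pattern using C11 atomics; the ring names are invented for the example.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong missed_irq_rings;   /* one bit per ring, like the driver's mask */

    /* Returns 1 only the first time this ring id is reported. */
    static int report_once(int ring_id)
    {
            unsigned long bit = 1UL << ring_id;
            unsigned long old = atomic_fetch_or(&missed_irq_rings, bit);

            return !(old & bit);
    }

    int main(void)
    {
            const char *rings[] = { "render", "bsd", "blt" };

            for (int pass = 0; pass < 2; pass++)
                    for (int i = 0; i < 3; i++)
                            if (report_once(i))
                                    printf("Hangcheck: %s ring missed an interrupt\n",
                                           rings[i]);
            return 0;
    }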
@ -361,6 +361,15 @@
|
|||
#define PUNIT_OPCODE_REG_READ 6
|
||||
#define PUNIT_OPCODE_REG_WRITE 7
|
||||
|
||||
#define PUNIT_REG_PWRGT_CTRL 0x60
|
||||
#define PUNIT_REG_PWRGT_STATUS 0x61
|
||||
#define PUNIT_CLK_GATE 1
|
||||
#define PUNIT_PWR_RESET 2
|
||||
#define PUNIT_PWR_GATE 3
|
||||
#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
|
||||
#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
|
||||
#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
|
||||
|
||||
#define PUNIT_REG_GPU_LFM 0xd3
|
||||
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
|
||||
#define PUNIT_REG_GPU_FREQ_STS 0xd8
|
||||
|
@ -382,6 +391,8 @@
|
|||
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
|
||||
|
||||
/* vlv2 north clock has */
|
||||
#define CCK_FUSE_REG 0x8
|
||||
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
|
||||
#define CCK_REG_DSI_PLL_FUSE 0x44
|
||||
#define CCK_REG_DSI_PLL_CONTROL 0x48
|
||||
#define DSI_PLL_VCO_EN (1 << 31)
|
||||
|
@ -428,7 +439,7 @@
|
|||
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
|
||||
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
|
||||
#define DPIO_SFR_BYPASS (1<<1)
|
||||
#define DPIO_RESET (1<<0)
|
||||
#define DPIO_CMNRST (1<<0)
|
||||
|
||||
#define _DPIO_TX3_SWING_CTL4_A 0x690
|
||||
#define _DPIO_TX3_SWING_CTL4_B 0x2a90
|
||||
|
@ -940,7 +951,7 @@
|
|||
|
||||
#define GT_PARITY_ERROR(dev) \
|
||||
(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
|
||||
IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0)
|
||||
(IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
|
||||
|
||||
/* These are all the "old" interrupts */
|
||||
#define ILK_BSD_USER_INTERRUPT (1<<5)
|
||||
|
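The GT_PARITY_ERROR() fix above is a plain operator-precedence bug: `|` binds tighter than `?:`, so without the added parentheses the Haswell condition swallowed the first bit instead of contributing an extra one. A tiny demonstration with made-up bit names:

    #include <stdio.h>

    #define BIT_A (1 << 0)
    #define BIT_B (1 << 1)

    /* Broken form: '|' binds tighter than '?:', so this parses as
     * (BIT_A | is_hsw) ? BIT_B : 0  -- BIT_A is lost entirely. */
    #define MASK_BROKEN(is_hsw)     (BIT_A | (is_hsw) ? BIT_B : 0)
    /* Fixed form, matching the change above. */
    #define MASK_FIXED(is_hsw)      (BIT_A | ((is_hsw) ? BIT_B : 0))

    int main(void)
    {
            printf("broken: %#x  fixed: %#x\n", MASK_BROKEN(0), MASK_FIXED(0));
            printf("broken: %#x  fixed: %#x\n", MASK_BROKEN(1), MASK_FIXED(1));
            return 0;
    }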
@ -1429,6 +1440,12 @@
|
|||
|
||||
#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
|
||||
|
||||
#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
|
||||
#define CDCLK_FREQ_SHIFT 4
|
||||
#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
|
||||
#define CZCLK_FREQ_MASK 0xf
|
||||
#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
|
||||
|
||||
/*
|
||||
* Palette regs
|
||||
*/
|
||||
|
@ -1797,6 +1814,9 @@
|
|||
*/
|
||||
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
|
||||
|
||||
#define VLV_CLK_CTL2 0x101104
|
||||
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
|
||||
|
||||
/*
|
||||
* Overlay regs
|
||||
*/
|
||||
|
@ -1848,7 +1868,8 @@
|
|||
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
|
||||
|
||||
/* HSW eDP PSR registers */
|
||||
#define EDP_PSR_CTL 0x64800
|
||||
#define EDP_PSR_BASE(dev) 0x64800
|
||||
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
|
||||
#define EDP_PSR_ENABLE (1<<31)
|
||||
#define EDP_PSR_LINK_DISABLE (0<<27)
|
||||
#define EDP_PSR_LINK_STANDBY (1<<27)
|
||||
|
@ -1871,16 +1892,16 @@
|
|||
#define EDP_PSR_TP1_TIME_0us (3<<4)
|
||||
#define EDP_PSR_IDLE_FRAME_SHIFT 0
|
||||
|
||||
#define EDP_PSR_AUX_CTL 0x64810
|
||||
#define EDP_PSR_AUX_DATA1 0x64814
|
||||
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
|
||||
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
|
||||
#define EDP_PSR_DPCD_COMMAND 0x80060000
|
||||
#define EDP_PSR_AUX_DATA2 0x64818
|
||||
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
|
||||
#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
|
||||
#define EDP_PSR_AUX_DATA3 0x6481c
|
||||
#define EDP_PSR_AUX_DATA4 0x64820
|
||||
#define EDP_PSR_AUX_DATA5 0x64824
|
||||
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
|
||||
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
|
||||
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
|
||||
|
||||
#define EDP_PSR_STATUS_CTL 0x64840
|
||||
#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
|
||||
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
|
||||
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
|
||||
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
|
||||
|
@ -1904,10 +1925,10 @@
|
|||
#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
|
||||
#define EDP_PSR_STATUS_IDLE_MASK 0xf
|
||||
|
||||
#define EDP_PSR_PERF_CNT 0x64844
|
||||
#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
|
||||
#define EDP_PSR_PERF_CNT_MASK 0xffffff
|
||||
|
||||
#define EDP_PSR_DEBUG_CTL 0x64860
|
||||
#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
|
||||
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
|
||||
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
|
||||
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
|
||||
|
@ -4675,7 +4696,7 @@
|
|||
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
|
||||
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
|
||||
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
|
||||
#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
|
||||
#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
|
||||
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
|
||||
#define GEN6_RP_UP_THRESHOLD 0xA02C
|
||||
#define GEN6_RP_DOWN_THRESHOLD 0xA030
|
||||
|
@ -4720,6 +4741,10 @@
|
|||
GEN6_PM_RP_DOWN_TIMEOUT)
|
||||
|
||||
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
|
||||
#define VLV_COUNTER_CONTROL 0x138104
|
||||
#define VLV_COUNT_RANGE_HIGH (1<<15)
|
||||
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
|
||||
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
|
||||
#define GEN6_GT_GFX_RC6 0x138108
|
||||
#define GEN6_GT_GFX_RC6p 0x13810C
|
||||
#define GEN6_GT_GFX_RC6pp 0x138110
|
||||
|
|
|
@ -37,12 +37,30 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u64 raw_time; /* 32b value may overflow during fixed point math */
|
||||
u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
|
||||
|
||||
if (!intel_enable_rc6(dev))
|
||||
return 0;
|
||||
|
||||
raw_time = I915_READ(reg) * 128ULL;
|
||||
return DIV_ROUND_UP_ULL(raw_time, 100000);
|
||||
/* On VLV, residency time is in CZ units rather than 1.28us */
|
||||
if (IS_VALLEYVIEW(dev)) {
|
||||
u32 clkctl2;
|
||||
|
||||
clkctl2 = I915_READ(VLV_CLK_CTL2) >>
|
||||
CLK_CTL2_CZCOUNT_30NS_SHIFT;
|
||||
if (!clkctl2) {
|
||||
WARN(!clkctl2, "bogus CZ count value");
|
||||
return 0;
|
||||
}
|
||||
units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
|
||||
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
|
||||
units <<= 8;
|
||||
|
||||
div = 1000000ULL * bias;
|
||||
}
|
||||
|
||||
raw_time = I915_READ(reg) * units;
|
||||
return DIV_ROUND_UP_ULL(raw_time, div);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
|
|
|
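calc_residency() above converts the raw RC6 counter to milliseconds: 1.28 us per tick on most parts, but CZ-clock ticks (scaled by the count read from VLV_CLK_CTL2, and by 256 in the high count range) on Valleyview. A standalone sketch of that arithmetic; the input values are assumptions chosen purely for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP_ULL(x, d)  (((x) + (d) - 1) / (d))

    /* Convert a raw residency counter to milliseconds.
     * czcount_30ns == 0 selects the non-VLV path (1.28 us units). */
    static uint64_t residency_ms(uint64_t raw, unsigned czcount_30ns, int high_range)
    {
            uint64_t units = 128, div = 100000, bias = 100;

            if (czcount_30ns) {                     /* VLV: CZ clock units */
                    units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
                    if (high_range)
                            units <<= 8;            /* coarse (x256) count range */
                    div = 1000000ULL * bias;
            }
            return DIV_ROUND_UP_ULL(raw * units, div);
    }

    int main(void)
    {
            printf("non-VLV: %llu ms\n", (unsigned long long)residency_ms(1000000, 0, 0));
            printf("VLV:     %llu ms\n", (unsigned long long)residency_ms(1000000, 30, 0));
            return 0;
    }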
@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
|
|||
TP_printk("dev=%d", __entry->dev)
|
||||
);
|
||||
|
||||
TRACE_EVENT(i915_gem_evict_vm,
|
||||
TP_PROTO(struct i915_address_space *vm),
|
||||
TP_ARGS(vm),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(struct i915_address_space *, vm)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vm = vm;
|
||||
),
|
||||
|
||||
TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
|
||||
);
|
||||
|
||||
TRACE_EVENT(i915_gem_ring_sync_to,
|
||||
TP_PROTO(struct intel_ring_buffer *from,
|
||||
struct intel_ring_buffer *to,
|
||||
u32 seqno),
|
||||
TP_ARGS(from, to, seqno),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, dev)
|
||||
__field(u32, sync_from)
|
||||
__field(u32, sync_to)
|
||||
__field(u32, seqno)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = from->dev->primary->index;
|
||||
__entry->sync_from = from->id;
|
||||
__entry->sync_to = to->id;
|
||||
__entry->seqno = seqno;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
|
||||
__entry->dev,
|
||||
__entry->sync_from, __entry->sync_to,
|
||||
__entry->seqno)
|
||||
);
|
||||
|
||||
TRACE_EVENT(i915_gem_ring_dispatch,
|
||||
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
|
||||
TP_ARGS(ring, seqno, flags),
|
||||
|
@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
|
|||
TP_ARGS(ring, seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
|
||||
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
|
||||
TP_ARGS(ring, seqno)
|
||||
TRACE_EVENT(i915_gem_request_complete,
|
||||
TP_PROTO(struct intel_ring_buffer *ring),
|
||||
TP_ARGS(ring),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, dev)
|
||||
__field(u32, ring)
|
||||
__field(u32, seqno)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = ring->dev->primary->index;
|
||||
__entry->ring = ring->id;
|
||||
__entry->seqno = ring->get_seqno(ring, false);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u",
|
||||
__entry->dev, __entry->ring, __entry->seqno)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
|
||||
|
|
|
@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
|
|||
{
|
||||
struct sdvo_device_mapping *p_mapping;
|
||||
struct bdb_general_definitions *p_defs;
|
||||
struct child_device_config *p_child;
|
||||
union child_device_config *p_child;
|
||||
int i, child_device_num, count;
|
||||
u16 block_size;
|
||||
|
||||
|
@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
|
|||
count = 0;
|
||||
for (i = 0; i < child_device_num; i++) {
|
||||
p_child = &(p_defs->devices[i]);
|
||||
if (!p_child->device_type) {
|
||||
if (!p_child->old.device_type) {
|
||||
/* skip the device block if device type is invalid */
|
||||
continue;
|
||||
}
|
||||
if (p_child->slave_addr != SLAVE_ADDR1 &&
|
||||
p_child->slave_addr != SLAVE_ADDR2) {
|
||||
if (p_child->old.slave_addr != SLAVE_ADDR1 &&
|
||||
p_child->old.slave_addr != SLAVE_ADDR2) {
|
||||
/*
|
||||
* If the slave address is neither 0x70 nor 0x72,
|
||||
* it is not a SDVO device. Skip it.
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
if (p_child->dvo_port != DEVICE_PORT_DVOB &&
|
||||
p_child->dvo_port != DEVICE_PORT_DVOC) {
|
||||
if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
|
||||
p_child->old.dvo_port != DEVICE_PORT_DVOC) {
|
||||
/* skip the incorrect SDVO port */
|
||||
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
|
||||
continue;
|
||||
}
|
||||
DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
|
||||
" %s port\n",
|
||||
p_child->slave_addr,
|
||||
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
|
||||
p_child->old.slave_addr,
|
||||
(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
|
||||
"SDVOB" : "SDVOC");
|
||||
p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
|
||||
p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
|
||||
if (!p_mapping->initialized) {
|
||||
p_mapping->dvo_port = p_child->dvo_port;
|
||||
p_mapping->slave_addr = p_child->slave_addr;
|
||||
p_mapping->dvo_wiring = p_child->dvo_wiring;
|
||||
p_mapping->ddc_pin = p_child->ddc_pin;
|
||||
p_mapping->i2c_pin = p_child->i2c_pin;
|
||||
p_mapping->dvo_port = p_child->old.dvo_port;
|
||||
p_mapping->slave_addr = p_child->old.slave_addr;
|
||||
p_mapping->dvo_wiring = p_child->old.dvo_wiring;
|
||||
p_mapping->ddc_pin = p_child->old.ddc_pin;
|
||||
p_mapping->i2c_pin = p_child->old.i2c_pin;
|
||||
p_mapping->initialized = 1;
|
||||
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
|
||||
p_mapping->dvo_port,
|
||||
|
@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
|
|||
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
|
||||
"two SDVO device.\n");
|
||||
}
|
||||
if (p_child->slave2_addr) {
|
||||
if (p_child->old.slave2_addr) {
|
||||
/* Maybe this is a SDVO device with multiple inputs */
|
||||
/* And the mapping info is not added */
|
||||
DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
|
||||
|
@ -477,15 +477,13 @@ static void
|
|||
parse_driver_features(struct drm_i915_private *dev_priv,
|
||||
struct bdb_header *bdb)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct bdb_driver_features *driver;
|
||||
|
||||
driver = find_section(bdb, BDB_DRIVER_FEATURES);
|
||||
if (!driver)
|
||||
return;
|
||||
|
||||
if (SUPPORTS_EDP(dev) &&
|
||||
driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
|
||||
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
|
||||
dev_priv->vbt.edp_support = 1;
|
||||
|
||||
if (driver->dual_frequency)
|
||||
|
@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
|
|||
|
||||
edp = find_section(bdb, BDB_EDP);
|
||||
if (!edp) {
|
||||
if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
|
||||
if (dev_priv->vbt.edp_support)
|
||||
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
|
||||
return;
|
||||
}
|
||||
|
@ -583,12 +581,135 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
|
|||
dev_priv->vbt.dsi.panel_id = mipi->panel_id;
|
||||
}
|
||||
|
||||
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
|
||||
struct bdb_header *bdb)
|
||||
{
|
||||
union child_device_config *it, *child = NULL;
|
||||
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
|
||||
uint8_t hdmi_level_shift;
|
||||
int i, j;
|
||||
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
|
||||
uint8_t aux_channel;
|
||||
/* Each DDI port can have more than one value on the "DVO Port" field,
|
||||
* so look for all the possible values for each port and abort if more
|
||||
* than one is found. */
|
||||
int dvo_ports[][2] = {
|
||||
{DVO_PORT_HDMIA, DVO_PORT_DPA},
|
||||
{DVO_PORT_HDMIB, DVO_PORT_DPB},
|
||||
{DVO_PORT_HDMIC, DVO_PORT_DPC},
|
||||
{DVO_PORT_HDMID, DVO_PORT_DPD},
|
||||
{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
|
||||
};
|
||||
|
||||
/* Find the child device to use, abort if more than one found. */
|
||||
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
it = dev_priv->vbt.child_dev + i;
|
||||
|
||||
for (j = 0; j < 2; j++) {
|
||||
if (dvo_ports[port][j] == -1)
|
||||
break;
|
||||
|
||||
if (it->common.dvo_port == dvo_ports[port][j]) {
|
||||
if (child) {
|
||||
DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
|
||||
port_name(port));
|
||||
return;
|
||||
}
|
||||
child = it;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!child)
|
||||
return;
|
||||
|
||||
aux_channel = child->raw[25];
|
||||
|
||||
is_dvi = child->common.device_type & (1 << 4);
|
||||
is_dp = child->common.device_type & (1 << 2);
|
||||
is_crt = child->common.device_type & (1 << 0);
|
||||
is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0;
|
||||
is_edp = is_dp && (child->common.device_type & (1 << 12));
|
||||
|
||||
info->supports_dvi = is_dvi;
|
||||
info->supports_hdmi = is_hdmi;
|
||||
info->supports_dp = is_dp;
|
||||
|
||||
DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
|
||||
port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
|
||||
|
||||
if (is_edp && is_dvi)
|
||||
DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
|
||||
port_name(port));
|
||||
if (is_crt && port != PORT_E)
|
||||
DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
|
||||
if (is_crt && (is_dvi || is_dp))
|
||||
DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
|
||||
port_name(port));
|
||||
if (is_dvi && (port == PORT_A || port == PORT_E))
|
||||
DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
|
||||
if (!is_dvi && !is_dp && !is_crt)
|
||||
DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
|
||||
port_name(port));
|
||||
if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
|
||||
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
|
||||
|
||||
if (is_dvi) {
|
||||
if (child->common.ddc_pin == 0x05 && port != PORT_B)
|
||||
DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
|
||||
if (child->common.ddc_pin == 0x04 && port != PORT_C)
|
||||
DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
|
||||
if (child->common.ddc_pin == 0x06 && port != PORT_D)
|
||||
DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
|
||||
}
|
||||
|
||||
if (is_dp) {
|
||||
if (aux_channel == 0x40 && port != PORT_A)
|
||||
DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
|
||||
if (aux_channel == 0x10 && port != PORT_B)
|
||||
DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
|
||||
if (aux_channel == 0x20 && port != PORT_C)
|
||||
DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
|
||||
if (aux_channel == 0x30 && port != PORT_D)
|
||||
DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
|
||||
}
|
||||
|
||||
if (bdb->version >= 158) {
|
||||
/* The VBT HDMI level shift values match the table we have. */
|
||||
hdmi_level_shift = child->raw[7] & 0xF;
|
||||
if (hdmi_level_shift < 0xC) {
|
||||
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
|
||||
port_name(port),
|
||||
hdmi_level_shift);
|
||||
info->hdmi_level_shift = hdmi_level_shift;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
|
||||
struct bdb_header *bdb)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
enum port port;
|
||||
|
||||
if (!HAS_DDI(dev))
|
||||
return;
|
||||
|
||||
if (!dev_priv->vbt.child_dev_num)
|
||||
return;
|
||||
|
||||
if (bdb->version < 155)
|
||||
return;
|
||||
|
||||
for (port = PORT_A; port < I915_MAX_PORTS; port++)
|
||||
parse_ddi_port(dev_priv, port, bdb);
|
||||
}
|
||||
|
||||
static void
|
||||
parse_device_mapping(struct drm_i915_private *dev_priv,
|
||||
struct bdb_header *bdb)
|
||||
{
|
||||
struct bdb_general_definitions *p_defs;
|
||||
struct child_device_config *p_child, *child_dev_ptr;
|
||||
union child_device_config *p_child, *child_dev_ptr;
|
||||
int i, child_device_num, count;
|
||||
u16 block_size;
|
||||
|
||||
|
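parse_ddi_port() in the hunk above classifies each DDI port from the VBT "device type" word: bit 0 analog, bit 2 DisplayPort, bit 4 TMDS/DVI, bit 11 "not HDMI" and bit 12 internal (eDP). A cut-down userspace sketch of that decode; the example value is invented, not taken from a real VBT.

    #include <stdint.h>
    #include <stdio.h>

    struct port_caps { int dvi, hdmi, dp, edp, crt; };

    /* Decode the device type bits the way parse_ddi_port() does above. */
    static struct port_caps decode_device_type(uint16_t device_type)
    {
            struct port_caps c;

            c.dvi  = !!(device_type & (1 << 4));
            c.dp   = !!(device_type & (1 << 2));
            c.crt  = !!(device_type & (1 << 0));
            c.hdmi = c.dvi && !(device_type & (1 << 11));
            c.edp  = c.dp && !!(device_type & (1 << 12));
            return c;
    }

    int main(void)
    {
            /* Example: a port flagged as both TMDS and DP capable. */
            struct port_caps c = decode_device_type((1 << 4) | (1 << 2));

            printf("DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
                   c.dp, c.hdmi, c.dvi, c.edp, c.crt);
            return 0;
    }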
@ -616,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
|
|||
/* get the number of child device that is present */
|
||||
for (i = 0; i < child_device_num; i++) {
|
||||
p_child = &(p_defs->devices[i]);
|
||||
if (!p_child->device_type) {
|
||||
if (!p_child->common.device_type) {
|
||||
/* skip the device block if device type is invalid */
|
||||
continue;
|
||||
}
|
||||
|
@ -636,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
|
|||
count = 0;
|
||||
for (i = 0; i < child_device_num; i++) {
|
||||
p_child = &(p_defs->devices[i]);
|
||||
if (!p_child->device_type) {
|
||||
if (!p_child->common.device_type) {
|
||||
/* skip the device block if device type is invalid */
|
||||
continue;
|
||||
}
|
||||
|
@ -652,6 +773,7 @@ static void
|
|||
init_vbt_defaults(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
enum port port;
|
||||
|
||||
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
|
||||
|
||||
|
@ -670,6 +792,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
|
|||
dev_priv->vbt.lvds_use_ssc = 1;
|
||||
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
|
||||
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
|
||||
|
||||
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
|
||||
struct ddi_vbt_port_info *info =
|
||||
&dev_priv->vbt.ddi_port_info[port];
|
||||
|
||||
/* Recommended BSpec default: 800mV 0dB. */
|
||||
info->hdmi_level_shift = 6;
|
||||
|
||||
info->supports_dvi = (port != PORT_A && port != PORT_E);
|
||||
info->supports_hdmi = info->supports_dvi;
|
||||
info->supports_dp = (port != PORT_E);
|
||||
}
|
||||
}
|
||||
|
||||
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
|
||||
|
@ -761,6 +895,7 @@ intel_parse_bios(struct drm_device *dev)
|
|||
parse_driver_features(dev_priv, bdb);
|
||||
parse_edp(dev_priv, bdb);
|
||||
parse_mipi(dev_priv, bdb);
|
||||
parse_ddi_ports(dev_priv, bdb);
|
||||
|
||||
if (bios)
|
||||
pci_unmap_rom(pdev, bios);
|
||||
|
|
|
@ -202,7 +202,10 @@ struct bdb_general_features {
|
|||
#define DEVICE_PORT_DVOB 0x01
|
||||
#define DEVICE_PORT_DVOC 0x02
|
||||
|
||||
struct child_device_config {
|
||||
/* We used to keep this struct but without any version control. We should avoid
|
||||
* using it in the future, but it should be safe to keep using it in the old
|
||||
* code. */
|
||||
struct old_child_dev_config {
|
||||
u16 handle;
|
||||
u16 device_type;
|
||||
u8 device_id[10]; /* ascii string */
|
||||
|
@ -224,6 +227,32 @@ struct child_device_config {
|
|||
u8 dvo_function;
|
||||
} __attribute__((packed));
|
||||
|
||||
/* This one contains field offsets that are known to be common for all BDB
|
||||
* versions. Notice that the meaning of the contents may still change,
|
||||
* but at least the offsets are consistent. */
|
||||
struct common_child_dev_config {
|
||||
u16 handle;
|
||||
u16 device_type;
|
||||
u8 not_common1[12];
|
||||
u8 dvo_port;
|
||||
u8 not_common2[2];
|
||||
u8 ddc_pin;
|
||||
u16 edid_ptr;
|
||||
} __attribute__((packed));
|
||||
|
||||
/* This field changes depending on the BDB version, so the most reliable way to
|
||||
* read it is by checking the BDB version and reading the raw pointer. */
|
||||
union child_device_config {
|
||||
/* This one is safe to be used anywhere, but the code should still check
|
||||
* the BDB version. */
|
||||
u8 raw[33];
|
||||
/* This one should only be kept for legacy code. */
|
||||
struct old_child_dev_config old;
|
||||
/* This one should also be safe to use anywhere, even without version
|
||||
* checks. */
|
||||
struct common_child_dev_config common;
|
||||
};
|
||||
|
||||
struct bdb_general_definitions {
|
||||
/* DDC GPIO */
|
||||
u8 crt_ddc_gmbus_pin;
|
||||
|
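The union introduced above lets the parser pick whichever view matches the BDB version: raw[] for version-dependent bytes (parse_ddi_port() reads raw[25] for the AUX channel), old for the legacy SDVO fields, and common for the offsets that never move. A cut-down userspace model of that layout; the struct members and offsets here are illustrative stand-ins, not the full VBT definition.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct old_cfg    { uint16_t handle, device_type; uint8_t device_id[10]; } __attribute__((packed));
    struct common_cfg { uint16_t handle, device_type; uint8_t skip[12], dvo_port; } __attribute__((packed));

    union child_cfg {
            uint8_t raw[33];          /* always safe, caller checks the BDB version */
            struct old_cfg old;       /* legacy layout only */
            struct common_cfg common; /* offsets stable across versions */
    };

    int main(void)
    {
            union child_cfg c;

            memset(&c, 0, sizeof(c));
            c.common.device_type = 0x1234;
            c.raw[16] = 7;            /* same byte the common.dvo_port view sees */

            printf("type=%#x dvo_port=%u raw[0]=%u\n",
                   c.common.device_type, c.common.dvo_port, c.raw[0]);
            return 0;
    }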
@ -249,7 +278,7 @@ struct bdb_general_definitions {
|
|||
* number = (block_size - sizeof(bdb_general_definitions))/
|
||||
* sizeof(child_device_config);
|
||||
*/
|
||||
struct child_device_config devices[0];
|
||||
union child_device_config devices[0];
|
||||
} __attribute__((packed));
|
||||
|
||||
struct bdb_lvds_options {
|
||||
|
@ -619,6 +648,19 @@ int intel_parse_bios(struct drm_device *dev);
|
|||
#define PORT_IDPC 8
|
||||
#define PORT_IDPD 9
|
||||
|
||||
/* Possible values for the "DVO Port" field for versions >= 155: */
|
||||
#define DVO_PORT_HDMIA 0
|
||||
#define DVO_PORT_HDMIB 1
|
||||
#define DVO_PORT_HDMIC 2
|
||||
#define DVO_PORT_HDMID 3
|
||||
#define DVO_PORT_LVDS 4
|
||||
#define DVO_PORT_TV 5
|
||||
#define DVO_PORT_CRT 6
|
||||
#define DVO_PORT_DPB 7
|
||||
#define DVO_PORT_DPC 8
|
||||
#define DVO_PORT_DPD 9
|
||||
#define DVO_PORT_DPA 10
|
||||
|
||||
/* MIPI DSI panel info */
|
||||
struct bdb_mipi {
|
||||
u16 panel_id;
|
||||
|
|
|
@ -83,13 +83,11 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
|
|||
return true;
|
||||
}
|
||||
|
||||
static void intel_crt_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config)
|
||||
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct intel_crt *crt = intel_encoder_to_crt(encoder);
|
||||
u32 tmp, flags = 0;
|
||||
int dotclock;
|
||||
|
||||
tmp = I915_READ(crt->adpa_reg);
|
||||
|
||||
|
@ -103,14 +101,35 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
|
|||
else
|
||||
flags |= DRM_MODE_FLAG_NVSYNC;
|
||||
|
||||
pipe_config->adjusted_mode.flags |= flags;
|
||||
return flags;
|
||||
}
|
||||
|
||||
static void intel_crt_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
int dotclock;
|
||||
|
||||
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
|
||||
|
||||
dotclock = pipe_config->port_clock;
|
||||
|
||||
if (HAS_PCH_SPLIT(dev_priv->dev))
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
ironlake_check_encoder_dotclock(pipe_config, dotclock);
|
||||
|
||||
pipe_config->adjusted_mode.clock = dotclock;
|
||||
pipe_config->adjusted_mode.crtc_clock = dotclock;
|
||||
}
|
||||
|
||||
static void hsw_crt_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config)
|
||||
{
|
||||
intel_ddi_get_config(encoder, pipe_config);
|
||||
|
||||
pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
|
||||
DRM_MODE_FLAG_NHSYNC |
|
||||
DRM_MODE_FLAG_PVSYNC |
|
||||
DRM_MODE_FLAG_NVSYNC);
|
||||
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
|
||||
}
|
||||
|
||||
/* Note: The caller is required to filter out dpms modes not supported by the
|
||||
|
@ -658,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
|
|||
|
||||
static void intel_crt_destroy(struct drm_connector *connector)
|
||||
{
|
||||
drm_sysfs_connector_remove(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
kfree(connector);
|
||||
}
|
||||
|
@ -764,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
|
|||
if (!crt)
|
||||
return;
|
||||
|
||||
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
|
||||
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
|
||||
if (!intel_connector) {
|
||||
kfree(crt);
|
||||
return;
|
||||
|
@ -804,7 +822,10 @@ void intel_crt_init(struct drm_device *dev)
|
|||
crt->base.mode_set = intel_crt_mode_set;
|
||||
crt->base.disable = intel_disable_crt;
|
||||
crt->base.enable = intel_enable_crt;
|
||||
crt->base.get_config = intel_crt_get_config;
|
||||
if (IS_HASWELL(dev))
|
||||
crt->base.get_config = hsw_crt_get_config;
|
||||
else
|
||||
crt->base.get_config = intel_crt_get_config;
|
||||
if (I915_HAS_HOTPLUG(dev))
|
||||
crt->base.hpd_pin = HPD_CRT;
|
||||
if (HAS_DDI(dev))
|
||||
|
|
|
@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
|
|||
0x80C30FFF, 0x000B0000,
|
||||
0x00FFFFFF, 0x00040006,
|
||||
0x80D75FFF, 0x000B0000,
|
||||
0x00FFFFFF, 0x00040006 /* HDMI parameters */
|
||||
};
|
||||
|
||||
static const u32 hsw_ddi_translations_fdi[] = {
|
||||
|
@ -55,7 +54,22 @@ static const u32 hsw_ddi_translations_fdi[] = {
|
|||
0x00C30FFF, 0x001E0000,
|
||||
0x00FFFFFF, 0x00060006,
|
||||
0x00D75FFF, 0x001E0000,
|
||||
0x00FFFFFF, 0x00040006 /* HDMI parameters */
|
||||
};
|
||||
|
||||
static const u32 hsw_ddi_translations_hdmi[] = {
|
||||
/* Idx NT mV diff T mV diff db */
|
||||
0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
|
||||
0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
|
||||
0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
|
||||
0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
|
||||
0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
|
||||
0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
|
||||
0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
|
||||
0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
|
||||
0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
|
||||
0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
|
||||
0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
|
||||
0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
|
||||
};
|
||||
|
||||
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
|
||||
|
@ -92,12 +106,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
|
|||
const u32 *ddi_translations = (port == PORT_E) ?
|
||||
hsw_ddi_translations_fdi :
|
||||
hsw_ddi_translations_dp;
|
||||
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
|
||||
|
||||
for (i = 0, reg = DDI_BUF_TRANS(port);
|
||||
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
|
||||
I915_WRITE(reg, ddi_translations[i]);
|
||||
reg += 4;
|
||||
}
|
||||
/* Entry 9 is for HDMI: */
|
||||
for (i = 0; i < 2; i++) {
|
||||
I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
|
||||
reg += 4;
|
||||
}
|
||||
}
|
||||
|
||||
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
|
||||
|
@ -1246,8 +1266,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
|
|||
intel_dp_check_link_status(intel_dp);
|
||||
}
|
||||
|
||||
static void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config)
|
||||
void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
|
@ -1333,12 +1353,23 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
|
|||
struct drm_encoder *encoder;
|
||||
struct intel_connector *hdmi_connector = NULL;
|
||||
struct intel_connector *dp_connector = NULL;
|
||||
bool init_hdmi, init_dp;
|
||||
|
||||
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
|
||||
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
|
||||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
|
||||
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
|
||||
if (!init_dp && !init_hdmi) {
|
||||
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
|
||||
port_name(port));
|
||||
init_hdmi = true;
|
||||
init_dp = true;
|
||||
}
|
||||
|
||||
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
|
||||
if (!intel_dig_port)
|
||||
return;
|
||||
|
||||
dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
|
||||
dp_connector = kzalloc(sizeof(*dp_connector), GFP_KERNEL);
|
||||
if (!dp_connector) {
|
||||
kfree(intel_dig_port);
|
||||
return;
|
||||
|
@ -1370,19 +1401,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
|
|||
intel_encoder->cloneable = false;
|
||||
intel_encoder->hot_plug = intel_ddi_hot_plug;
|
||||
|
||||
if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
|
||||
if (init_dp && !intel_dp_init_connector(intel_dig_port, dp_connector)) {
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(intel_dig_port);
|
||||
kfree(dp_connector);
|
||||
return;
|
||||
}
|
||||
|
||||
if (intel_encoder->type != INTEL_OUTPUT_EDP) {
|
||||
hdmi_connector = kzalloc(sizeof(struct intel_connector),
|
||||
/* In theory we don't need the encoder->type check, but leave it just in
|
||||
* case we have some really bad VBTs... */
|
||||
if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
|
||||
hdmi_connector = kzalloc(sizeof(*hdmi_connector),
|
||||
GFP_KERNEL);
|
||||
if (!hdmi_connector) {
|
||||
if (!hdmi_connector)
|
||||
return;
|
||||
}
|
||||
|
||||
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
|
||||
intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
|
||||
|
|
|
@ -41,7 +41,6 @@
|
|||
#include <drm/drm_crtc_helper.h>
|
||||
#include <linux/dma_remapping.h>
|
||||
|
||||
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
|
||||
static void intel_increase_pllclock(struct drm_crtc *crtc);
|
||||
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
|
||||
|
||||
|
@ -336,6 +335,21 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
|
|||
.p2_slow = 2, .p2_fast = 20 },
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns whether any output on the specified pipe is of the specified type
|
||||
*/
|
||||
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
for_each_encoder_on_crtc(dev, crtc, encoder)
|
||||
if (encoder->type == type)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
|
||||
int refclk)
|
||||
{
|
||||
|
@ -438,21 +452,6 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
|
|||
clock->dot = clock->vco / clock->p;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns whether any output on the specified pipe is of the specified type
|
||||
*/
|
||||
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
for_each_encoder_on_crtc(dev, crtc, encoder)
|
||||
if (encoder->type == type)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
|
||||
/**
|
||||
* Returns whether the given set of divisors are valid for a given refclk with
|
||||
|
@ -696,29 +695,30 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
|
|||
p = p1 * p2;
|
||||
/* based on hardware requirement, prefer bigger m1,m2 values */
|
||||
for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
|
||||
m2 = (((2*(fastclk * p * n / m1 )) +
|
||||
refclk) / (2*refclk));
|
||||
m2 = DIV_ROUND_CLOSEST(fastclk * p * n, refclk * m1);
|
||||
m = m1 * m2;
|
||||
vco = updrate * m;
|
||||
if (vco >= limit->vco.min && vco < limit->vco.max) {
|
||||
ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
|
||||
absppm = (ppm > 0) ? ppm : (-ppm);
|
||||
if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
|
||||
bestppm = 0;
|
||||
flag = 1;
|
||||
}
|
||||
if (absppm < bestppm - 10) {
|
||||
bestppm = absppm;
|
||||
flag = 1;
|
||||
}
|
||||
if (flag) {
|
||||
bestn = n;
|
||||
bestm1 = m1;
|
||||
bestm2 = m2;
|
||||
bestp1 = p1;
|
||||
bestp2 = p2;
|
||||
flag = 0;
|
||||
}
|
||||
|
||||
if (vco < limit->vco.min || vco >= limit->vco.max)
|
||||
continue;
|
||||
|
||||
ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
|
||||
absppm = (ppm > 0) ? ppm : (-ppm);
|
||||
if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
|
||||
bestppm = 0;
|
||||
flag = 1;
|
||||
}
|
||||
if (absppm < bestppm - 10) {
|
||||
bestppm = absppm;
|
||||
flag = 1;
|
||||
}
|
||||
if (flag) {
|
||||
bestn = n;
|
||||
bestm1 = m1;
|
||||
bestm2 = m2;
|
||||
bestp1 = p1;
|
||||
bestp2 = p2;
|
||||
flag = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
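The m2 change in vlv_find_best_dpll() above replaces an open-coded round-to-nearest with DIV_ROUND_CLOSEST() applied to the full numerator fastclk * p * n over refclk * m1, so nothing is truncated early. A tiny illustration of what the macro computes compared with plain truncating division; the operand values are arbitrary.

    #include <stdio.h>

    /* Round-to-nearest division for positive integers, as the kernel macro does. */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            /* Arbitrary values standing in for fastclk * p * n and refclk * m1. */
            unsigned long long num = 500, den = 300;

            printf("truncating: %llu  round-to-nearest: %llu\n",
                   num / den, DIV_ROUND_CLOSEST(num, den));
            return 0;
    }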
@ -740,14 +740,14 @@ bool intel_crtc_active(struct drm_crtc *crtc)
|
|||
/* Be paranoid as we can arrive here with only partial
|
||||
* state retrieved from the hardware during setup.
|
||||
*
|
||||
* We can ditch the adjusted_mode.clock check as soon
|
||||
* We can ditch the adjusted_mode.crtc_clock check as soon
|
||||
* as Haswell has gained clock readout/fastboot support.
|
||||
*
|
||||
* We can ditch the crtc->fb check as soon as we can
|
||||
* properly reconstruct framebuffers.
|
||||
*/
|
||||
return intel_crtc->active && crtc->fb &&
|
||||
intel_crtc->config.adjusted_mode.clock;
|
||||
intel_crtc->config.adjusted_mode.crtc_clock;
|
||||
}
|
||||
|
||||
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
|
||||
|
@ -1360,6 +1360,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
|
|||
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
|
||||
}
|
||||
|
||||
static void intel_init_dpio(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!IS_VALLEYVIEW(dev))
|
||||
return;
|
||||
|
||||
/*
|
||||
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
|
||||
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
|
||||
* a. GUnit 0x2110 bit[0] set to 1 (def 0)
|
||||
* b. The other bits such as sfr settings / modesel may all be set
|
||||
* to 0.
|
||||
*
|
||||
* This should only be done on init and resume from S3 with both
|
||||
* PLLs disabled, or we risk losing DPIO and PLL synchronization.
|
||||
*/
|
||||
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
|
||||
}
|
||||
|
||||
static void vlv_enable_pll(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
|
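intel_init_dpio() above is a one-bit read-modify-write: set DPIO_CMNRST (bit 0 of DPIO_CTL) while leaving the sfr/modesel bits untouched. A generic sketch of that pattern against a fake register; no real MMIO is involved and the prior register contents are invented.

    #include <stdint.h>
    #include <stdio.h>

    #define DPIO_CMNRST     (1u << 0)       /* de-assert cmn_reset, as above */

    /* Stand-in for an MMIO register; the driver uses I915_READ/I915_WRITE. */
    static uint32_t fake_dpio_ctl = 0x00000006;     /* arbitrary prior contents */

    static uint32_t reg_read(void)            { return fake_dpio_ctl; }
    static void     reg_write(uint32_t value) { fake_dpio_ctl = value; }

    int main(void)
    {
            /* Read-modify-write: only bit 0 changes, the other bits survive. */
            reg_write(reg_read() | DPIO_CMNRST);
            printf("DPIO_CTL = %#010x\n", fake_dpio_ctl);   /* -> 0x00000007 */
            return 0;
    }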
@ -1466,6 +1486,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
|||
POSTING_READ(DPLL(pipe));
|
||||
}
|
||||
|
||||
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
{
|
||||
u32 val = 0;
|
||||
|
||||
/* Make sure the pipe isn't still relying on us */
|
||||
assert_pipe_disabled(dev_priv, pipe);
|
||||
|
||||
/* Leave integrated clock source enabled */
|
||||
if (pipe == PIPE_B)
|
||||
val = DPLL_INTEGRATED_CRI_CLK_VLV;
|
||||
I915_WRITE(DPLL(pipe), val);
|
||||
POSTING_READ(DPLL(pipe));
|
||||
}
|
||||
|
||||
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
|
||||
{
|
||||
u32 port_mask;
|
||||
|
@ -2286,11 +2320,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Update pipe size and adjust fitter if needed */
|
||||
/*
|
||||
* Update pipe size and adjust fitter if needed: the reason for this is
|
||||
* that in compute_mode_changes we check the native mode (not the pfit
|
||||
* mode) to see if we can flip rather than do a full mode set. In the
|
||||
* fastboot case, we'll flip, but if we don't update the pipesrc and
|
||||
* pfit state, we'll end up with a big fb scanned out into the wrong
|
||||
* sized surface.
|
||||
*
|
||||
* To fix this properly, we need to hoist the checks up into
|
||||
* compute_mode_changes (or above), check the actual pfit state and
|
||||
* whether the platform allows pfit disable with pipe active, and only
|
||||
* then update the pipesrc and pfit state, even on the flip path.
|
||||
*/
|
||||
if (i915_fastboot) {
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&intel_crtc->config.adjusted_mode;
|
||||
|
||||
I915_WRITE(PIPESRC(intel_crtc->pipe),
|
||||
((crtc->mode.hdisplay - 1) << 16) |
|
||||
(crtc->mode.vdisplay - 1));
|
||||
((adjusted_mode->crtc_hdisplay - 1) << 16) |
|
||||
(adjusted_mode->crtc_vdisplay - 1));
|
||||
if (!intel_crtc->config.pch_pfit.enabled &&
|
||||
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
|
||||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
|
||||
|
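The fastboot path above programs PIPESRC from the adjusted mode by packing (width - 1) into the high half-word and (height - 1) into the low one. The encoding in isolation, with 1920x1080 used only as an example timing:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a source size the way the PIPESRC write above does:
     * bits 31:16 = horizontal size - 1, bits 15:0 = vertical size - 1. */
    static uint32_t pipesrc(unsigned hdisplay, unsigned vdisplay)
    {
            return ((hdisplay - 1) << 16) | (vdisplay - 1);
    }

    int main(void)
    {
            printf("PIPESRC(1920x1080) = %#010x\n", pipesrc(1920, 1080));
            return 0;
    }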
@ -2914,7 +2963,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
|
|||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
|
||||
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
|
||||
u32 divsel, phaseinc, auxdiv, phasedir = 0;
|
||||
u32 temp;
|
||||
|
||||
|
@ -2938,8 +2987,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
|
|||
phaseinc = 0x20;
|
||||
} else {
|
||||
/* The iCLK virtual clock root frequency is in MHz,
|
||||
* but the adjusted_mode->clock in in KHz. To get the divisors,
|
||||
* it is necessary to divide one by another, so we
|
||||
* but the adjusted_mode->crtc_clock is in KHz. To get the
|
||||
* divisors, it is necessary to divide one by another, so we
|
||||
* convert the virtual clock precision to KHz here for higher
|
||||
* precision.
|
||||
*/
|
||||
|
@ -3283,6 +3332,84 @@ static void intel_disable_planes(struct drm_crtc *crtc)
|
|||
intel_plane_disable(&intel_plane->base);
|
||||
}
|
||||
|
||||
static void hsw_enable_ips(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
|
||||
if (!crtc->config.ips_enabled)
|
||||
return;
|
||||
|
||||
/* We can only enable IPS after we enable a plane and wait for a vblank.
|
||||
* We guarantee that the plane is enabled by calling intel_enable_ips
|
||||
* only after intel_enable_plane. And intel_enable_plane already waits
|
||||
* for a vblank, so all we need to do here is to enable the IPS bit. */
|
||||
assert_plane_enabled(dev_priv, crtc->plane);
|
||||
I915_WRITE(IPS_CTL, IPS_ENABLE);
|
||||
}
|
||||
|
||||
static void hsw_disable_ips(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!crtc->config.ips_enabled)
|
||||
return;
|
||||
|
||||
assert_plane_enabled(dev_priv, crtc->plane);
|
||||
I915_WRITE(IPS_CTL, 0);
|
||||
POSTING_READ(IPS_CTL);
|
||||
|
||||
/* We need to wait for a vblank before we can disable the plane. */
|
||||
intel_wait_for_vblank(dev, crtc->pipe);
|
||||
}
|
||||
|
||||
/** Loads the palette/gamma unit for the CRTC with the prepared values */
|
||||
static void intel_crtc_load_lut(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
int palreg = PALETTE(pipe);
|
||||
int i;
|
||||
bool reenable_ips = false;
|
||||
|
||||
/* The clocks have to be on to load the palette. */
|
||||
if (!crtc->enabled || !intel_crtc->active)
|
||||
return;
|
||||
|
||||
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
|
||||
assert_dsi_pll_enabled(dev_priv);
|
||||
else
|
||||
assert_pll_enabled(dev_priv, pipe);
|
||||
}
|
||||
|
||||
/* use legacy palette for Ironlake */
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
palreg = LGC_PALETTE(pipe);
|
||||
|
||||
/* Workaround : Do not read or write the pipe palette/gamma data while
|
||||
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
|
||||
*/
|
||||
if (intel_crtc->config.ips_enabled &&
|
||||
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
|
||||
GAMMA_MODE_MODE_SPLIT)) {
|
||||
hsw_disable_ips(intel_crtc);
|
||||
reenable_ips = true;
|
||||
}
|
||||
|
||||
for (i = 0; i < 256; i++) {
|
||||
I915_WRITE(palreg + 4 * i,
|
||||
(intel_crtc->lut_r[i] << 16) |
|
||||
(intel_crtc->lut_g[i] << 8) |
|
||||
intel_crtc->lut_b[i]);
|
||||
}
|
||||
|
||||
if (reenable_ips)
|
||||
hsw_enable_ips(intel_crtc);
|
||||
}
|
||||
|
||||
static void ironlake_crtc_enable(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
|
@ -3361,35 +3488,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
|
|||
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
|
||||
}
|
||||
|
||||
static void hsw_enable_ips(struct intel_crtc *crtc)
|
||||
static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int pipe = intel_crtc->pipe;
|
||||
int plane = intel_crtc->plane;
|
||||
|
||||
if (!crtc->config.ips_enabled)
|
||||
return;
|
||||
intel_enable_plane(dev_priv, plane, pipe);
|
||||
intel_enable_planes(crtc);
|
||||
intel_crtc_update_cursor(crtc, true);
|
||||
|
||||
/* We can only enable IPS after we enable a plane and wait for a vblank.
|
||||
* We guarantee that the plane is enabled by calling intel_enable_ips
|
||||
* only after intel_enable_plane. And intel_enable_plane already waits
|
||||
* for a vblank, so all we need to do here is to enable the IPS bit. */
|
||||
assert_plane_enabled(dev_priv, crtc->plane);
|
||||
I915_WRITE(IPS_CTL, IPS_ENABLE);
|
||||
hsw_enable_ips(intel_crtc);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
intel_update_fbc(dev);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
static void hsw_disable_ips(struct intel_crtc *crtc)
|
||||
static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int pipe = intel_crtc->pipe;
|
||||
int plane = intel_crtc->plane;
|
||||
|
||||
intel_crtc_wait_for_pending_flips(crtc);
|
||||
drm_vblank_off(dev, pipe);
|
||||
|
||||
/* FBC must be disabled before disabling the plane on HSW. */
|
||||
if (dev_priv->fbc.plane == plane)
|
||||
intel_disable_fbc(dev);
|
||||
|
||||
hsw_disable_ips(intel_crtc);
|
||||
|
||||
intel_crtc_update_cursor(crtc, false);
|
||||
intel_disable_planes(crtc);
|
||||
intel_disable_plane(dev_priv, plane, pipe);
|
||||
}
|
||||
|
||||
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc_it, *other_active_crtc = NULL;

if (!crtc->config.ips_enabled)
/* We want to get the other_active_crtc only if there's only 1 other
* active crtc. */
list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
if (!crtc_it->active || crtc_it == crtc)
continue;

if (other_active_crtc)
return;

other_active_crtc = crtc_it;
}
if (!other_active_crtc)
return;

assert_plane_enabled(dev_priv, crtc->plane);
I915_WRITE(IPS_CTL, 0);
POSTING_READ(IPS_CTL);

/* We need to wait for a vblank before we can disable the plane. */
intel_wait_for_vblank(dev, crtc->pipe);
intel_wait_for_vblank(dev, other_active_crtc->pipe);
intel_wait_for_vblank(dev, other_active_crtc->pipe);
}
||||
|
||||
static void haswell_crtc_enable(struct drm_crtc *crtc)
|
||||
|
@ -3399,7 +3565,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
|
|||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_encoder *encoder;
|
||||
int pipe = intel_crtc->pipe;
|
||||
int plane = intel_crtc->plane;
|
||||
|
||||
WARN_ON(!crtc->enabled);
|
||||
|
||||
|
@ -3435,24 +3600,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
|
|||
intel_update_watermarks(crtc);
|
||||
intel_enable_pipe(dev_priv, pipe,
|
||||
intel_crtc->config.has_pch_encoder, false);
|
||||
intel_enable_plane(dev_priv, plane, pipe);
|
||||
intel_enable_planes(crtc);
|
||||
intel_crtc_update_cursor(crtc, true);
|
||||
|
||||
hsw_enable_ips(intel_crtc);
|
||||
|
||||
if (intel_crtc->config.has_pch_encoder)
|
||||
lpt_pch_enable(crtc);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
intel_update_fbc(dev);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
for_each_encoder_on_crtc(dev, crtc, encoder) {
|
||||
encoder->enable(encoder);
|
||||
intel_opregion_notify_encoder(encoder, true);
|
||||
}
|
||||
|
||||
/* If we change the relative order between pipe/planes enabling, we need
|
||||
* to change the workaround. */
|
||||
haswell_mode_set_planes_workaround(intel_crtc);
|
||||
haswell_crtc_enable_planes(crtc);
|
||||
|
||||
/*
|
||||
* There seems to be a race in PCH platform hw (at least on some
|
||||
* outputs) where an enabled pipe still completes any pageflip right
|
||||
|
@ -3559,30 +3720,18 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
|
|||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_encoder *encoder;
|
||||
int pipe = intel_crtc->pipe;
|
||||
int plane = intel_crtc->plane;
|
||||
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
|
||||
|
||||
if (!intel_crtc->active)
|
||||
return;
|
||||
|
||||
haswell_crtc_disable_planes(crtc);
|
||||
|
||||
for_each_encoder_on_crtc(dev, crtc, encoder) {
|
||||
intel_opregion_notify_encoder(encoder, false);
|
||||
encoder->disable(encoder);
|
||||
}
|
||||
|
||||
intel_crtc_wait_for_pending_flips(crtc);
|
||||
drm_vblank_off(dev, pipe);
|
||||
|
||||
/* FBC must be disabled before disabling the plane on HSW. */
|
||||
if (dev_priv->fbc.plane == plane)
|
||||
intel_disable_fbc(dev);
|
||||
|
||||
hsw_disable_ips(intel_crtc);
|
||||
|
||||
intel_crtc_update_cursor(crtc, false);
|
||||
intel_disable_planes(crtc);
|
||||
intel_disable_plane(dev_priv, plane, pipe);
|
||||
|
||||
if (intel_crtc->config.has_pch_encoder)
|
||||
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
|
||||
intel_disable_pipe(dev_priv, pipe);
|
||||
|
@ -3828,7 +3977,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
|
|||
if (encoder->post_disable)
|
||||
encoder->post_disable(encoder);
|
||||
|
||||
if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
|
||||
if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
|
||||
vlv_disable_pll(dev_priv, pipe);
|
||||
else if (!IS_VALLEYVIEW(dev))
|
||||
i9xx_disable_pll(dev_priv, pipe);
|
||||
|
||||
intel_crtc->active = false;
|
||||
|
@@ -4102,7 +4253,7 @@ retry:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

fdi_dotclock = adjusted_mode->clock;
fdi_dotclock = adjusted_mode->crtc_clock;

lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
pipe_config->pipe_bpp);
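FDI lane selection is a bandwidth division: dotclock times bits per pixel has to fit into the per-lane link bandwidth times the lane count. A worked example with assumed numbers (2.7 GHz FDI link, 24 bpp, 1080p dotclock); the exact rounding in ironlake_get_lanes_required() may differ:

#include <stdio.h>

int main(void)
{
	long fdi_dotclock = 148500;	/* kHz */
	long bpp = 24;
	long link_bw = 270000;		/* kHz, per-lane symbol clock */

	/* Payload bits per second vs. 8 payload bits per symbol per lane */
	long bps = fdi_dotclock * bpp;				/* 3,564,000 */
	long lanes = (bps + link_bw * 8 - 1) / (link_bw * 8);	/* ceil -> 2 */

	printf("need %ld FDI lane(s)\n", lanes);
	return 0;
}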
@@ -4158,12 +4309,12 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* otherwise pipe A only.
*/
if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
adjusted_mode->clock > clock_limit * 9 / 10) {
adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
clock_limit *= 2;
pipe_config->double_wide = true;
}

if (adjusted_mode->clock > clock_limit * 9 / 10)
if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
return -EINVAL;
}
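The 9/10 factor leaves a 10% guard band under the core display clock, and double-wide mode doubles the ceiling on the pipes that support it. A quick numeric sketch with a made-up clock_limit:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int clock_limit = 133000;	/* kHz, hypothetical core display clock */
	int crtc_clock = 135000;	/* kHz, requested pipe pixel clock */
	bool double_wide = false;

	/* 135000 > 119700 (90% of 133000): enable double-wide, double the limit */
	if (crtc_clock > clock_limit * 9 / 10) {
		clock_limit *= 2;
		double_wide = true;
	}

	/* 135000 <= 239400 after doubling: the mode is accepted */
	printf("double_wide=%d, accepted=%d\n",
	       double_wide, crtc_clock <= clock_limit * 9 / 10);
	return 0;
}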
@ -4568,9 +4719,9 @@ static void vlv_update_pll(struct intel_crtc *crtc)
|
|||
/* Enable DPIO clock input */
|
||||
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
|
||||
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
|
||||
if (pipe)
|
||||
/* We should never disable this, set it here for state tracking */
|
||||
if (pipe == PIPE_B)
|
||||
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
|
||||
|
||||
dpll |= DPLL_VCO_ENABLE;
|
||||
crtc->config.dpll_hw_state.dpll = dpll;
|
||||
|
||||
|
@ -4823,7 +4974,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
|
|||
|
||||
crtc->mode.flags = pipe_config->adjusted_mode.flags;
|
||||
|
||||
crtc->mode.clock = pipe_config->adjusted_mode.clock;
|
||||
crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
|
||||
crtc->mode.flags |= pipe_config->adjusted_mode.flags;
|
||||
}
|
||||
|
||||
|
@ -4918,9 +5069,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
|
|||
num_connectors++;
|
||||
}
|
||||
|
||||
refclk = i9xx_get_refclk(crtc, num_connectors);
|
||||
if (is_dsi)
|
||||
goto skip_dpll;
|
||||
|
||||
if (!intel_crtc->config.clock_set) {
|
||||
refclk = i9xx_get_refclk(crtc, num_connectors);
|
||||
|
||||
if (!is_dsi && !intel_crtc->config.clock_set) {
|
||||
/*
|
||||
* Returns a set of divisors for the desired target clock with
|
||||
* the given refclk, or FALSE. The returned values represent
|
||||
|
@ -4931,28 +5085,25 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
|
|||
ok = dev_priv->display.find_dpll(limit, crtc,
|
||||
intel_crtc->config.port_clock,
|
||||
refclk, NULL, &clock);
|
||||
if (!ok && !intel_crtc->config.clock_set) {
|
||||
if (!ok) {
|
||||
DRM_ERROR("Couldn't find PLL settings for mode!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (is_lvds && dev_priv->lvds_downclock_avail) {
|
||||
/*
|
||||
* Ensure we match the reduced clock's P to the target clock.
|
||||
* If the clocks don't match, we can't switch the display clock
|
||||
* by using the FP0/FP1. In such case we will disable the LVDS
|
||||
* downclock feature.
|
||||
*/
|
||||
limit = intel_limit(crtc, refclk);
|
||||
has_reduced_clock =
|
||||
dev_priv->display.find_dpll(limit, crtc,
|
||||
dev_priv->lvds_downclock,
|
||||
refclk, &clock,
|
||||
&reduced_clock);
|
||||
}
|
||||
/* Compat-code for transition, will disappear. */
|
||||
if (!intel_crtc->config.clock_set) {
|
||||
if (is_lvds && dev_priv->lvds_downclock_avail) {
|
||||
/*
|
||||
* Ensure we match the reduced clock's P to the target
|
||||
* clock. If the clocks don't match, we can't switch
|
||||
* the display clock by using the FP0/FP1. In such case
|
||||
* we will disable the LVDS downclock feature.
|
||||
*/
|
||||
has_reduced_clock =
|
||||
dev_priv->display.find_dpll(limit, crtc,
|
||||
dev_priv->lvds_downclock,
|
||||
refclk, &clock,
|
||||
&reduced_clock);
|
||||
}
|
||||
/* Compat-code for transition, will disappear. */
|
||||
intel_crtc->config.dpll.n = clock.n;
|
||||
intel_crtc->config.dpll.m1 = clock.m1;
|
||||
intel_crtc->config.dpll.m2 = clock.m2;
|
||||
|
@ -4965,14 +5116,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
|
|||
has_reduced_clock ? &reduced_clock : NULL,
|
||||
num_connectors);
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
if (!is_dsi)
|
||||
vlv_update_pll(intel_crtc);
|
||||
vlv_update_pll(intel_crtc);
|
||||
} else {
|
||||
i9xx_update_pll(intel_crtc,
|
||||
has_reduced_clock ? &reduced_clock : NULL,
|
||||
num_connectors);
|
||||
}
|
||||
|
||||
skip_dpll:
|
||||
/* Set up the display plane register */
|
||||
dspcntr = DISPPLANE_GAMMA_ENABLE;
|
||||
|
||||
|
@@ -5030,6 +5181,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}

static void vlv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = pipe_config->cpu_transcoder;
intel_clock_t clock;
u32 mdiv;
int refclk = 100000;

mutex_lock(&dev_priv->dpio_lock);
mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
mutex_unlock(&dev_priv->dpio_lock);

clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
clock.m2 = mdiv & DPIO_M2DIV_MASK;
clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);

pipe_config->port_clock = clock.dot / 10;
}
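The readout above turns the raw DPIO divider fields back into a port clock. Worked through with the corrected DP 1.62 divisors from the vlv_dpll table later in this series and the 100 MHz refclk used above (illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	int refclk = 100000;				/* kHz, as in vlv_crtc_clock_get() */
	int m1 = 3, m2 = 81, n = 5, p1 = 3, p2 = 2;	/* DP_LINK_BW_1_62 divisors */

	long vco = (long)refclk * m1 * m2 / n;		/* 4,860,000 kHz */
	long dot = 2 * vco / (p1 * p2);			/* 1,620,000 kHz */
	long port_clock = dot / 10;			/* 162,000 kHz -> 1.62 GHz link */

	printf("vco %ld kHz, dot %ld kHz, port_clock %ld kHz\n",
	       vco, dot, port_clock);
	return 0;
}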
|
||||
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
|
||||
struct intel_crtc_config *pipe_config)
|
||||
{
|
||||
|
@ -5095,7 +5272,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
|
|||
DPLL_PORTB_READY_MASK);
|
||||
}
|
||||
|
||||
i9xx_crtc_clock_get(crtc, pipe_config);
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
vlv_crtc_clock_get(crtc, pipe_config);
|
||||
else
|
||||
i9xx_crtc_clock_get(crtc, pipe_config);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -6111,8 +6291,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
|
|||
* register. Callers should take care of disabling all the display engine
|
||||
* functions, doing the mode unset, fixing interrupts, etc.
|
||||
*/
|
||||
void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
|
||||
bool switch_to_fclk, bool allow_power_down)
|
||||
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
|
||||
bool switch_to_fclk, bool allow_power_down)
|
||||
{
|
||||
uint32_t val;
|
||||
|
||||
|
@ -6162,7 +6342,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
|
|||
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
|
||||
* source.
|
||||
*/
|
||||
void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
|
||||
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
uint32_t val;
|
||||
|
||||
|
@ -6787,53 +6967,6 @@ void intel_write_eld(struct drm_encoder *encoder,
|
|||
dev_priv->display.write_eld(connector, crtc);
|
||||
}
|
||||
|
||||
/** Loads the palette/gamma unit for the CRTC with the prepared values */
|
||||
void intel_crtc_load_lut(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
int palreg = PALETTE(pipe);
|
||||
int i;
|
||||
bool reenable_ips = false;
|
||||
|
||||
/* The clocks have to be on to load the palette. */
|
||||
if (!crtc->enabled || !intel_crtc->active)
|
||||
return;
|
||||
|
||||
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
|
||||
assert_dsi_pll_enabled(dev_priv);
|
||||
else
|
||||
assert_pll_enabled(dev_priv, pipe);
|
||||
}
|
||||
|
||||
/* use legacy palette for Ironlake */
|
||||
if (HAS_PCH_SPLIT(dev))
|
||||
palreg = LGC_PALETTE(pipe);
|
||||
|
||||
/* Workaround : Do not read or write the pipe palette/gamma data while
|
||||
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
|
||||
*/
|
||||
if (intel_crtc->config.ips_enabled &&
|
||||
((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
|
||||
GAMMA_MODE_MODE_SPLIT)) {
|
||||
hsw_disable_ips(intel_crtc);
|
||||
reenable_ips = true;
|
||||
}
|
||||
|
||||
for (i = 0; i < 256; i++) {
|
||||
I915_WRITE(palreg + 4 * i,
|
||||
(intel_crtc->lut_r[i] << 16) |
|
||||
(intel_crtc->lut_g[i] << 8) |
|
||||
intel_crtc->lut_b[i]);
|
||||
}
|
||||
|
||||
if (reenable_ips)
|
||||
hsw_enable_ips(intel_crtc);
|
||||
}
|
||||
|
||||
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
|
@ -7103,27 +7236,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/** Sets the color ramps on behalf of RandR */
|
||||
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
|
||||
u16 blue, int regno)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
intel_crtc->lut_r[regno] = red >> 8;
|
||||
intel_crtc->lut_g[regno] = green >> 8;
|
||||
intel_crtc->lut_b[regno] = blue >> 8;
|
||||
}
|
||||
|
||||
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
|
||||
u16 *blue, int regno)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
*red = intel_crtc->lut_r[regno] << 8;
|
||||
*green = intel_crtc->lut_g[regno] << 8;
|
||||
*blue = intel_crtc->lut_b[regno] << 8;
|
||||
}
|
||||
|
||||
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
|
||||
u16 *blue, uint32_t start, uint32_t size)
|
||||
{
|
||||
|
@@ -7466,7 +7578,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,

/*
* This value includes pixel_multiplier. We will use
* port_clock to compute adjusted_mode.clock in the
* port_clock to compute adjusted_mode.crtc_clock in the
* encoder's get_config() function.
*/
pipe_config->port_clock = clock.dot;

@@ -7501,11 +7613,11 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,

/*
* This value does not include pixel_multiplier.
* We will check that port_clock and adjusted_mode.clock
* We will check that port_clock and adjusted_mode.crtc_clock
* agree once we know their relationship in the encoder's
* get_config() function.
*/
pipe_config->adjusted_mode.clock =
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
&pipe_config->fdi_m_n);
}
@@ -7543,7 +7655,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
i9xx_crtc_clock_get(intel_crtc, &pipe_config);

mode->clock = pipe_config.adjusted_mode.clock;
mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
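The new expression makes the pixel_multiplier relationship explicit: i9xx_crtc_clock_get() fills port_clock including the multiplier (see the comment a couple of hunks up), so the user-visible mode clock is the port clock divided back down. A small illustration, assuming an SDVO-style 2x multiplier (values are made up):

#include <stdio.h>

int main(void)
{
	int port_clock = 130000;	/* kHz, as read back from the PLL */
	int pixel_multiplier = 2;	/* e.g. SDVO doubling a low dotclock */

	/* What intel_crtc_mode_get() now reports as the mode's pixel clock */
	int mode_clock = port_clock / pixel_multiplier;	/* 65000 kHz */

	printf("port_clock %d kHz -> mode->clock %d kHz\n",
	       port_clock, mode_clock);
	return 0;
}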
@ -7649,6 +7761,9 @@ void intel_mark_idle(struct drm_device *dev)
|
|||
|
||||
intel_decrease_pllclock(crtc);
|
||||
}
|
||||
|
||||
if (dev_priv->info->gen >= 6)
|
||||
gen6_rps_idle(dev->dev_private);
|
||||
}
|
||||
|
||||
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
|
||||
|
@ -8097,7 +8212,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
fb->pitches[0] != crtc->fb->pitches[0]))
|
||||
return -EINVAL;
|
||||
|
||||
work = kzalloc(sizeof *work, GFP_KERNEL);
|
||||
work = kzalloc(sizeof(*work), GFP_KERNEL);
|
||||
if (work == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -8336,7 +8451,7 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
|
|||
{
|
||||
DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
|
||||
"type: 0x%x flags: 0x%x\n",
|
||||
mode->clock,
|
||||
mode->crtc_clock,
|
||||
mode->crtc_hdisplay, mode->crtc_hsync_start,
|
||||
mode->crtc_hsync_end, mode->crtc_htotal,
|
||||
mode->crtc_vdisplay, mode->crtc_vsync_start,
|
||||
|
@@ -8426,9 +8541,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);

pipe_config->pipe_src_w = mode->hdisplay;
pipe_config->pipe_src_h = mode->vdisplay;

pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8455,13 +8567,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (plane_bpp < 0)
goto fail;

/*
* Determine the real pipe dimensions. Note that stereo modes can
* increase the actual pipe size due to the frame doubling and
* insertion of additional space for blanks between the frame. This
* is stored in the crtc timings. We use the requested mode to do this
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
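The doubling the comment describes is significant for frame-packed stereo: both eyes plus the original vertical blank end up in the crtc_* timings while the user-visible mode is unchanged. A rough illustration using the standard 1080p24 HDMI frame-packing layout (numbers are the usual CEA values, not taken from this diff):

#include <stdio.h>

int main(void)
{
	/* 1920x1080p24 base timings: vdisplay 1080, vtotal 1125, 74.25 MHz */
	int vdisplay = 1080, vtotal = 1125, clock = 74250;	/* kHz */

	/* Frame packing: both eyes plus the original vblank as a spacer */
	int crtc_vdisplay = 2 * vdisplay + (vtotal - vdisplay);	/* 2205 */
	int crtc_clock = 2 * clock;				/* 148500 kHz */

	printf("pipe_src_h %d (user mode still reports %d), crtc_clock %d kHz\n",
	       crtc_vdisplay, vdisplay, crtc_clock);
	return 0;
}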
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;

/* Fill in default crtc timings, allow encoders to overwrite them. */
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);

/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also

@@ -8482,8 +8606,8 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
pipe_config->port_clock = pipe_config->adjusted_mode.clock *
pipe_config->pixel_multiplier;
pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;

ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret < 0) {
@ -8813,7 +8937,7 @@ intel_pipe_config_compare(struct drm_device *dev,
|
|||
PIPE_CONF_CHECK_I(pipe_bpp);
|
||||
|
||||
if (!IS_HASWELL(dev)) {
|
||||
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.clock);
|
||||
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
|
||||
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
|
||||
}
|
||||
|
||||
|
@ -9035,9 +9159,9 @@ void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config
|
|||
* FDI already provided one idea for the dotclock.
|
||||
* Yell if the encoder disagrees.
|
||||
*/
|
||||
WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.clock, dotclock),
|
||||
WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
|
||||
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
|
||||
pipe_config->adjusted_mode.clock, dotclock);
|
||||
pipe_config->adjusted_mode.crtc_clock, dotclock);
|
||||
}
|
||||
|
||||
static int __intel_set_mode(struct drm_crtc *crtc,
|
||||
|
@ -9052,7 +9176,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
|
|||
unsigned disable_pipes, prepare_pipes, modeset_pipes;
|
||||
int ret = 0;
|
||||
|
||||
saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
|
||||
saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
|
||||
if (!saved_mode)
|
||||
return -ENOMEM;
|
||||
saved_hwmode = saved_mode + 1;
|
||||
|
@ -9591,7 +9715,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
|
|||
struct intel_crtc *intel_crtc;
|
||||
int i;
|
||||
|
||||
intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
|
||||
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
|
||||
if (intel_crtc == NULL)
|
||||
return;
|
||||
|
||||
|
@ -10270,10 +10394,19 @@ void i915_disable_vga_mem(struct drm_device *dev)
|
|||
|
||||
void intel_modeset_init_hw(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
intel_prepare_ddi(dev);
|
||||
|
||||
intel_init_clock_gating(dev);
|
||||
|
||||
/* Enable the CRI clock source so we can get at the display */
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
|
||||
DPLL_INTEGRATED_CRI_CLK_VLV);
|
||||
|
||||
intel_init_dpio(dev);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
intel_enable_gt_powersave(dev);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -10636,7 +10769,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
enum pipe pipe;
|
||||
struct drm_plane *plane;
|
||||
struct intel_crtc *crtc;
|
||||
struct intel_encoder *encoder;
|
||||
int i;
|
||||
|
@ -10684,6 +10816,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
|
|||
}
|
||||
|
||||
if (force_restore) {
|
||||
i915_redisable_vga(dev);
|
||||
|
||||
/*
|
||||
* We need to use raw interfaces for restoring state to avoid
|
||||
* checking (bogus) intermediate states.
|
||||
|
@ -10695,10 +10829,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
|
|||
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
|
||||
crtc->fb);
|
||||
}
|
||||
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
|
||||
intel_plane_restore(plane);
|
||||
|
||||
i915_redisable_vga(dev);
|
||||
} else {
|
||||
intel_modeset_update_staged_output_state(dev);
|
||||
}
|
||||
|
@ -10721,6 +10851,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_connector *connector;
|
||||
|
||||
/*
|
||||
* Interrupts and polling as the first thing to avoid creating havoc.
|
||||
|
@ -10763,6 +10894,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
|
|||
/* destroy backlight, if any, before the connectors */
|
||||
intel_panel_destroy_backlight(dev);
|
||||
|
||||
/* destroy the sysfs files before encoders/connectors */
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
|
||||
drm_sysfs_connector_remove(connector);
|
||||
|
||||
drm_mode_config_cleanup(dev);
|
||||
|
||||
intel_cleanup_overlay(dev);
|
||||
|
|
|
@@ -59,7 +59,7 @@ static const struct dp_link_dpll pch_dpll[] = {

static const struct dp_link_dpll vlv_dpll[] = {
{ DP_LINK_BW_1_62,
{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 5, .m2 = 3 } },
{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
{ DP_LINK_BW_2_7,
{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
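Plugging both table entries into the clock readout formula from vlv_crtc_clock_get() earlier in this series (100 MHz refclk, vco = refclk*m1*m2/n, dot = 2*vco/(p1*p2), port_clock = dot/10) shows why the 1.62 entry needed fixing; a quick check, illustrative only:

#include <stdio.h>

static long port_clock(int m1, int m2, int n, int p1, int p2)
{
	long refclk = 100000;			/* kHz */
	long vco = refclk * m1 * m2 / n;
	long dot = 2 * vco / (p1 * p2);
	return dot / 10;
}

int main(void)
{
	/* Old 1.62 entry: comes out far from the expected 162,000 kHz */
	printf("old 1.62 entry -> %ld kHz\n", port_clock(5, 3, 5, 3, 2));
	/* New 1.62 entry gives 162,000 kHz, the 2.7 entry gives 270,000 kHz */
	printf("new 1.62 entry -> %ld kHz\n", port_clock(3, 81, 5, 3, 2));
	printf("    2.7  entry -> %ld kHz\n", port_clock(2, 27, 1, 2, 2));
	return 0;
}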
@@ -654,7 +654,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}

for (retry = 0; retry < 5; retry++) {
/*
* DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
* required to retry at least seven times upon receiving AUX_DEFER
* before giving up the AUX transaction.
*/
for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
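The change is a policy bump, not a structural one: the transaction is still a bounded retry loop keyed on the DEFER reply. A stripped-down sketch of that pattern with a hypothetical do_aux_transfer() helper standing in for the real AUX channel access (names and behaviour are placeholders, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum aux_reply { AUX_ACK, AUX_NACK, AUX_DEFER };

/* Placeholder for the actual AUX channel transfer. */
static enum aux_reply do_aux_transfer(void)
{
	static int busy = 3;		/* pretend the sink defers three times */
	return busy-- > 0 ? AUX_DEFER : AUX_ACK;
}

static bool aux_transaction(void)
{
	/* DP 1.2 requires at least seven retries on AUX_DEFER before giving up */
	for (int retry = 0; retry < 7; retry++) {
		switch (do_aux_transfer()) {
		case AUX_ACK:
			return true;	/* done */
		case AUX_NACK:
			return false;	/* hard failure, no point retrying */
		case AUX_DEFER:
			continue;	/* sink is busy, try again */
		}
	}
	return false;
}

int main(void)
{
	printf("transaction %s\n", aux_transaction() ? "succeeded" : "failed");
	return 0;
}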
@@ -811,7 +816,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,

DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], adjusted_mode->clock);
max_lane_count, bws[max_clock],
adjusted_mode->crtc_clock);

/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */

@@ -823,7 +829,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
}

for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);

for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {

@@ -868,7 +875,8 @@ found:
mode_rate, link_avail);

intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->clock, pipe_config->port_clock,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n);

intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
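The bpp/lane/clock walk above is a bandwidth fit: a mode needs roughly crtc_clock * bpp / 8 of payload, and an 8b/10b DP link supplies link_clock * lanes * 8 / 10. A worked check for 1080p60 at 24 bpp against a 2.7 GHz link (standard DP arithmetic; the driver's helpers may round differently):

#include <stdio.h>

static long link_required(long crtc_clock, long bpp)
{
	return crtc_clock * bpp / 8;		/* payload demand */
}

static long link_available(long link_clock, long lanes)
{
	return link_clock * lanes * 8 / 10;	/* 8b/10b overhead removed */
}

int main(void)
{
	long crtc_clock = 148500;	/* kHz, 1920x1080@60 */
	long bpp = 24;

	long need = link_required(crtc_clock, bpp);		/* 445,500 */
	printf("mode needs %ld, 2 lanes give %ld, 4 lanes give %ld\n",
	       need,
	       link_available(270000, 2),	/* 432,000: not enough */
	       link_available(270000, 4));	/* 864,000: fits */
	return 0;
}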
@ -1466,23 +1474,24 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
|
|||
if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
|
||||
ironlake_check_encoder_dotclock(pipe_config, dotclock);
|
||||
|
||||
pipe_config->adjusted_mode.clock = dotclock;
|
||||
pipe_config->adjusted_mode.crtc_clock = dotclock;
|
||||
}
|
||||
|
||||
static bool is_edp_psr(struct intel_dp *intel_dp)
|
||||
static bool is_edp_psr(struct drm_device *dev)
|
||||
{
|
||||
return is_edp(intel_dp) &&
|
||||
intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
return dev_priv->psr.sink_support;
|
||||
}
|
||||
|
||||
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!IS_HASWELL(dev))
|
||||
if (!HAS_PSR(dev))
|
||||
return false;
|
||||
|
||||
return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
|
||||
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
|
||||
}
|
||||
|
||||
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
|
||||
|
@ -1532,7 +1541,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
|
|||
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
|
||||
|
||||
/* Avoid continuous PSR exit by masking memup and hpd */
|
||||
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
|
||||
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
|
||||
EDP_PSR_DEBUG_MASK_HPD);
|
||||
|
||||
intel_dp->psr_setup_done = true;
|
||||
|
@ -1557,9 +1566,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
|
|||
DP_PSR_MAIN_LINK_ACTIVE);
|
||||
|
||||
/* Setup AUX registers */
|
||||
I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
|
||||
I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
|
||||
I915_WRITE(EDP_PSR_AUX_CTL,
|
||||
I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
|
||||
I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
|
||||
I915_WRITE(EDP_PSR_AUX_CTL(dev),
|
||||
DP_AUX_CH_CTL_TIME_OUT_400us |
|
||||
(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
|
||||
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
|
||||
|
@ -1582,7 +1591,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
|
|||
} else
|
||||
val |= EDP_PSR_LINK_DISABLE;
|
||||
|
||||
I915_WRITE(EDP_PSR_CTL, val |
|
||||
I915_WRITE(EDP_PSR_CTL(dev), val |
|
||||
EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
|
||||
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
|
||||
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
|
||||
|
@ -1599,42 +1608,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
|
|||
struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
|
||||
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
|
||||
|
||||
if (!IS_HASWELL(dev)) {
|
||||
dev_priv->psr.source_ok = false;
|
||||
|
||||
if (!HAS_PSR(dev)) {
|
||||
DRM_DEBUG_KMS("PSR not supported on this platform\n");
|
||||
dev_priv->no_psr_reason = PSR_NO_SOURCE;
|
||||
return false;
|
||||
}
|
||||
|
||||
if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
|
||||
(dig_port->port != PORT_A)) {
|
||||
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
|
||||
dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!is_edp_psr(intel_dp)) {
|
||||
DRM_DEBUG_KMS("PSR not supported by this panel\n");
|
||||
dev_priv->no_psr_reason = PSR_NO_SINK;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!i915_enable_psr) {
|
||||
DRM_DEBUG_KMS("PSR disable by flag\n");
|
||||
dev_priv->no_psr_reason = PSR_MODULE_PARAM;
|
||||
return false;
|
||||
}
|
||||
|
||||
crtc = dig_port->base.base.crtc;
|
||||
if (crtc == NULL) {
|
||||
DRM_DEBUG_KMS("crtc not active for PSR\n");
|
||||
dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
|
||||
return false;
|
||||
}
|
||||
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
if (!intel_crtc_active(crtc)) {
|
||||
DRM_DEBUG_KMS("crtc not active for PSR\n");
|
||||
dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1642,29 +1642,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
|
|||
if (obj->tiling_mode != I915_TILING_X ||
|
||||
obj->fence_reg == I915_FENCE_REG_NONE) {
|
||||
DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
|
||||
dev_priv->no_psr_reason = PSR_NOT_TILED;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
|
||||
DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
|
||||
dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
|
||||
S3D_ENABLE) {
|
||||
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
|
||||
dev_priv->no_psr_reason = PSR_S3D_ENABLED;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
|
||||
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
|
||||
dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
|
||||
return false;
|
||||
}
|
||||
|
||||
dev_priv->psr.source_ok = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1703,10 +1700,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
|
|||
if (!intel_edp_is_psr_enabled(dev))
|
||||
return;
|
||||
|
||||
I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
|
||||
I915_WRITE(EDP_PSR_CTL(dev),
|
||||
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
|
||||
|
||||
/* Wait till PSR is idle */
|
||||
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
|
||||
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
|
||||
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
|
||||
DRM_ERROR("Timed out waiting for PSR Idle State\n");
|
||||
}
|
||||
|
@ -1720,7 +1718,7 @@ void intel_edp_psr_update(struct drm_device *dev)
|
|||
if (encoder->type == INTEL_OUTPUT_EDP) {
|
||||
intel_dp = enc_to_intel_dp(&encoder->base);
|
||||
|
||||
if (!is_edp_psr(intel_dp))
|
||||
if (!is_edp_psr(dev))
|
||||
return;
|
||||
|
||||
if (!intel_edp_psr_match_conditions(intel_dp))
|
||||
|
@ -2292,7 +2290,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
|
|||
|
||||
static bool
|
||||
intel_dp_set_link_train(struct intel_dp *intel_dp,
|
||||
uint32_t dp_reg_value,
|
||||
uint32_t *DP,
|
||||
uint8_t dp_train_pat)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
|
@ -2328,50 +2326,51 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
|
|||
I915_WRITE(DP_TP_CTL(port), temp);
|
||||
|
||||
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
|
||||
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
|
||||
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
|
||||
|
||||
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
|
||||
*DP |= DP_LINK_TRAIN_OFF_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
|
||||
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
DRM_ERROR("DP training pattern 3 not supported\n");
|
||||
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
break;
|
||||
}
|
||||
|
||||
} else {
|
||||
dp_reg_value &= ~DP_LINK_TRAIN_MASK;
|
||||
*DP &= ~DP_LINK_TRAIN_MASK;
|
||||
|
||||
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
dp_reg_value |= DP_LINK_TRAIN_OFF;
|
||||
*DP |= DP_LINK_TRAIN_OFF;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
dp_reg_value |= DP_LINK_TRAIN_PAT_1;
|
||||
*DP |= DP_LINK_TRAIN_PAT_1;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
|
||||
*DP |= DP_LINK_TRAIN_PAT_2;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
DRM_ERROR("DP training pattern 3 not supported\n");
|
||||
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
|
||||
*DP |= DP_LINK_TRAIN_PAT_2;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
I915_WRITE(intel_dp->output_reg, dp_reg_value);
|
||||
I915_WRITE(intel_dp->output_reg, *DP);
|
||||
POSTING_READ(intel_dp->output_reg);
|
||||
|
||||
intel_dp_aux_native_write_1(intel_dp,
|
||||
DP_TRAINING_PATTERN_SET,
|
||||
dp_train_pat);
|
||||
ret = intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET,
|
||||
dp_train_pat);
|
||||
if (ret != 1)
|
||||
return false;
|
||||
|
||||
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
|
||||
DP_TRAINING_PATTERN_DISABLE) {
|
||||
|
@ -2386,6 +2385,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
|
||||
uint8_t dp_train_pat)
|
||||
{
|
||||
memset(intel_dp->train_set, 0, 4);
|
||||
intel_dp_set_signal_levels(intel_dp, DP);
|
||||
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
|
||||
uint8_t link_status[DP_LINK_STATUS_SIZE])
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
intel_get_adjust_train(intel_dp, link_status);
|
||||
intel_dp_set_signal_levels(intel_dp, DP);
|
||||
|
||||
I915_WRITE(intel_dp->output_reg, *DP);
|
||||
POSTING_READ(intel_dp->output_reg);
|
||||
|
||||
ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
|
||||
intel_dp->train_set,
|
||||
intel_dp->lane_count);
|
||||
|
||||
return ret == intel_dp->lane_count;
|
||||
}
|
||||
|
||||
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
|
@ -2445,21 +2475,19 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
|||
|
||||
DP |= DP_PORT_EN;
|
||||
|
||||
memset(intel_dp->train_set, 0, 4);
|
||||
/* clock recovery */
|
||||
if (!intel_dp_reset_link_train(intel_dp, &DP,
|
||||
DP_TRAINING_PATTERN_1 |
|
||||
DP_LINK_SCRAMBLING_DISABLE)) {
|
||||
DRM_ERROR("failed to enable link training\n");
|
||||
return;
|
||||
}
|
||||
|
||||
voltage = 0xff;
|
||||
voltage_tries = 0;
|
||||
loop_tries = 0;
|
||||
for (;;) {
|
||||
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
|
||||
uint8_t link_status[DP_LINK_STATUS_SIZE];
|
||||
|
||||
intel_dp_set_signal_levels(intel_dp, &DP);
|
||||
|
||||
/* Set training pattern 1 */
|
||||
if (!intel_dp_set_link_train(intel_dp, DP,
|
||||
DP_TRAINING_PATTERN_1 |
|
||||
DP_LINK_SCRAMBLING_DISABLE))
|
||||
break;
|
||||
uint8_t link_status[DP_LINK_STATUS_SIZE];
|
||||
|
||||
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
|
||||
if (!intel_dp_get_link_status(intel_dp, link_status)) {
|
||||
|
@ -2482,7 +2510,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
|||
DRM_DEBUG_KMS("too many full retries, give up\n");
|
||||
break;
|
||||
}
|
||||
memset(intel_dp->train_set, 0, 4);
|
||||
intel_dp_reset_link_train(intel_dp, &DP,
|
||||
DP_TRAINING_PATTERN_1 |
|
||||
DP_LINK_SCRAMBLING_DISABLE);
|
||||
voltage_tries = 0;
|
||||
continue;
|
||||
}
|
||||
|
@ -2498,8 +2528,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
|||
voltage_tries = 0;
|
||||
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
|
||||
|
||||
/* Compute new intel_dp->train_set as requested by target */
|
||||
intel_get_adjust_train(intel_dp, link_status);
|
||||
/* Update training set as requested by target */
|
||||
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
|
||||
DRM_ERROR("failed to update link training\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
intel_dp->DP = DP;
|
||||
|
@ -2513,11 +2546,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
|||
uint32_t DP = intel_dp->DP;
|
||||
|
||||
/* channel equalization */
|
||||
if (!intel_dp_set_link_train(intel_dp, &DP,
|
||||
DP_TRAINING_PATTERN_2 |
|
||||
DP_LINK_SCRAMBLING_DISABLE)) {
|
||||
DRM_ERROR("failed to start channel equalization\n");
|
||||
return;
|
||||
}
|
||||
|
||||
tries = 0;
|
||||
cr_tries = 0;
|
||||
channel_eq = false;
|
||||
for (;;) {
|
||||
uint8_t link_status[DP_LINK_STATUS_SIZE];
|
||||
uint8_t link_status[DP_LINK_STATUS_SIZE];
|
||||
|
||||
if (cr_tries > 5) {
|
||||
DRM_ERROR("failed to train DP, aborting\n");
|
||||
|
@ -2525,21 +2565,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
|||
break;
|
||||
}
|
||||
|
||||
intel_dp_set_signal_levels(intel_dp, &DP);
|
||||
|
||||
/* channel eq pattern */
|
||||
if (!intel_dp_set_link_train(intel_dp, DP,
|
||||
DP_TRAINING_PATTERN_2 |
|
||||
DP_LINK_SCRAMBLING_DISABLE))
|
||||
break;
|
||||
|
||||
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
|
||||
if (!intel_dp_get_link_status(intel_dp, link_status))
|
||||
if (!intel_dp_get_link_status(intel_dp, link_status)) {
|
||||
DRM_ERROR("failed to get link status\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* Make sure clock is still ok */
|
||||
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
|
||||
intel_dp_start_link_train(intel_dp);
|
||||
intel_dp_set_link_train(intel_dp, &DP,
|
||||
DP_TRAINING_PATTERN_2 |
|
||||
DP_LINK_SCRAMBLING_DISABLE);
|
||||
cr_tries++;
|
||||
continue;
|
||||
}
|
||||
|
@ -2553,13 +2590,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
|||
if (tries > 5) {
|
||||
intel_dp_link_down(intel_dp);
|
||||
intel_dp_start_link_train(intel_dp);
|
||||
intel_dp_set_link_train(intel_dp, &DP,
|
||||
DP_TRAINING_PATTERN_2 |
|
||||
DP_LINK_SCRAMBLING_DISABLE);
|
||||
tries = 0;
|
||||
cr_tries++;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Compute new intel_dp->train_set as requested by target */
|
||||
intel_get_adjust_train(intel_dp, link_status);
|
||||
/* Update training set as requested by target */
|
||||
if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
|
||||
DRM_ERROR("failed to update link training\n");
|
||||
break;
|
||||
}
|
||||
++tries;
|
||||
}
|
||||
|
||||
|
@ -2574,7 +2617,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
|
|||
|
||||
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
|
||||
{
|
||||
intel_dp_set_link_train(intel_dp, intel_dp->DP,
|
||||
intel_dp_set_link_train(intel_dp, &intel_dp->DP,
|
||||
DP_TRAINING_PATTERN_DISABLE);
|
||||
}
|
||||
|
||||
|
@ -2661,6 +2704,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
|
|||
static bool
|
||||
intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
|
||||
|
||||
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
|
||||
|
@ -2676,11 +2723,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
|||
|
||||
/* Check if the panel supports PSR */
|
||||
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
|
||||
intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
|
||||
intel_dp->psr_dpcd,
|
||||
sizeof(intel_dp->psr_dpcd));
|
||||
if (is_edp_psr(intel_dp))
|
||||
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
|
||||
if (is_edp(intel_dp)) {
|
||||
intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
|
||||
intel_dp->psr_dpcd,
|
||||
sizeof(intel_dp->psr_dpcd));
|
||||
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
|
||||
dev_priv->psr.sink_support = true;
|
||||
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
|
||||
}
|
||||
}
|
||||
|
||||
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
|
||||
DP_DWN_STRM_PORT_PRESENT))
|
||||
return true; /* native DP sink */
|
||||
|
@ -3122,7 +3174,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
|
|||
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
|
||||
intel_panel_fini(&intel_connector->panel);
|
||||
|
||||
drm_sysfs_connector_remove(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
kfree(connector);
|
||||
}
|
||||
|
@ -3193,7 +3244,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
|
|||
bool intel_dpd_is_edp(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct child_device_config *p_child;
|
||||
union child_device_config *p_child;
|
||||
int i;
|
||||
|
||||
if (!dev_priv->vbt.child_dev_num)
|
||||
|
@ -3202,8 +3253,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
|
|||
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
p_child = dev_priv->vbt.child_dev + i;
|
||||
|
||||
if (p_child->dvo_port == PORT_IDPD &&
|
||||
p_child->device_type == DEVICE_TYPE_eDP)
|
||||
if (p_child->common.dvo_port == PORT_IDPD &&
|
||||
p_child->common.device_type == DEVICE_TYPE_eDP)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -3615,11 +3666,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
|
|||
struct drm_encoder *encoder;
|
||||
struct intel_connector *intel_connector;
|
||||
|
||||
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
|
||||
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
|
||||
if (!intel_dig_port)
|
||||
return;
|
||||
|
||||
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
|
||||
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
|
||||
if (!intel_connector) {
|
||||
kfree(intel_dig_port);
|
||||
return;
|
||||
|
|
|
@ -77,7 +77,6 @@
|
|||
/* the i915, i945 have a single sDVO i2c bus - which is different */
|
||||
#define MAX_OUTPUTS 6
|
||||
/* maximum connectors per crtcs in the mode set */
|
||||
#define INTELFB_CONN_LIMIT 4
|
||||
|
||||
#define INTEL_I2C_BUS_DVO 1
|
||||
#define INTEL_I2C_BUS_SDVO 2
|
||||
|
@ -218,7 +217,7 @@ struct intel_crtc_config {
|
|||
* preferred input timings. */
|
||||
struct drm_display_mode requested_mode;
|
||||
/* Actual pipe timings ie. what we program into the pipe timing
|
||||
* registers. adjusted_mode.clock is the pipe pixel clock. */
|
||||
* registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
|
||||
struct drm_display_mode adjusted_mode;
|
||||
|
||||
/* Pipe source size (ie. panel fitter input size)
|
||||
|
@ -513,80 +512,6 @@ struct intel_unpin_work {
|
|||
bool enable_stall_check;
|
||||
};
|
||||
|
||||
int intel_pch_rawclk(struct drm_device *dev);
|
||||
|
||||
int intel_connector_update_modes(struct drm_connector *connector,
|
||||
struct edid *edid);
|
||||
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
|
||||
|
||||
extern void intel_attach_force_audio_property(struct drm_connector *connector);
|
||||
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
|
||||
|
||||
extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
|
||||
extern void intel_crt_init(struct drm_device *dev);
|
||||
extern void intel_hdmi_init(struct drm_device *dev,
|
||||
int hdmi_reg, enum port port);
|
||||
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
|
||||
extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config);
|
||||
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
|
||||
bool is_sdvob);
|
||||
extern void intel_dvo_init(struct drm_device *dev);
|
||||
extern void intel_tv_init(struct drm_device *dev);
|
||||
extern void intel_mark_busy(struct drm_device *dev);
|
||||
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
|
||||
struct intel_ring_buffer *ring);
|
||||
extern void intel_mark_idle(struct drm_device *dev);
|
||||
extern void intel_lvds_init(struct drm_device *dev);
|
||||
extern bool intel_dsi_init(struct drm_device *dev);
|
||||
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
|
||||
extern void intel_dp_init(struct drm_device *dev, int output_reg,
|
||||
enum port port);
|
||||
extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
|
||||
extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
|
||||
extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
|
||||
extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
|
||||
extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
|
||||
extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
|
||||
extern bool intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config);
|
||||
extern bool intel_dpd_is_edp(struct drm_device *dev);
|
||||
extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
|
||||
extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
|
||||
extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
|
||||
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
|
||||
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
|
||||
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
|
||||
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
|
||||
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
|
||||
enum plane plane);
|
||||
|
||||
/* intel_panel.c */
|
||||
extern int intel_panel_init(struct intel_panel *panel,
|
||||
struct drm_display_mode *fixed_mode);
|
||||
extern void intel_panel_fini(struct intel_panel *panel);
|
||||
|
||||
extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
|
||||
struct drm_display_mode *adjusted_mode);
|
||||
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
|
||||
struct intel_crtc_config *pipe_config,
|
||||
int fitting_mode);
|
||||
extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
|
||||
struct intel_crtc_config *pipe_config,
|
||||
int fitting_mode);
|
||||
extern void intel_panel_set_backlight(struct drm_device *dev,
|
||||
u32 level, u32 max);
|
||||
extern int intel_panel_setup_backlight(struct drm_connector *connector);
|
||||
extern void intel_panel_enable_backlight(struct drm_device *dev,
|
||||
enum pipe pipe);
|
||||
extern void intel_panel_disable_backlight(struct drm_device *dev);
|
||||
extern void intel_panel_destroy_backlight(struct drm_device *dev);
|
||||
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
|
||||
|
||||
struct intel_set_config {
|
||||
struct drm_encoder **save_connector_encoders;
|
||||
struct drm_crtc **save_encoder_crtcs;
|
||||
|
@ -595,18 +520,14 @@ struct intel_set_config {
|
|||
bool mode_changed;
|
||||
};
|
||||
|
||||
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
|
||||
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
|
||||
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
|
||||
extern void intel_encoder_destroy(struct drm_encoder *encoder);
|
||||
extern void intel_connector_dpms(struct drm_connector *, int mode);
|
||||
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
|
||||
extern void intel_modeset_check_state(struct drm_device *dev);
|
||||
extern void intel_plane_restore(struct drm_plane *plane);
|
||||
extern void intel_plane_disable(struct drm_plane *plane);
|
||||
struct intel_load_detect_pipe {
|
||||
struct drm_framebuffer *release_fb;
|
||||
bool load_detect_temp;
|
||||
int dpms_mode;
|
||||
};
|
||||
|
||||
|
||||
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
|
||||
static inline struct intel_encoder *
|
||||
intel_attached_encoder(struct drm_connector *connector)
|
||||
{
|
||||
return to_intel_connector(connector)->encoder;
|
||||
}
|
||||
|
@ -634,73 +555,94 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
|
|||
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
|
||||
}
|
||||
|
||||
|
||||
/* i915_irq.c */
|
||||
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum pipe pipe, bool enable);
|
||||
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum transcoder pch_transcoder,
|
||||
bool enable);
|
||||
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
void hsw_pc8_disable_interrupts(struct drm_device *dev);
|
||||
void hsw_pc8_restore_interrupts(struct drm_device *dev);
|
||||
|
||||
|
||||
/* intel_crt.c */
|
||||
void intel_crt_init(struct drm_device *dev);
|
||||
|
||||
|
||||
/* intel_ddi.c */
|
||||
void intel_prepare_ddi(struct drm_device *dev);
|
||||
void hsw_fdi_link_train(struct drm_crtc *crtc);
|
||||
void intel_ddi_init(struct drm_device *dev, enum port port);
|
||||
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
|
||||
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
|
||||
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
|
||||
void intel_ddi_pll_init(struct drm_device *dev);
|
||||
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
|
||||
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
|
||||
enum transcoder cpu_transcoder);
|
||||
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
|
||||
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
|
||||
void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
|
||||
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
|
||||
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
|
||||
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
|
||||
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
|
||||
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
|
||||
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
|
||||
void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config);
|
||||
|
||||
|
||||
/* intel_display.c */
|
||||
int intel_pch_rawclk(struct drm_device *dev);
|
||||
void intel_mark_busy(struct drm_device *dev);
|
||||
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
|
||||
struct intel_ring_buffer *ring);
|
||||
void intel_mark_idle(struct drm_device *dev);
|
||||
void intel_crtc_restore_mode(struct drm_crtc *crtc);
|
||||
void intel_crtc_update_dpms(struct drm_crtc *crtc);
|
||||
void intel_encoder_destroy(struct drm_encoder *encoder);
|
||||
void intel_connector_dpms(struct drm_connector *, int mode);
|
||||
bool intel_connector_get_hw_state(struct intel_connector *connector);
|
||||
void intel_modeset_check_state(struct drm_device *dev);
|
||||
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port);
|
||||
|
||||
extern void intel_connector_attach_encoder(struct intel_connector *connector,
|
||||
struct intel_encoder *encoder);
|
||||
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
|
||||
|
||||
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
|
||||
struct drm_crtc *crtc);
|
||||
void intel_connector_attach_encoder(struct intel_connector *connector,
|
||||
struct intel_encoder *encoder);
|
||||
struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
|
||||
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
|
||||
struct drm_crtc *crtc);
|
||||
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern enum transcoder
|
||||
intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe);
|
||||
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
|
||||
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
|
||||
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
|
||||
extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
|
||||
|
||||
struct intel_load_detect_pipe {
|
||||
struct drm_framebuffer *release_fb;
|
||||
bool load_detect_temp;
|
||||
int dpms_mode;
|
||||
};
|
||||
extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode,
|
||||
struct intel_load_detect_pipe *old);
|
||||
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
|
||||
struct intel_load_detect_pipe *old);
|
||||
|
||||
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
|
||||
u16 blue, int regno);
|
||||
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
|
||||
u16 *blue, int regno);
|
||||
|
||||
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
|
||||
struct drm_i915_gem_object *obj,
|
||||
struct intel_ring_buffer *pipelined);
|
||||
extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
|
||||
|
||||
extern int intel_framebuffer_init(struct drm_device *dev,
|
||||
struct intel_framebuffer *ifb,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_i915_gem_object *obj);
|
||||
extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
|
||||
extern int intel_fbdev_init(struct drm_device *dev);
|
||||
extern void intel_fbdev_initial_config(struct drm_device *dev);
|
||||
extern void intel_fbdev_fini(struct drm_device *dev);
|
||||
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
|
||||
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
|
||||
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
|
||||
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
|
||||
|
||||
extern void intel_setup_overlay(struct drm_device *dev);
|
||||
extern void intel_cleanup_overlay(struct drm_device *dev);
|
||||
extern int intel_overlay_switch_off(struct intel_overlay *overlay);
|
||||
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
extern void intel_fb_output_poll_changed(struct drm_device *dev);
|
||||
extern void intel_fb_restore_mode(struct drm_device *dev);
|
||||
|
||||
struct intel_shared_dpll *
|
||||
intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
|
||||
|
||||
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe);
|
||||
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
|
||||
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
|
||||
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
|
||||
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
|
||||
bool intel_get_load_detect_pipe(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode,
|
||||
struct intel_load_detect_pipe *old);
|
||||
void intel_release_load_detect_pipe(struct drm_connector *connector,
|
||||
struct intel_load_detect_pipe *old);
|
||||
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
|
||||
struct drm_i915_gem_object *obj,
|
||||
struct intel_ring_buffer *pipelined);
|
||||
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
|
||||
int intel_framebuffer_init(struct drm_device *dev,
|
||||
struct intel_framebuffer *ifb,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_i915_gem_object *obj);
|
||||
void intel_framebuffer_fini(struct intel_framebuffer *fb);
|
||||
void intel_prepare_page_flip(struct drm_device *dev, int plane);
|
||||
void intel_finish_page_flip(struct drm_device *dev, int pipe);
|
||||
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
|
||||
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
|
||||
void assert_shared_dpll(struct drm_i915_private *dev_priv,
|
||||
struct intel_shared_dpll *pll,
|
||||
bool state);
|
||||
|
@@ -714,117 +656,173 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
|
|||
enum pipe pipe, bool state);
|
||||
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
|
||||
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
|
||||
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
bool state);
|
||||
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
|
||||
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
|
||||
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
|
||||
void intel_write_eld(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode);
|
||||
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
|
||||
unsigned int tiling_mode,
|
||||
unsigned int bpp,
|
||||
unsigned int pitch);
|
||||
void intel_display_handle_reset(struct drm_device *dev);
|
||||
void hsw_enable_pc8_work(struct work_struct *__work);
|
||||
void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
|
||||
void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
|
||||
void intel_dp_get_m_n(struct intel_crtc *crtc,
|
||||
struct intel_crtc_config *pipe_config);
|
||||
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
|
||||
void
|
||||
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
|
||||
int dotclock);
|
||||
bool intel_crtc_active(struct drm_crtc *crtc);
|
||||
void i915_disable_vga_mem(struct drm_device *dev);
|
||||
|
||||
extern void intel_init_clock_gating(struct drm_device *dev);
|
||||
extern void intel_suspend_hw(struct drm_device *dev);
|
||||
extern void intel_write_eld(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode);
|
||||
extern void intel_prepare_ddi(struct drm_device *dev);
|
||||
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
|
||||
extern void intel_ddi_init(struct drm_device *dev, enum port port);
|
||||
extern enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
|
||||
|
||||
/* For use by IVB LP watermark workaround in intel_sprite.c */
|
||||
extern void intel_update_watermarks(struct drm_crtc *crtc);
|
||||
extern void intel_update_sprite_watermarks(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc,
|
||||
uint32_t sprite_width, int pixel_size,
|
||||
bool enabled, bool scaled);
|
||||
|
||||
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
|
||||
unsigned int tiling_mode,
|
||||
unsigned int bpp,
|
||||
unsigned int pitch);
|
||||
|
||||
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
/* Power-related functions, located in intel_pm.c */
|
||||
extern void intel_init_pm(struct drm_device *dev);
|
||||
/* FBC */
|
||||
extern bool intel_fbc_enabled(struct drm_device *dev);
|
||||
extern void intel_update_fbc(struct drm_device *dev);
|
||||
/* IPS */
|
||||
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
|
||||
extern void intel_gpu_ips_teardown(void);
|
||||
|
||||
/* Power well */
|
||||
extern int i915_init_power_well(struct drm_device *dev);
|
||||
extern void i915_remove_power_well(struct drm_device *dev);
|
||||
|
||||
extern bool intel_display_power_enabled(struct drm_device *dev,
|
||||
enum intel_display_power_domain domain);
|
||||
extern void intel_display_power_get(struct drm_device *dev,
|
||||
enum intel_display_power_domain domain);
|
||||
extern void intel_display_power_put(struct drm_device *dev,
|
||||
enum intel_display_power_domain domain);
|
||||
extern void intel_init_power_well(struct drm_device *dev);
|
||||
extern void intel_set_power_well(struct drm_device *dev, bool enable);
|
||||
extern void intel_resume_power_well(struct drm_device *dev);
|
||||
extern void intel_enable_gt_powersave(struct drm_device *dev);
|
||||
extern void intel_disable_gt_powersave(struct drm_device *dev);
|
||||
extern void ironlake_teardown_rc6(struct drm_device *dev);
|
||||
void gen6_update_ring_freq(struct drm_device *dev);
|
||||
|
||||
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
|
||||
enum pipe *pipe);
|
||||
extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
|
||||
extern void intel_ddi_pll_init(struct drm_device *dev);
|
||||
extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
|
||||
extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
|
||||
enum transcoder cpu_transcoder);
|
||||
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
|
||||
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
|
||||
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
|
||||
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
|
||||
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
|
||||
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
|
||||
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
|
||||
extern bool
|
||||
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
|
||||
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
|
||||
|
||||
extern void intel_display_handle_reset(struct drm_device *dev);
|
||||
extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum pipe pipe,
|
||||
bool enable);
|
||||
extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum transcoder pch_transcoder,
|
||||
bool enable);
|
||||
|
||||
extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
|
||||
extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
|
||||
extern void intel_edp_psr_update(struct drm_device *dev);
|
||||
extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
|
||||
bool switch_to_fclk, bool allow_power_down);
|
||||
extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
|
||||
extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
|
||||
uint32_t mask);
|
||||
extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
|
||||
extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
|
||||
uint32_t mask);
|
||||
extern void hsw_enable_pc8_work(struct work_struct *__work);
|
||||
extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
|
||||
extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
|
||||
extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
|
||||
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
|
||||
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
|
||||
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
|
||||
extern void intel_dp_get_m_n(struct intel_crtc *crtc,
|
||||
/* intel_dp.c */
|
||||
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
|
||||
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
void intel_dp_start_link_train(struct intel_dp *intel_dp);
|
||||
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
|
||||
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
|
||||
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
|
||||
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
|
||||
void intel_dp_check_link_status(struct intel_dp *intel_dp);
|
||||
bool intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config);
|
||||
extern int intel_dotclock_calculate(int link_freq,
|
||||
const struct intel_link_m_n *m_n);
|
||||
extern void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
|
||||
int dotclock);
|
||||
bool intel_dpd_is_edp(struct drm_device *dev);
|
||||
void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
|
||||
void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
|
||||
void ironlake_edp_panel_on(struct intel_dp *intel_dp);
|
||||
void ironlake_edp_panel_off(struct intel_dp *intel_dp);
|
||||
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
|
||||
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
|
||||
void intel_edp_psr_enable(struct intel_dp *intel_dp);
|
||||
void intel_edp_psr_disable(struct intel_dp *intel_dp);
|
||||
void intel_edp_psr_update(struct drm_device *dev);
|
||||
|
||||
extern bool intel_crtc_active(struct drm_crtc *crtc);
|
||||
extern void i915_disable_vga_mem(struct drm_device *dev);
|
||||
|
||||
/* intel_dsi.c */
|
||||
bool intel_dsi_init(struct drm_device *dev);
|
||||
|
||||
|
||||
/* intel_dvo.c */
|
||||
void intel_dvo_init(struct drm_device *dev);
|
||||
|
||||
|
||||
/* intel_fb.c */
|
||||
int intel_fbdev_init(struct drm_device *dev);
|
||||
void intel_fbdev_initial_config(struct drm_device *dev);
|
||||
void intel_fbdev_fini(struct drm_device *dev);
|
||||
void intel_fbdev_set_suspend(struct drm_device *dev, int state);
|
||||
void intel_fb_output_poll_changed(struct drm_device *dev);
|
||||
void intel_fb_restore_mode(struct drm_device *dev);
|
||||
|
||||
|
||||
/* intel_hdmi.c */
|
||||
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
|
||||
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
|
||||
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_config *pipe_config);
|
||||
|
||||
|
||||
/* intel_lvds.c */
|
||||
void intel_lvds_init(struct drm_device *dev);
|
||||
bool intel_is_dual_link_lvds(struct drm_device *dev);
|
||||
|
||||
|
||||
/* intel_modes.c */
|
||||
int intel_connector_update_modes(struct drm_connector *connector,
|
||||
struct edid *edid);
|
||||
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
|
||||
void intel_attach_force_audio_property(struct drm_connector *connector);
|
||||
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
|
||||
|
||||
|
||||
/* intel_overlay.c */
|
||||
void intel_setup_overlay(struct drm_device *dev);
|
||||
void intel_cleanup_overlay(struct drm_device *dev);
|
||||
int intel_overlay_switch_off(struct intel_overlay *overlay);
|
||||
int intel_overlay_put_image(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int intel_overlay_attrs(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
|
||||
/* intel_panel.c */
|
||||
int intel_panel_init(struct intel_panel *panel,
|
||||
struct drm_display_mode *fixed_mode);
|
||||
void intel_panel_fini(struct intel_panel *panel);
|
||||
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
|
||||
struct drm_display_mode *adjusted_mode);
|
||||
void intel_pch_panel_fitting(struct intel_crtc *crtc,
|
||||
struct intel_crtc_config *pipe_config,
|
||||
int fitting_mode);
|
||||
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
|
||||
struct intel_crtc_config *pipe_config,
|
||||
int fitting_mode);
|
||||
void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max);
|
||||
int intel_panel_setup_backlight(struct drm_connector *connector);
|
||||
void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe);
|
||||
void intel_panel_disable_backlight(struct drm_device *dev);
|
||||
void intel_panel_destroy_backlight(struct drm_device *dev);
|
||||
enum drm_connector_status intel_panel_detect(struct drm_device *dev);
|
||||
|
||||
|
||||
/* intel_pm.c */
|
||||
void intel_init_clock_gating(struct drm_device *dev);
|
||||
void intel_suspend_hw(struct drm_device *dev);
|
||||
void intel_update_watermarks(struct drm_crtc *crtc);
|
||||
void intel_update_sprite_watermarks(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc,
|
||||
uint32_t sprite_width, int pixel_size,
|
||||
bool enabled, bool scaled);
|
||||
void intel_init_pm(struct drm_device *dev);
|
||||
bool intel_fbc_enabled(struct drm_device *dev);
|
||||
void intel_update_fbc(struct drm_device *dev);
|
||||
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
|
||||
void intel_gpu_ips_teardown(void);
|
||||
int i915_init_power_well(struct drm_device *dev);
|
||||
void i915_remove_power_well(struct drm_device *dev);
|
||||
bool intel_display_power_enabled(struct drm_device *dev,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_display_power_get(struct drm_device *dev,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_display_power_put(struct drm_device *dev,
|
||||
enum intel_display_power_domain domain);
|
||||
void intel_init_power_well(struct drm_device *dev);
|
||||
void intel_set_power_well(struct drm_device *dev, bool enable);
|
||||
void intel_enable_gt_powersave(struct drm_device *dev);
|
||||
void intel_disable_gt_powersave(struct drm_device *dev);
|
||||
void ironlake_teardown_rc6(struct drm_device *dev);
|
||||
void gen6_update_ring_freq(struct drm_device *dev);
|
||||
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
|
||||
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
|
||||
|
||||
|
||||
/* intel_sdvo.c */
|
||||
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
|
||||
|
||||
|
||||
/* intel_sprite.c */
|
||||
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
|
||||
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
|
||||
enum plane plane);
|
||||
void intel_plane_restore(struct drm_plane *plane);
|
||||
void intel_plane_disable(struct drm_plane *plane);
|
||||
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
|
||||
/* intel_tv.c */
|
||||
void intel_tv_init(struct drm_device *dev);
|
||||
|
||||
void gen6_rps_idle(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_boost(struct drm_i915_private *dev_priv);
|
||||
|
||||
#endif /* __INTEL_DRV_H__ */
@@ -504,7 +504,6 @@ static void intel_dsi_destroy(struct drm_connector *connector)

DRM_DEBUG_KMS("\n");
intel_panel_fini(&intel_connector->panel);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}

@@ -154,7 +154,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;

pipe_config->adjusted_mode.clock = pipe_config->port_clock;
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}

static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -367,7 +367,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)

static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -448,11 +447,11 @@ void intel_dvo_init(struct drm_device *dev)
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;

intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;

intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dvo);
return;

@@ -184,6 +184,27 @@ out:
return ret;
}

/** Sets the color ramps on behalf of RandR */
static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

intel_crtc->lut_r[regno] = red >> 8;
intel_crtc->lut_g[regno] = green >> 8;
intel_crtc->lut_b[regno] = blue >> 8;
}

static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

*red = intel_crtc->lut_r[regno] << 8;
*green = intel_crtc->lut_g[regno] << 8;
*blue = intel_crtc->lut_b[regno] << 8;
}

static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;

ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return -ENOMEM;

@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)

ret = drm_fb_helper_init(dev, &ifbdev->helper,
INTEL_INFO(dev)->num_pipes,
INTELFB_CONN_LIMIT);
4);
if (ret) {
kfree(ifbdev);
return ret;

@@ -737,7 +737,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);

pipe_config->adjusted_mode.clock = dotclock;
pipe_config->adjusted_mode.crtc_clock = dotclock;
}

static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -873,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->adjusted_mode.clock * 3 / 2;
int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;

@@ -915,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}

if (adjusted_mode->clock > portclock_limit) {
if (adjusted_mode->crtc_clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
@@ -1181,7 +1181,6 @@ static void intel_hdmi_post_disable(struct intel_encoder *encoder)

static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1228,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,

connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;

switch (port) {
case PORT_B:
@@ -1292,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;

intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;

intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;

@@ -34,6 +34,11 @@
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
enum disp_clk {
|
||||
CDCLK,
|
||||
CZCLK
|
||||
};
|
||||
|
||||
struct gmbus_port {
|
||||
const char *name;
|
||||
int reg;
|
||||
|
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
|
||||
}
|
||||
|
||||
static int get_disp_clk_div(struct drm_i915_private *dev_priv,
|
||||
enum disp_clk clk)
|
||||
{
|
||||
u32 reg_val;
|
||||
int clk_ratio;
|
||||
|
||||
reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
|
||||
|
||||
if (clk == CDCLK)
|
||||
clk_ratio =
|
||||
((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
|
||||
else
|
||||
clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
|
||||
|
||||
return clk_ratio;
|
||||
}
|
||||
|
||||
static void gmbus_set_freq(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int vco_freq[] = { 800, 1600, 2000, 2400 };
|
||||
int gmbus_freq = 0, cdclk_div, hpll_freq;
|
||||
|
||||
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
|
||||
|
||||
/* Skip setting the gmbus freq if BIOS has already programmed it */
|
||||
if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
|
||||
return;
|
||||
|
||||
/* Obtain SKU information */
|
||||
mutex_lock(&dev_priv->dpio_lock);
|
||||
hpll_freq =
|
||||
vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
|
||||
mutex_unlock(&dev_priv->dpio_lock);
|
||||
|
||||
/* Get the CDCLK divide ratio */
|
||||
cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
|
||||
|
||||
/*
|
||||
* Program the gmbus_freq based on the cdclk frequency.
|
||||
* BSpec erroneously claims we should aim for 4MHz, but
|
||||
* in fact 1MHz is the correct frequency.
|
||||
*/
|
||||
if (cdclk_div)
|
||||
gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
|
||||
|
||||
if (WARN_ON(gmbus_freq == 0))
|
||||
return;
|
||||
|
||||
I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
|
||||
}
|
||||
|
||||
void
|
||||
intel_i2c_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/*
|
||||
* In BIOS-less system, program the correct gmbus frequency
|
||||
* before reading edid.
|
||||
*/
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
gmbus_set_freq(dev_priv);
|
||||
|
||||
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
|
||||
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
|
||||
}
@@ -123,7 +123,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
|||
if (HAS_PCH_SPLIT(dev_priv->dev))
|
||||
ironlake_check_encoder_dotclock(pipe_config, dotclock);
|
||||
|
||||
pipe_config->adjusted_mode.clock = dotclock;
|
||||
pipe_config->adjusted_mode.crtc_clock = dotclock;
|
||||
}
|
||||
|
||||
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
|
||||
|
@ -474,7 +474,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
|
|||
|
||||
intel_panel_fini(&lvds_connector->base.panel);
|
||||
|
||||
drm_sysfs_connector_remove(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
kfree(connector);
|
||||
}
|
||||
|
@ -794,7 +793,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
|
|||
return true;
|
||||
|
||||
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
struct child_device_config *child = dev_priv->vbt.child_dev + i;
|
||||
union child_device_config *uchild = dev_priv->vbt.child_dev + i;
|
||||
struct old_child_dev_config *child = &uchild->old;
|
||||
|
||||
/* If the device type is not LFP, continue.
|
||||
* We have to check both the new identifiers as well as the
|
||||
|
@ -948,11 +948,11 @@ void intel_lvds_init(struct drm_device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
|
||||
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
|
||||
if (!lvds_encoder)
|
||||
return;
|
||||
|
||||
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
|
||||
lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
|
||||
if (!lvds_connector) {
|
||||
kfree(lvds_encoder);
|
||||
return;
|
||||
|
|
|
@ -1053,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
|
|||
return ret;
|
||||
}
|
||||
|
||||
params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
|
||||
params = kmalloc(sizeof(*params), GFP_KERNEL);
|
||||
if (!params)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1320,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
|
|||
if (!HAS_OVERLAY(dev))
|
||||
return;
|
||||
|
||||
overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
|
||||
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
|
||||
if (!overlay)
|
||||
return;
|
||||
|
||||
|
|
|
@@ -329,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
if (IS_GEN4(dev))
|
||||
return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
|
||||
|
||||
if (IS_GEN2(dev))
|
||||
|
@ -372,6 +372,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
|
|||
I915_WRITE(BLC_PWM_CTL2,
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2);
|
||||
}
|
||||
|
||||
if (IS_VALLEYVIEW(dev) && !val)
|
||||
val = 0x0f42ffff;
|
||||
}
|
||||
|
||||
return val;
|
||||
|
@ -629,10 +632,24 @@ set_level:
|
|||
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
|
||||
}
|
||||
|
||||
/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
|
||||
static void intel_panel_init_backlight_regs(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (IS_VALLEYVIEW(dev)) {
|
||||
u32 cur_val = I915_READ(BLC_PWM_CTL) &
|
||||
BACKLIGHT_DUTY_CYCLE_MASK;
|
||||
I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_panel_init_backlight(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
intel_panel_init_backlight_regs(dev);
|
||||
|
||||
dev_priv->backlight.level = intel_panel_get_backlight(dev);
|
||||
dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
|
||||
}
@@ -370,7 +370,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
|||
|
||||
intel_cancel_fbc_work(dev_priv);
|
||||
|
||||
work = kzalloc(sizeof *work, GFP_KERNEL);
|
||||
work = kzalloc(sizeof(*work), GFP_KERNEL);
|
||||
if (work == NULL) {
|
||||
DRM_ERROR("Failed to allocate FBC work structure\n");
|
||||
dev_priv->display.enable_fbc(crtc, interval);
|
||||
|
@ -1100,8 +1100,12 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
|
|||
|
||||
crtc = single_enabled_crtc(dev);
|
||||
if (crtc) {
|
||||
int clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
|
||||
const struct drm_display_mode *adjusted_mode;
|
||||
int pixel_size = crtc->fb->bits_per_pixel / 8;
|
||||
int clock;
|
||||
|
||||
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
clock = adjusted_mode->crtc_clock;
|
||||
|
||||
/* Display SR */
|
||||
wm = intel_calculate_wm(clock, &pineview_display_wm,
|
||||
|
@ -1174,7 +1178,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
|
|||
}
|
||||
|
||||
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
clock = adjusted_mode->clock;
|
||||
clock = adjusted_mode->crtc_clock;
|
||||
htotal = adjusted_mode->htotal;
|
||||
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
|
||||
pixel_size = crtc->fb->bits_per_pixel / 8;
|
||||
|
@ -1261,7 +1265,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
|
|||
|
||||
crtc = intel_get_crtc_for_plane(dev, plane);
|
||||
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
clock = adjusted_mode->clock;
|
||||
clock = adjusted_mode->crtc_clock;
|
||||
htotal = adjusted_mode->htotal;
|
||||
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
|
||||
pixel_size = crtc->fb->bits_per_pixel / 8;
|
||||
|
@ -1302,7 +1306,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
|
|||
if (!intel_crtc_active(crtc))
|
||||
return false;
|
||||
|
||||
clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
|
||||
clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
|
||||
pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
|
||||
|
||||
entries = (clock / 1000) * pixel_size;
|
||||
|
@ -1492,7 +1496,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
|
|||
static const int sr_latency_ns = 12000;
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
int clock = adjusted_mode->clock;
|
||||
int clock = adjusted_mode->crtc_clock;
|
||||
int htotal = adjusted_mode->htotal;
|
||||
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
|
||||
int pixel_size = crtc->fb->bits_per_pixel / 8;
|
||||
|
@ -1567,11 +1571,13 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
|
|||
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
|
||||
crtc = intel_get_crtc_for_plane(dev, 0);
|
||||
if (intel_crtc_active(crtc)) {
|
||||
const struct drm_display_mode *adjusted_mode;
|
||||
int cpp = crtc->fb->bits_per_pixel / 8;
|
||||
if (IS_GEN2(dev))
|
||||
cpp = 4;
|
||||
|
||||
planea_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock,
|
||||
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
|
||||
wm_info, fifo_size, cpp,
|
||||
latency_ns);
|
||||
enabled = crtc;
|
||||
|
@ -1581,11 +1587,13 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
|
|||
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
|
||||
crtc = intel_get_crtc_for_plane(dev, 1);
|
||||
if (intel_crtc_active(crtc)) {
|
||||
const struct drm_display_mode *adjusted_mode;
|
||||
int cpp = crtc->fb->bits_per_pixel / 8;
|
||||
if (IS_GEN2(dev))
|
||||
cpp = 4;
|
||||
|
||||
planeb_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock,
|
||||
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
|
||||
wm_info, fifo_size, cpp,
|
||||
latency_ns);
|
||||
if (enabled == NULL)
|
||||
|
@ -1614,7 +1622,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
|
|||
static const int sr_latency_ns = 6000;
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&to_intel_crtc(enabled)->config.adjusted_mode;
|
||||
int clock = adjusted_mode->clock;
|
||||
int clock = adjusted_mode->crtc_clock;
|
||||
int htotal = adjusted_mode->htotal;
|
||||
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
|
||||
int pixel_size = enabled->fb->bits_per_pixel / 8;
|
||||
|
@ -1670,6 +1678,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
|
|||
struct drm_device *dev = unused_crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
const struct drm_display_mode *adjusted_mode;
|
||||
uint32_t fwater_lo;
|
||||
int planea_wm;
|
||||
|
||||
|
@ -1677,7 +1686,8 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
|
|||
if (crtc == NULL)
|
||||
return;
|
||||
|
||||
planea_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock,
|
||||
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
|
||||
&i830_wm_info,
|
||||
dev_priv->display.get_fifo_size(dev, 0),
|
||||
4, latency_ns);
|
||||
|
@ -1764,7 +1774,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
|
|||
|
||||
crtc = intel_get_crtc_for_plane(dev, plane);
|
||||
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
|
||||
clock = adjusted_mode->clock;
|
||||
clock = adjusted_mode->crtc_clock;
|
||||
htotal = adjusted_mode->htotal;
|
||||
hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
|
||||
pixel_size = crtc->fb->bits_per_pixel / 8;
|
||||
|
@ -2112,7 +2122,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
|
|||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t pixel_rate;
|
||||
|
||||
pixel_rate = intel_crtc->config.adjusted_mode.clock;
|
||||
pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
|
||||
|
||||
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
|
||||
* adjust the pixel_rate here. */
|
||||
|
@ -2913,7 +2923,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
|
|||
return false;
|
||||
}
|
||||
|
||||
clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
|
||||
clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
|
||||
|
||||
/* Use the small buffer method to calculate the sprite watermark */
|
||||
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
|
||||
|
@ -2948,7 +2958,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
|
|||
}
|
||||
|
||||
crtc = intel_get_crtc_for_plane(dev, plane);
|
||||
clock = to_intel_crtc(crtc)->config.adjusted_mode.clock;
|
||||
clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
|
||||
if (!clock) {
|
||||
*sprite_wm = 0;
|
||||
return false;
|
||||
|
@ -3302,6 +3312,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
|
|||
return limits;
|
||||
}
|
||||
|
||||
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
|
||||
{
|
||||
int new_power;
|
||||
|
||||
new_power = dev_priv->rps.power;
|
||||
switch (dev_priv->rps.power) {
|
||||
case LOW_POWER:
|
||||
if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
|
||||
new_power = BETWEEN;
|
||||
break;
|
||||
|
||||
case BETWEEN:
|
||||
if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
|
||||
new_power = LOW_POWER;
|
||||
else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
|
||||
new_power = HIGH_POWER;
|
||||
break;
|
||||
|
||||
case HIGH_POWER:
|
||||
if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
|
||||
new_power = BETWEEN;
|
||||
break;
|
||||
}
|
||||
/* Max/min bins are special */
|
||||
if (val == dev_priv->rps.min_delay)
|
||||
new_power = LOW_POWER;
|
||||
if (val == dev_priv->rps.max_delay)
|
||||
new_power = HIGH_POWER;
|
||||
if (new_power == dev_priv->rps.power)
|
||||
return;
|
||||
|
||||
/* Note the units here are not exactly 1us, but 1280ns. */
|
||||
switch (new_power) {
|
||||
case LOW_POWER:
|
||||
/* Upclock if more than 95% busy over 16ms */
|
||||
I915_WRITE(GEN6_RP_UP_EI, 12500);
|
||||
I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
|
||||
|
||||
/* Downclock if less than 85% busy over 32ms */
|
||||
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
|
||||
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
|
||||
|
||||
I915_WRITE(GEN6_RP_CONTROL,
|
||||
GEN6_RP_MEDIA_TURBO |
|
||||
GEN6_RP_MEDIA_HW_NORMAL_MODE |
|
||||
GEN6_RP_MEDIA_IS_GFX |
|
||||
GEN6_RP_ENABLE |
|
||||
GEN6_RP_UP_BUSY_AVG |
|
||||
GEN6_RP_DOWN_IDLE_AVG);
|
||||
break;
|
||||
|
||||
case BETWEEN:
|
||||
/* Upclock if more than 90% busy over 13ms */
|
||||
I915_WRITE(GEN6_RP_UP_EI, 10250);
|
||||
I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
|
||||
|
||||
/* Downclock if less than 75% busy over 32ms */
|
||||
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
|
||||
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
|
||||
|
||||
I915_WRITE(GEN6_RP_CONTROL,
|
||||
GEN6_RP_MEDIA_TURBO |
|
||||
GEN6_RP_MEDIA_HW_NORMAL_MODE |
|
||||
GEN6_RP_MEDIA_IS_GFX |
|
||||
GEN6_RP_ENABLE |
|
||||
GEN6_RP_UP_BUSY_AVG |
|
||||
GEN6_RP_DOWN_IDLE_AVG);
|
||||
break;
|
||||
|
||||
case HIGH_POWER:
|
||||
/* Upclock if more than 85% busy over 10ms */
|
||||
I915_WRITE(GEN6_RP_UP_EI, 8000);
|
||||
I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
|
||||
|
||||
/* Downclock if less than 60% busy over 32ms */
|
||||
I915_WRITE(GEN6_RP_DOWN_EI, 25000);
|
||||
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
|
||||
|
||||
I915_WRITE(GEN6_RP_CONTROL,
|
||||
GEN6_RP_MEDIA_TURBO |
|
||||
GEN6_RP_MEDIA_HW_NORMAL_MODE |
|
||||
GEN6_RP_MEDIA_IS_GFX |
|
||||
GEN6_RP_ENABLE |
|
||||
GEN6_RP_UP_BUSY_AVG |
|
||||
GEN6_RP_DOWN_IDLE_AVG);
|
||||
break;
|
||||
}
|
||||
|
||||
dev_priv->rps.power = new_power;
|
||||
dev_priv->rps.last_adj = 0;
|
||||
}
|
||||
|
||||
void gen6_set_rps(struct drm_device *dev, u8 val)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -3314,6 +3416,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
|
|||
if (val == dev_priv->rps.cur_delay)
|
||||
return;
|
||||
|
||||
gen6_set_rps_thresholds(dev_priv, val);
|
||||
|
||||
if (IS_HASWELL(dev))
|
||||
I915_WRITE(GEN6_RPNSWREQ,
|
||||
HSW_FREQUENCY(val));
|
||||
|
@ -3335,6 +3439,28 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
|
|||
trace_intel_gpu_freq_change(val * 50);
|
||||
}
|
||||
|
||||
void gen6_rps_idle(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
if (dev_priv->info->is_valleyview)
|
||||
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
|
||||
else
|
||||
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
|
||||
dev_priv->rps.last_adj = 0;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
void gen6_rps_boost(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
if (dev_priv->info->is_valleyview)
|
||||
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
|
||||
else
|
||||
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
|
||||
dev_priv->rps.last_adj = 0;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait until the previous freq change has completed,
|
||||
* or the timeout elapsed, and then update our notion
|
||||
|
@ -3516,7 +3642,10 @@ static void gen6_enable_rps(struct drm_device *dev)
|
|||
|
||||
/* In units of 50MHz */
|
||||
dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
|
||||
dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
|
||||
dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
|
||||
dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
|
||||
dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
|
||||
dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
|
||||
dev_priv->rps.cur_delay = 0;
|
||||
|
||||
/* disable the counters and set deterministic thresholds */
|
||||
|
@ -3564,38 +3693,9 @@ static void gen6_enable_rps(struct drm_device *dev)
|
|||
GEN6_RC_CTL_EI_MODE(1) |
|
||||
GEN6_RC_CTL_HW_ENABLE);
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
I915_WRITE(GEN6_RPNSWREQ,
|
||||
HSW_FREQUENCY(10));
|
||||
I915_WRITE(GEN6_RC_VIDEO_FREQ,
|
||||
HSW_FREQUENCY(12));
|
||||
} else {
|
||||
I915_WRITE(GEN6_RPNSWREQ,
|
||||
GEN6_FREQUENCY(10) |
|
||||
GEN6_OFFSET(0) |
|
||||
GEN6_AGGRESSIVE_TURBO);
|
||||
I915_WRITE(GEN6_RC_VIDEO_FREQ,
|
||||
GEN6_FREQUENCY(12));
|
||||
}
|
||||
|
||||
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
|
||||
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
|
||||
dev_priv->rps.max_delay << 24 |
|
||||
dev_priv->rps.min_delay << 16);
|
||||
|
||||
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
|
||||
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
|
||||
I915_WRITE(GEN6_RP_UP_EI, 66000);
|
||||
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
|
||||
|
||||
/* Power down if completely idle for over 50ms */
|
||||
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
|
||||
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
|
||||
I915_WRITE(GEN6_RP_CONTROL,
|
||||
GEN6_RP_MEDIA_TURBO |
|
||||
GEN6_RP_MEDIA_HW_NORMAL_MODE |
|
||||
GEN6_RP_MEDIA_IS_GFX |
|
||||
GEN6_RP_ENABLE |
|
||||
GEN6_RP_UP_BUSY_AVG |
|
||||
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
|
||||
|
||||
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
|
||||
if (!ret) {
|
||||
|
@ -3611,7 +3711,8 @@ static void gen6_enable_rps(struct drm_device *dev)
|
|||
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
|
||||
}
|
||||
|
||||
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
|
||||
dev_priv->rps.power = HIGH_POWER; /* force a reset */
|
||||
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
|
||||
|
||||
gen6_enable_rps_interrupts(dev);
|
||||
|
||||
|
@ -3653,9 +3754,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
|
|||
/* Convert from kHz to MHz */
|
||||
max_ia_freq /= 1000;
|
||||
|
||||
min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
|
||||
/* convert DDR frequency from units of 133.3MHz to bandwidth */
|
||||
min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
|
||||
min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
|
||||
/* convert DDR frequency from units of 266.6MHz to bandwidth */
|
||||
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
|
||||
|
||||
/*
|
||||
* For each potential GPU frequency, load a ring frequency we'd like
|
||||
|
@ -3668,7 +3769,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
|
|||
unsigned int ia_freq = 0, ring_freq = 0;
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
ring_freq = (gpu_freq * 5 + 3) / 4;
|
||||
ring_freq = mult_frac(gpu_freq, 5, 4);
|
||||
ring_freq = max(min_ring_freq, ring_freq);
|
||||
/* leave ia_freq as the default, chosen by cpufreq */
|
||||
} else {
|
||||
|
@ -3724,24 +3825,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
|
|||
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
|
||||
}
|
||||
|
||||
static void vlv_rps_timer_work(struct work_struct *work)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
|
||||
rps.vlv_work.work);
|
||||
|
||||
/*
|
||||
* Timer fired, we must be idle. Drop to min voltage state.
|
||||
* Note: we use RPe here since it should match the
|
||||
* Vmin we were shooting for. That should give us better
|
||||
* perf when we come back out of RC6 than if we used the
|
||||
* min freq available.
|
||||
*/
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
|
||||
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
static void valleyview_setup_pctx(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -3794,7 +3877,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
|
|||
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
|
||||
|
||||
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
|
||||
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
|
||||
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
|
||||
gtfifodbg);
|
||||
I915_WRITE(GTFIFODBG, gtfifodbg);
|
||||
}
|
||||
|
||||
|
@ -3827,7 +3911,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
|
|||
I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
|
||||
|
||||
/* allows RC6 residency counter to work */
|
||||
I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
|
||||
I915_WRITE(VLV_COUNTER_CONTROL,
|
||||
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
|
||||
VLV_MEDIA_RC6_COUNT_EN |
|
||||
VLV_RENDER_RC6_COUNT_EN));
|
||||
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
|
||||
rc6_mode = GEN7_RC_CTL_TO_MODE;
|
||||
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
|
||||
|
@ -3880,8 +3967,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
|
|||
dev_priv->rps.rpe_delay),
|
||||
dev_priv->rps.rpe_delay);
|
||||
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
|
||||
|
||||
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
|
||||
|
||||
gen6_enable_rps_interrupts(dev);
|
||||
|
@ -4621,8 +4706,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
|
|||
} else if (INTEL_INFO(dev)->gen >= 6) {
|
||||
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
|
||||
cancel_work_sync(&dev_priv->rps.work);
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
valleyview_disable_rps(dev);
|
||||
|
@ -5498,7 +5581,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
|
|||
spin_unlock_irq(&power_well->lock);
|
||||
}
|
||||
|
||||
void intel_resume_power_well(struct drm_device *dev)
|
||||
static void intel_resume_power_well(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_power_well *power_well = &dev_priv->power_well;
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
|||
goto log_fail;
|
||||
|
||||
while ((status == SDVO_CMD_STATUS_PENDING ||
|
||||
status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
|
||||
status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
|
||||
if (retry < 10)
|
||||
msleep(15);
|
||||
else
|
||||
|
@ -1369,7 +1369,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
|
|||
if (HAS_PCH_SPLIT(dev))
|
||||
ironlake_check_encoder_dotclock(pipe_config, dotclock);
|
||||
|
||||
pipe_config->adjusted_mode.clock = dotclock;
|
||||
pipe_config->adjusted_mode.crtc_clock = dotclock;
|
||||
|
||||
/* Cross check the port pixel multiplier with the sdvo encoder state. */
|
||||
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
|
||||
|
@ -1773,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
|
|||
{
|
||||
struct edid *edid;
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
|
||||
connector->base.id, drm_get_connector_name(connector));
|
||||
|
||||
/* set the bus switch and get the modes */
|
||||
edid = intel_sdvo_get_edid(connector);
|
||||
|
||||
|
@ -1868,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
|
|||
uint32_t reply = 0, format_map = 0;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
|
||||
connector->base.id, drm_get_connector_name(connector));
|
||||
|
||||
/* Read the list of supported input resolutions for the selected TV
|
||||
* format.
|
||||
*/
|
||||
|
@ -1902,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
|
|||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
struct drm_display_mode *newmode;
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
|
||||
connector->base.id, drm_get_connector_name(connector));
|
||||
|
||||
/*
|
||||
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
|
||||
* SDVO->LVDS transcoders can't cope with the EDID mode.
|
||||
|
@ -1933,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static int intel_sdvo_get_modes(struct drm_connector *connector)
|
||||
|
@ -2001,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
|
|||
intel_sdvo_connector->tv_format);
|
||||
|
||||
intel_sdvo_destroy_enhance_property(connector);
|
||||
drm_sysfs_connector_remove(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
kfree(intel_sdvo_connector);
|
||||
}
|
||||
|
@ -2397,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
|
|||
struct intel_connector *intel_connector;
|
||||
struct intel_sdvo_connector *intel_sdvo_connector;
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
|
||||
DRM_DEBUG_KMS("initialising DVI device %d\n", device);
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
|
||||
if (!intel_sdvo_connector)
|
||||
return false;
|
||||
|
||||
|
@ -2445,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
|
|||
struct intel_connector *intel_connector;
|
||||
struct intel_sdvo_connector *intel_sdvo_connector;
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
|
||||
DRM_DEBUG_KMS("initialising TV type %d\n", type);
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
|
||||
if (!intel_sdvo_connector)
|
||||
return false;
|
||||
|
||||
|
@ -2470,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
|
|||
return true;
|
||||
|
||||
err:
|
||||
drm_sysfs_connector_remove(connector);
|
||||
intel_sdvo_destroy(connector);
|
||||
return false;
|
||||
}
|
||||
|
@ -2482,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
|
|||
struct intel_connector *intel_connector;
|
||||
struct intel_sdvo_connector *intel_sdvo_connector;
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
|
||||
DRM_DEBUG_KMS("initialising analog device %d\n", device);
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
|
||||
if (!intel_sdvo_connector)
|
||||
return false;
|
||||
|
||||
|
@ -2513,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
|
|||
struct intel_connector *intel_connector;
|
||||
struct intel_sdvo_connector *intel_sdvo_connector;
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
|
||||
DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
|
||||
|
||||
intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
|
||||
if (!intel_sdvo_connector)
|
||||
return false;
|
||||
|
||||
|
@ -2537,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
|
|||
return true;
|
||||
|
||||
err:
|
||||
drm_sysfs_connector_remove(connector);
|
||||
intel_sdvo_destroy(connector);
|
||||
return false;
|
||||
}
|
||||
|
@ -2608,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
|
|||
|
||||
list_for_each_entry_safe(connector, tmp,
|
||||
&dev->mode_config.connector_list, head) {
|
||||
if (intel_attached_encoder(connector) == &intel_sdvo->base)
|
||||
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
|
||||
drm_sysfs_connector_remove(connector);
|
||||
intel_sdvo_destroy(connector);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2879,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
|
|||
struct intel_encoder *intel_encoder;
|
||||
struct intel_sdvo *intel_sdvo;
|
||||
int i;
|
||||
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
|
||||
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
|
||||
if (!intel_sdvo)
|
||||
return false;
@@ -1034,7 +1034,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;

intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;

@@ -912,7 +912,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (!tv_mode)
|
||||
return false;
|
||||
|
||||
pipe_config->adjusted_mode.clock = tv_mode->clock;
|
||||
pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
|
||||
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
|
||||
pipe_config->pipe_bpp = 8*3;
|
||||
|
||||
|
@ -1433,7 +1433,6 @@ intel_tv_get_modes(struct drm_connector *connector)
|
|||
static void
|
||||
intel_tv_destroy(struct drm_connector *connector)
|
||||
{
|
||||
drm_sysfs_connector_remove(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
kfree(connector);
|
||||
}
|
||||
|
@ -1518,7 +1517,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
|
|||
static int tv_is_present_in_vbt(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct child_device_config *p_child;
|
||||
union child_device_config *p_child;
|
||||
int i, ret;
|
||||
|
||||
if (!dev_priv->vbt.child_dev_num)
|
||||
|
@@ -1530,13 +1529,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
 		/*
 		 * If the device type is not TV, continue.
 		 */
-		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
-		    p_child->device_type != DEVICE_TYPE_TV)
+		if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
+		    p_child->old.device_type != DEVICE_TYPE_TV)
 			continue;
 		/* Only when the addin_offset is non-zero, it is regarded
 		 * as present.
 		 */
-		if (p_child->addin_offset) {
+		if (p_child->old.addin_offset) {
 			ret = 1;
 			break;
 		}
@@ -1590,12 +1589,12 @@ intel_tv_init(struct drm_device *dev)
 	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
 		return;
 
-	intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+	intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
 	if (!intel_tv) {
 		return;
 	}
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_tv);
 		return;
@@ -204,6 +204,18 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
+static void gen6_force_wake_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (--dev_priv->uncore.forcewake_count == 0)
+		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
 void intel_uncore_early_sanitize(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -216,6 +228,9 @@ void intel_uncore_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
+			  gen6_force_wake_work);
+
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
 		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
@@ -261,6 +276,16 @@ void intel_uncore_init(struct drm_device *dev)
 	}
 }
 
+void intel_uncore_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	flush_delayed_work(&dev_priv->uncore.force_wake_work);
+
+	/* Paranoia: make sure we have disabled everything before we exit. */
+	intel_uncore_sanitize(dev);
+}
+
 static void intel_uncore_forcewake_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -276,10 +301,26 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
 
 void intel_uncore_sanitize(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg_val;
+
 	intel_uncore_forcewake_reset(dev);
 
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
+
+	/* Turn off power gate, require especially for the BIOS less system */
+	if (IS_VALLEYVIEW(dev)) {
+
+		mutex_lock(&dev_priv->rps.hw_lock);
+		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
+
+		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
+
+		mutex_unlock(&dev_priv->rps.hw_lock);
+
+	}
 }
 
 /*
@@ -306,8 +347,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+	if (--dev_priv->uncore.forcewake_count == 0) {
+		dev_priv->uncore.forcewake_count++;
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->uncore.force_wake_work,
+				 1);
+	}
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
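The hunk above changes the last forcewake put so that the reference is not dropped synchronously: the count is bumped back up and the delayed work item (gen6_force_wake_work, added earlier in this file) performs the real release roughly a jiffy later, which stops back-to-back get/put sequences from toggling forcewake on every register access. Below is a minimal user-space sketch of that keep-one-reference-and-release-later shape, using pthreads; hw_wake(), hw_sleep() and every other name in it are illustrative stand-ins, not part of the kernel code above.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;		/* plays the role of uncore.forcewake_count */
static bool release_pending;	/* stands in for the queued delayed work */

static void hw_wake(void)  { puts("forcewake asserted"); }
static void hw_sleep(void) { puts("forcewake released"); }

/* Runs a little later, like gen6_force_wake_work(): drop the kept reference. */
static void *deferred_release(void *arg)
{
	(void)arg;
	usleep(1000);			/* roughly the one-jiffy delay above */
	pthread_mutex_lock(&lock);
	if (--refcount == 0)
		hw_sleep();
	release_pending = false;
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void forcewake_get(void)
{
	pthread_mutex_lock(&lock);
	if (refcount++ == 0)
		hw_wake();
	pthread_mutex_unlock(&lock);
}

static void forcewake_put(void)
{
	pthread_mutex_lock(&lock);
	if (--refcount == 0) {
		refcount++;		/* keep the wake alive a bit longer */
		if (!release_pending) {
			pthread_t worker;

			release_pending = true;
			pthread_create(&worker, NULL, deferred_release, NULL);
			pthread_detach(worker);
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	forcewake_get();
	forcewake_put();	/* no hw_sleep() yet, the worker will do it */
	forcewake_get();	/* reuses the still-held wake, no hw_wake() */
	forcewake_put();
	usleep(5000);		/* let the deferred release run */
	return 0;
}

Unlike the kernel version, which uses mod_delayed_work() and therefore pushes the release deadline out on every put, this sketch only queues a single deferred drop; it is meant to show the shape of the optimisation, not to reproduce it exactly.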
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 		regp->MiscOutReg = 0x23;	/* +hsync +vsync */
 	}
 
-	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
-
 	/*
 	 * Time Sequencer
 	 */
@@ -433,6 +433,9 @@ struct drm_file {
 	struct drm_master *master; /* master this node is currently associated with
 				      N.B. not always minor->master */
 
+	/* true when the client has asked us to expose stereo 3D mode flags */
+	bool stereo_allowed;
+
 	/**
 	 * fbs - List of framebuffers associated with this file.
 	 *
@@ -1294,6 +1297,8 @@ extern int drm_getstats(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 extern int drm_getcap(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
+extern int drm_setclientcap(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 extern int drm_setversion(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
 extern int drm_noop(struct drm_device *dev, void *data,
@@ -108,6 +108,7 @@ enum drm_mode_status {
     MODE_ONE_HEIGHT,    /* only one height is supported */
     MODE_ONE_SIZE,      /* only one resolution is supported */
     MODE_NO_REDUCED,    /* monitor doesn't accept reduced blanking */
+    MODE_NO_STEREO,	/* stereo modes not supported */
     MODE_UNVERIFIED = -3, /* mode needs to reverified */
     MODE_BAD = -2,	/* unspecified reason */
     MODE_ERROR	= -1 /* error condition */
@@ -124,7 +125,10 @@ enum drm_mode_status {
 	.vscan = (vs), .flags = (f), \
 	.base.type = DRM_MODE_OBJECT_MODE
 
-#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+#define CRTC_INTERLACE_HALVE_V	(1 << 0) /* halve V values for interlacing */
+#define CRTC_STEREO_DOUBLE	(1 << 1) /* adjust timings for stereo modes */
+
+#define DRM_MODE_FLAG_3D_MAX	DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
 struct drm_display_mode {
 	/* Header */
@@ -155,8 +159,7 @@ struct drm_display_mode {
 	int height_mm;
 
 	/* Actual mode we give to hw */
-	int clock_index;
-	int synth_clock;
+	int crtc_clock;		/* in KHz */
 	int crtc_hdisplay;
 	int crtc_hblank_start;
 	int crtc_hblank_end;
@@ -180,6 +183,11 @@ struct drm_display_mode {
 	int hsync;		/* in kHz */
 };
 
+static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
+{
+	return mode->flags & DRM_MODE_FLAG_3D_MASK;
+}
+
 enum drm_connector_status {
 	connector_status_connected = 1,
 	connector_status_disconnected = 2,
@@ -597,6 +605,7 @@ struct drm_connector {
 	int connector_type_id;
 	bool interlace_allowed;
 	bool doublescan_allowed;
+	bool stereo_allowed;
 	struct list_head modes; /* list of modes on this connector */
 
 	enum drm_connector_status status;
@@ -976,7 +985,7 @@ extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
-extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
 extern int drm_mode_width(const struct drm_display_mode *mode);
 extern int drm_mode_height(const struct drm_display_mode *mode);
 
@@ -611,12 +611,37 @@ struct drm_gem_open {
 	__u64 size;
 };
 
+#define DRM_CAP_DUMB_BUFFER		0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
+#define DRM_CAP_PRIME			0x5
+#define  DRM_PRIME_CAP_IMPORT		0x1
+#define  DRM_PRIME_CAP_EXPORT		0x2
+#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
+
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
 	__u64 capability;
 	__u64 value;
 };
 
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D	1
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+	__u64 capability;
+	__u64 value;
+};
+
 #define DRM_CLOEXEC O_CLOEXEC
 struct drm_prime_handle {
 	__u32 handle;
@@ -649,6 +674,7 @@ struct drm_prime_handle {
 #define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
 #define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)
 
 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
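Together with DRM_CLIENT_CAP_STEREO_3D above, this new ioctl is what lets a client opt in to seeing stereo modes from GETCONNECTOR. Below is a minimal sketch of doing that from user space, assuming installed kernel uapi headers that already carry these definitions; the /dev/dri/card0 path is hard-coded only for illustration, and where available libdrm's drmSetClientCap() wrapper is the usual way to issue this.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_STEREO_3D,
		.value = 1,	/* from now on GETCONNECTOR also lists stereo modes */
	};
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
		perror("DRM_IOCTL_SET_CLIENT_CAP");
	close(fd);
	return 0;
}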
@@ -774,17 +800,6 @@ struct drm_event_vblank {
 	__u32 reserved;
 };
 
-#define DRM_CAP_DUMB_BUFFER 0x1
-#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
-#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
-#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
-#define DRM_CAP_PRIME 0x5
-#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
-#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-
-#define DRM_PRIME_CAP_IMPORT 0x1
-#define DRM_PRIME_CAP_EXPORT 0x2
-
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
@@ -44,20 +44,35 @@
 
 /* Video mode flags */
 /* bit compatible with the xorg definitions. */
-#define DRM_MODE_FLAG_PHSYNC	(1<<0)
-#define DRM_MODE_FLAG_NHSYNC	(1<<1)
-#define DRM_MODE_FLAG_PVSYNC	(1<<2)
-#define DRM_MODE_FLAG_NVSYNC	(1<<3)
-#define DRM_MODE_FLAG_INTERLACE	(1<<4)
-#define DRM_MODE_FLAG_DBLSCAN	(1<<5)
-#define DRM_MODE_FLAG_CSYNC	(1<<6)
-#define DRM_MODE_FLAG_PCSYNC	(1<<7)
-#define DRM_MODE_FLAG_NCSYNC	(1<<8)
-#define DRM_MODE_FLAG_HSKEW	(1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST	(1<<10)
-#define DRM_MODE_FLAG_PIXMUX	(1<<11)
-#define DRM_MODE_FLAG_DBLCLK	(1<<12)
-#define DRM_MODE_FLAG_CLKDIV2	(1<<13)
+#define DRM_MODE_FLAG_PHSYNC			(1<<0)
+#define DRM_MODE_FLAG_NHSYNC			(1<<1)
+#define DRM_MODE_FLAG_PVSYNC			(1<<2)
+#define DRM_MODE_FLAG_NVSYNC			(1<<3)
+#define DRM_MODE_FLAG_INTERLACE			(1<<4)
+#define DRM_MODE_FLAG_DBLSCAN			(1<<5)
+#define DRM_MODE_FLAG_CSYNC			(1<<6)
+#define DRM_MODE_FLAG_PCSYNC			(1<<7)
+#define DRM_MODE_FLAG_NCSYNC			(1<<8)
+#define DRM_MODE_FLAG_HSKEW			(1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST			(1<<10)
+#define DRM_MODE_FLAG_PIXMUX			(1<<11)
+#define DRM_MODE_FLAG_DBLCLK			(1<<12)
+#define DRM_MODE_FLAG_CLKDIV2			(1<<13)
+/*
+ * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
+ * (define not exposed to user space).
+ */
+#define DRM_MODE_FLAG_3D_MASK			(0x1f<<14)
+#define  DRM_MODE_FLAG_3D_NONE			(0<<14)
+#define  DRM_MODE_FLAG_3D_FRAME_PACKING		(1<<14)
+#define  DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE	(2<<14)
+#define  DRM_MODE_FLAG_3D_LINE_ALTERNATIVE	(3<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL	(4<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH		(5<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH	(6<<14)
+#define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
+
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
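The 3D layouts occupy a five-bit field (bits 14-18) of the mode flags, so a mode carries at most one layout and user space selects it by masking with DRM_MODE_FLAG_3D_MASK. A small sketch of decoding the field follows, assuming installed kernel uapi headers that provide these definitions; only a few layouts are spelled out and everything else falls through to a generic label.

#include <stdio.h>
#include <linux/types.h>
#include <drm/drm_mode.h>

static const char *stereo_layout_name(__u32 flags)
{
	switch (flags & DRM_MODE_FLAG_3D_MASK) {
	case DRM_MODE_FLAG_3D_NONE:
		return "none (plain 2D mode)";
	case DRM_MODE_FLAG_3D_FRAME_PACKING:
		return "frame packing";
	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
		return "top and bottom";
	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
		return "side by side (half)";
	default:
		return "other/unknown stereo layout";
	}
}

int main(void)
{
	struct drm_mode_modeinfo mode = {
		.flags = DRM_MODE_FLAG_3D_TOP_AND_BOTTOM,
	};

	printf("stereo layout: %s\n", stereo_layout_name(mode.flags));
	return 0;
}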