Merge tag 'drm-intel-next-2020-07-02' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

drm/i915 features for v5.9

Highlights:
- Rocket Lake (RKL) platform enabling (Matt Roper, Lucas, José, Aditya)

Gem/GT:
- Numerous selftest fixes and improvements (Chris)
- TGL, RKL, EHL workaround updates (Matts Atwood and Roper, Clint, Swathi Dhanavanthri, Chris)
- Retry faulthandlers on ENOSPC to avoid oomkiller (Chris)
- Numerous refactorings and cleanups (Chris)
- Several GT fixes around init/suspend/resume/shutdown (Chris)
- Whitelist CTX_TIMESTAMP register on non-RCS (Chris)
- Track if an engine requires forcewake w/a (Chris)
- Locking improvements (Chris)
- Timeslicing improvements (Chris)
- Add a safety submission flush in the heartbeat (Chris)
- Flush gen3 relocs harder (Chris)
- Discard a misplaced GGTT vma (Chris)
- Reduce relocation paths to async GPU relocations only (Chris)
- It's all build up with no pay off (Chris' own words...)

Display:
- A plethora of DP MST fixes (Imre)
- Implement proper dbuf global state (Ville)
- Consider dbuf bandwidth when calculating CDCLK (Stan)
- FBC fixes and refactoring (Ville)
- PSR fixes and improvements (José, Gwan-gyeong)
- Cursor size fixes (Ville)
- Overlay color and gamma fixes (Ville)
- Fix and improve FSB and HRAWCLK readout (Ville)
- Pre-allocate and late cleanup of DSB cmd buffer (Animesh)
- Stop using mode->private_flags (Ville)
- Add plane color encoding support for YCBCR_BT2020 (Kishore Kadiyala)
- Update TGL Type-C DP and DKL HBR and HBR+ vswing tables (José)
- Fix DSI connector init error path (Vivek)
- A plethora of DP vswing/preemph fixes and refactoring (Ville)
- Fix TGL DKL vswing sequence selection (Vandita)
- Fix ICL hotplug interrupt disabling after storm detection (Imre)
- Retry HDCP link integrity check on failure (Oliver Barta)
- Fix TBT DPLL fractional divider (Imre)
- Fix ICL+ HBR3 source rate (Matt Atwood)
- Fix gen2 spurious underruns (Ville)
- Fix potential NULL dereference, some spelling fixes (Colin Ian King)
- Fix NULL dereference on encoder state probe (Chris)

Other:
- Backmerge to get mmap locking API (Jani)
- Distinguish Comet Lake from Coffee Lake (Chris)
- Various compiler warning fixes (Arnd Bergmann, Nathan Chancellor)
- WARN* conversions to drm_WARN* (Pankaj)
- Switch to device specific parameters with debugfs access (Jani)
- Fix agp/intel error path leak (Qiushi Wu)
- Forcewake power optimization (Chris)
- Irq handler optimization (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87wo3lkbxt.fsf@intel.com
commit 1cc4af412f
Dave Airlie, 2020-07-03 11:29:00 +10:00
150 changed files with 6039 additions and 3349 deletions

@ -550,6 +550,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_ICL_11_IDS(&gen11_early_ops),
INTEL_EHL_IDS(&gen11_early_ops),
INTEL_TGL_12_IDS(&gen11_early_ops),
INTEL_RKL_IDS(&gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);


@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void)
if (intel_private.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
__free_page(page);
return -EINVAL;
}
intel_private.scratch_page_dma = dma_addr;
} else


@ -5,7 +5,7 @@
* a module parameter. It must be unique and legal for a C identifier.
*
* The function should be of type int function(void). It may be conditionally
* compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
* compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
*
* Tests are executed in order by igt/dmabuf_selftest
*/


@ -78,6 +78,8 @@ gt-y += \
gt/debugfs_engines.o \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
gt/gen2_engine_cs.o \
gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \


@ -1469,8 +1469,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
pipe_config->hw.adjusted_mode.private_flags |=
I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
@ -1558,10 +1557,6 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
/* We would not operate in periodic command mode */
pipe_config->hw.adjusted_mode.private_flags &=
~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
/*
* In case of TE GATE cmd mode, we
* receive TE from the slave if
@ -1569,14 +1564,14 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
*/
if (is_cmd_mode(intel_dsi)) {
if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
pipe_config->hw.adjusted_mode.private_flags |=
pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE1 |
I915_MODE_FLAG_DSI_USE_TE0;
else if (intel_dsi->ports == BIT(PORT_B))
pipe_config->hw.adjusted_mode.private_flags |=
pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE1;
else
pipe_config->hw.adjusted_mode.private_flags |=
pipe_config->mode_flags |=
I915_MODE_FLAG_DSI_USE_TE0;
}
@ -1954,6 +1949,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
return;
err:
drm_connector_cleanup(connector);
drm_encoder_cleanup(&encoder->base);
kfree(intel_dsi);
kfree(intel_connector);


@ -249,9 +249,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_post = false;
crtc_state->fifo_changed = false;
crtc_state->preload_luts = false;
crtc_state->inherited = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
crtc_state->dsb = NULL;
return &crtc_state->uapi;
}
@ -292,6 +294,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
drm_WARN_ON(crtc->dev, crtc_state->dsb);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
kfree(crtc_state);


@ -479,7 +479,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct drm_display_mode *panel_fixed_mode;
int index;
index = i915_modparams.vbt_sdvo_panel_type;
index = dev_priv->params.vbt_sdvo_panel_type;
if (index == -2) {
drm_dbg_kms(&dev_priv->drm,
"Ignore SDVO panel mode from BIOS VBT tables.\n");
@ -829,9 +829,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
u8 vswing;
/* Don't read from VBT if module parameter has valid value */
if (i915_modparams.edp_vswing) {
if (dev_priv->params.edp_vswing) {
dev_priv->vbt.edp.low_vswing =
i915_modparams.edp_vswing == 1;
dev_priv->params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
dev_priv->vbt.edp.low_vswing = vswing == 0;
@ -1619,30 +1619,18 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
return 0;
}
static enum port dvo_port_to_port(u8 dvo_port)
static enum port __dvo_port_to_port(int n_ports, int n_dvo,
const int port_mapping[][3], u8 dvo_port)
{
/*
* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
*/
static const int dvo_ports[][3] = {
[PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
[PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
[PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
[PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
[PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1},
};
enum port port;
int i;
for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
if (dvo_ports[port][i] == -1)
for (port = PORT_A; port < n_ports; port++) {
for (i = 0; i < n_dvo; i++) {
if (port_mapping[port][i] == -1)
break;
if (dvo_port == dvo_ports[port][i])
if (dvo_port == port_mapping[port][i])
return port;
}
}
@ -1650,6 +1638,48 @@ static enum port dvo_port_to_port(u8 dvo_port)
return PORT_NONE;
}
static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
u8 dvo_port)
{
/*
* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
*/
static const int port_mapping[][3] = {
[PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
[PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
[PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
[PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT },
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
[PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
};
/*
* Bspec lists the ports as A, B, C, D - however internally in our
* driver we keep them as PORT_A, PORT_B, PORT_D and PORT_E so the
* registers in Display Engine match the right offsets. Apply the
* mapping here to translate from VBT to internal convention.
*/
static const int rkl_port_mapping[][3] = {
[PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
[PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
[PORT_C] = { -1 },
[PORT_D] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
[PORT_E] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
};
if (IS_ROCKETLAKE(dev_priv))
return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
ARRAY_SIZE(rkl_port_mapping[0]),
rkl_port_mapping,
dvo_port);
else
return __dvo_port_to_port(ARRAY_SIZE(port_mapping),
ARRAY_SIZE(port_mapping[0]),
port_mapping,
dvo_port);
}
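
/*
 * Worked example of the RKL remapping above (illustrative; rkl_i915
 * stands for a hypothetical RKL device, not patch code):
 *
 *   dvo_port_to_port(rkl_i915, DVO_PORT_HDMIC) == PORT_D
 *   dvo_port_to_port(rkl_i915, DVO_PORT_DPD)   == PORT_E
 *
 * i.e. the VBT's DDI C/D entries land on the driver's PORT_D/PORT_E.
 */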
static void parse_ddi_port(struct drm_i915_private *dev_priv,
struct display_device_data *devdata,
u8 bdb_version)
@ -1659,7 +1689,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
enum port port;
port = dvo_port_to_port(child->dvo_port);
port = dvo_port_to_port(dev_priv, child->dvo_port);
if (port == PORT_NONE)
return;
@ -2603,10 +2633,10 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
aux_ch = AUX_CH_C;
aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_D : AUX_CH_C;
break;
case DP_AUX_D:
aux_ch = AUX_CH_D;
aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_E : AUX_CH_D;
break;
case DP_AUX_E:
aux_ch = AUX_CH_E;
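/*
 * Note: these RKL cases mirror the DDI remapping in
 * dvo_port_to_port() -- the VBT's aux channels C/D resolve to the
 * driver's AUX_CH_D/AUX_CH_E, matching PORT_D/PORT_E.
 */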


@ -5,12 +5,12 @@
#include <drm/drm_atomic_state_helper.h>
#include "intel_bw.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@ -199,6 +199,12 @@ static const struct intel_sa_info tgl_sa_info = {
.displayrtids = 256,
};
static const struct intel_sa_info rkl_sa_info = {
.deburst = 16,
.deprogbwlimit = 20, /* GB/s */
.displayrtids = 128,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@ -309,7 +315,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
if (IS_GEN(dev_priv, 12))
if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
else if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
else if (IS_GEN(dev_priv, 11))
icl_get_bw_info(dev_priv, &icl_sa_info);
@ -420,6 +428,141 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state = NULL;
struct intel_bw_state *old_bw_state = NULL;
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int max_bw = 0;
int slice_id;
enum pipe pipe;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
enum plane_id plane_id;
struct intel_dbuf_bw *crtc_bw;
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];
memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
if (!crtc_state->hw.active)
continue;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_ddb_entry *uv_plane_alloc =
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
unsigned int data_rate = crtc_state->data_rate[plane_id];
unsigned int dbuf_mask = 0;
dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
/*
 * FIXME: To calculate this more properly we probably need to split
 * the per-plane data_rate into data_rate_y and data_rate_uv for
 * multiplanar formats, so that it is not accounted twice if the two
 * color planes happen to reside on different slices.
 * However, for pre-icl this works anyway because we have only a
 * single slice, and for icl+ the uv plane has a non-zero data rate.
 * So in the worst case these calculations are a bit pessimistic,
 * which shouldn't pose any significant problem anyway.
 */
for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
crtc_bw->used_bw[slice_id] += data_rate;
}
}
if (!old_bw_state)
return 0;
for_each_pipe(dev_priv, pipe) {
struct intel_dbuf_bw *crtc_bw;
crtc_bw = &new_bw_state->dbuf_bw[pipe];
for_each_dbuf_slice(slice_id) {
/*
 * Current experimental observations show that, contrary
 * to BSpec, we get underruns once the overall bandwidth
 * consumed by the slices exceeds 64 * CDCLK.
 * As a temporary measure, in order not to keep CDCLK
 * bumped up all the time, we calculate the minimum CDCLK
 * from that formula.
 */
max_bw += crtc_bw->used_bw[slice_id];
}
}
new_bw_state->min_cdclk = max_bw / 64;
if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
int ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
}
return 0;
}
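
/*
 * A minimal standalone sketch of the empirical rule above
 * (illustrative only; sketch_min_cdclk_from_dbuf_bw is not driver
 * API): keeping the summed slice bandwidth below 64 * CDCLK means
 * the minimum CDCLK is the total bandwidth divided by 64.
 */
static int sketch_min_cdclk_from_dbuf_bw(const int *slice_bw, int n_slices)
{
	int i, total_bw = 0;

	/* total bandwidth consumed across all dbuf slices */
	for (i = 0; i < n_slices; i++)
		total_bw += slice_bw[i];

	/* underruns were observed once total_bw exceeds 64 * CDCLK */
	return total_bw / 64;
}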
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state = NULL;
struct intel_bw_state *old_bw_state = NULL;
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int min_cdclk = 0;
enum pipe pipe;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
}
if (!old_bw_state)
return 0;
for_each_pipe(dev_priv, pipe) {
struct intel_cdclk_state *cdclk_state;
cdclk_state = intel_atomic_get_new_cdclk_state(state);
if (!cdclk_state)
return 0;
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
}
new_bw_state->min_cdclk = min_cdclk;
if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
int ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
}
return 0;
}
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);


@ -9,14 +9,20 @@
#include <drm/drm_atomic.h>
#include "intel_display.h"
#include "intel_display_power.h"
#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_dbuf_bw {
int used_bw[I915_MAX_DBUF_SLICES];
};
struct intel_bw_state {
struct intel_global_state base;
struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
/*
* Contains a bit mask, used to determine, whether correspondent
@ -36,6 +42,8 @@ struct intel_bw_state {
/* bitmask of active pipes */
u8 active_pipes;
int min_cdclk;
};
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
@ -56,5 +64,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state);
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_BW_H__ */


@ -21,7 +21,10 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/time.h>
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
@ -2094,6 +2097,7 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_bw_state *bw_state = NULL;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
@ -2106,6 +2110,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
if (min_cdclk < 0)
return min_cdclk;
bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(bw_state))
return PTR_ERR(bw_state);
if (cdclk_state->min_cdclk[i] == min_cdclk)
continue;
@ -2117,9 +2125,15 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
}
min_cdclk = cdclk_state->force_min_cdclk;
for_each_pipe(dev_priv, pipe)
for_each_pipe(dev_priv, pipe) {
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
if (!bw_state)
continue;
min_cdclk = max(bw_state->min_cdclk, min_cdclk);
}
return min_cdclk;
}
@ -2700,29 +2714,59 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv)
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
static int g4x_hrawclk(struct drm_i915_private *dev_priv)
static int i9xx_hrawclk(struct drm_i915_private *dev_priv)
{
u32 clkcfg;
/* hrawclock is 1/4 the FSB frequency */
clkcfg = intel_de_read(dev_priv, CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100000;
case CLKCFG_FSB_533:
return 133333;
case CLKCFG_FSB_667:
return 166667;
case CLKCFG_FSB_800:
return 200000;
case CLKCFG_FSB_1067:
case CLKCFG_FSB_1067_ALT:
return 266667;
case CLKCFG_FSB_1333:
case CLKCFG_FSB_1333_ALT:
return 333333;
default:
return 133333;
/*
* hrawclock is 1/4 the FSB frequency
*
* Note that this only reads the state of the FSB
* straps, not the actual FSB frequency. Some BIOSen
* let you configure each independently. Ideally we'd
* read out the actual FSB frequency but sadly we
* don't know which registers have that information,
* and all the relevant docs have gone to bit heaven :(
*/
clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK;
if (IS_MOBILE(dev_priv)) {
switch (clkcfg) {
case CLKCFG_FSB_400:
return 100000;
case CLKCFG_FSB_533:
return 133333;
case CLKCFG_FSB_667:
return 166667;
case CLKCFG_FSB_800:
return 200000;
case CLKCFG_FSB_1067:
return 266667;
case CLKCFG_FSB_1333:
return 333333;
default:
MISSING_CASE(clkcfg);
return 133333;
}
} else {
switch (clkcfg) {
case CLKCFG_FSB_400_ALT:
return 100000;
case CLKCFG_FSB_533:
return 133333;
case CLKCFG_FSB_667:
return 166667;
case CLKCFG_FSB_800:
return 200000;
case CLKCFG_FSB_1067_ALT:
return 266667;
case CLKCFG_FSB_1333_ALT:
return 333333;
case CLKCFG_FSB_1600_ALT:
return 400000;
default:
return 133333;
}
}
}
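
/*
 * Worked check of the 1/4 ratio (illustrative, kHz units as in the
 * tables above): a 1067 MT/s FSB strap is 1066667 kHz, and
 * 1066667 / 4 ~= 266667 kHz -- the value returned for CLKCFG_FSB_1067.
 */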
@ -2743,8 +2787,8 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
freq = vlv_hrawclk(dev_priv);
else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
freq = g4x_hrawclk(dev_priv);
else if (INTEL_GEN(dev_priv) >= 3)
freq = i9xx_hrawclk(dev_priv);
else
/* no rawclk on other platforms, or no need to know it */
return 0;
@ -2760,25 +2804,30 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) >= 12) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
dev_priv->cdclk.table = cnl_cdclk_table;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
@ -2787,18 +2836,23 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
else
dev_priv->cdclk.table = bxt_cdclk_table;
} else if (IS_GEN9_BC(dev_priv)) {
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
} else if (IS_BROADWELL(dev_priv)) {
dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
} else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = chv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else {
dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}


@ -714,16 +714,16 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@ -731,15 +731,13 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0),
1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1),
1 << 16);
intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2),
1 << 16);
}
intel_dsb_put(dsb);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
@ -753,7 +751,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
ivb_load_lut_ext_max(crtc_state);
ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@ -761,7 +759,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
ivb_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
ivb_load_lut_ext_max(crtc_state);
}
}
@ -776,7 +774,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
ivb_load_lut_ext_max(crtc_state);
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@ -784,7 +782,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
bdw_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
ivb_load_lut_ext_max(crtc_state);
}
}
@ -877,7 +875,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
ivb_load_lut_ext_max(crtc_state);
}
}
@ -900,14 +898,12 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
intel_dsb_put(dsb);
intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
}
static void
@ -916,7 +912,6 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
int i;
@ -927,19 +922,17 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
* 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
* 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_ldw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
intel_dsb_put(dsb);
}
static void
@ -949,7 +942,6 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
int i;
@ -963,12 +955,13 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
* seg2[0] being unused by the hardware.
*/
intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
@ -986,24 +979,22 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
ivb_load_lut_ext_max(crtc);
intel_dsb_put(dsb);
ivb_load_lut_ext_max(crtc_state);
}
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsb *dsb = intel_dsb_get(crtc);
if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
@ -1018,11 +1009,10 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
ivb_load_lut_ext_max(crtc_state);
}
intel_dsb_commit(dsb);
intel_dsb_put(dsb);
intel_dsb_commit(crtc_state);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)


@ -181,11 +181,25 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, CHICKEN_MISC_2, val);
}
static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
{
/*
* Some platforms only expect PHY_MISC to be programmed for PHY-A and
* PHY-B and may not even have instances of the register for the
* other combo PHY's.
*/
if (IS_ELKHARTLAKE(i915) ||
IS_ROCKETLAKE(i915))
return phy < PHY_C;
return true;
}
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
enum phy phy)
{
/* The PHY C added by EHL has no PHY_MISC register */
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
if (!has_phy_misc(dev_priv, phy))
return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
@ -220,6 +234,27 @@ static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
return false;
}
static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
{
/*
* Certain PHYs are connected to compensation resistors and act
* as masters to other PHYs.
*
* ICL,TGL:
* A(master) -> B(slave), C(slave)
* RKL:
* A(master) -> B(slave)
* C(master) -> D(slave)
*
* We must set the IREFGEN bit for any PHY acting as a master
* to another PHY.
*/
if (IS_ROCKETLAKE(dev_priv) && phy == PHY_C)
return true;
return phy == PHY_A;
}
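
/*
 * In other words, PHY_A is a master on every platform, and on RKL
 * PHY_C additionally masters PHY_D; the init and verify paths below
 * both use this helper, so IREFGEN is programmed and checked on every
 * master PHY.
 */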
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
@ -231,7 +266,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
if (phy == PHY_A) {
if (phy_is_master(dev_priv, phy)) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
@ -317,12 +352,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
continue;
}
/*
* Although EHL adds a combo PHY C, there's no PHY_MISC
* register for it and no need to program the
* DE_IO_COMP_PWR_DOWN setting on PHY C.
*/
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
/*
@ -347,7 +377,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
skip_phy_misc:
cnl_set_procmon_ref_values(dev_priv, phy);
if (phy == PHY_A) {
if (phy_is_master(dev_priv, phy)) {
val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
@ -376,12 +406,7 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
"Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
/*
* Although EHL adds a combo PHY C, there's no PHY_MISC
* register for it and no need to program the
* DE_IO_COMP_PWR_DOWN setting on PHY C.
*/
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
if (!has_phy_misc(dev_priv, phy))
goto skip_phy_misc;
val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));


@ -833,7 +833,7 @@ intel_crt_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
if (i915_modparams.load_detect_test) {
if (dev_priv->params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
goto load_detect;
@ -889,7 +889,7 @@ load_detect:
else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
else if (i915_modparams.load_detect_test)
else if (dev_priv->params.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;


@ -40,6 +40,10 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
#define RKL_CSR_PATH "i915/rkl_dmc_ver2_01.bin"
#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
MODULE_FIRMWARE(RKL_CSR_PATH);
#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
@ -682,7 +686,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_csr_runtime_pm_get(dev_priv);
if (INTEL_GEN(dev_priv) >= 12) {
if (IS_ROCKETLAKE(dev_priv)) {
csr->fw_path = RKL_CSR_PATH;
csr->required_version = RKL_CSR_VERSION_REQUIRED;
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
} else if (INTEL_GEN(dev_priv) >= 12) {
csr->fw_path = TGL_CSR_PATH;
csr->required_version = TGL_CSR_VERSION_REQUIRED;
/* Allow to load fw via parameter using the last known size */
@ -699,7 +707,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->fw_path = GLK_CSR_PATH;
csr->required_version = GLK_CSR_VERSION_REQUIRED;
csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
} else if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv)) {
csr->fw_path = KBL_CSR_PATH;
csr->required_version = KBL_CSR_VERSION_REQUIRED;
csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
@ -713,15 +723,15 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
}
if (i915_modparams.dmc_firmware_path) {
if (strlen(i915_modparams.dmc_firmware_path) == 0) {
if (dev_priv->params.dmc_firmware_path) {
if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
csr->fw_path = NULL;
drm_info(&dev_priv->drm,
"Disabling CSR firmware and runtime PM\n");
return;
}
csr->fw_path = i915_modparams.dmc_firmware_path;
csr->fw_path = dev_priv->params.dmc_firmware_path;
/* Bypass version check for firmware override. */
csr->required_version = 0;
}


@ -639,11 +639,25 @@ struct tgl_dkl_phy_ddi_buf_trans {
static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
{ 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
{ 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
{ 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
{ 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
{ 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */
{ 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
{ 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
{ 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
{ 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
{ 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
{ 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
};
static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
{ 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */
{ 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */
{ 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
{ 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
{ 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
{ 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */
{ 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
{ 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
{ 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
@ -722,10 +736,14 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) {
if (IS_KBL_ULX(dev_priv) ||
IS_CFL_ULX(dev_priv) ||
IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
} else if (IS_KBL_ULT(dev_priv) ||
IS_CFL_ULT(dev_priv) ||
IS_CML_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp);
return kbl_u_ddi_translations_dp;
} else {
@ -738,12 +756,16 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
IS_CFL_ULX(dev_priv)) {
if (IS_SKL_ULX(dev_priv) ||
IS_KBL_ULX(dev_priv) ||
IS_CFL_ULX(dev_priv) ||
IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
IS_CFL_ULT(dev_priv)) {
} else if (IS_SKL_ULT(dev_priv) ||
IS_KBL_ULT(dev_priv) ||
IS_CFL_ULT(dev_priv) ||
IS_CML_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
return skl_u_ddi_translations_edp;
} else {
@ -752,7 +774,9 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv))
return kbl_get_buf_trans_dp(dev_priv, n_entries);
else
return skl_get_buf_trans_dp(dev_priv, n_entries);
@ -761,8 +785,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
IS_CFL_ULX(dev_priv)) {
if (IS_SKL_ULX(dev_priv) ||
IS_KBL_ULX(dev_priv) ||
IS_CFL_ULX(dev_priv) ||
IS_CML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@ -784,7 +810,9 @@ static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
enum port port, int *n_entries)
{
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv)) {
const struct ddi_buf_trans *ddi_translations =
kbl_get_buf_trans_dp(dev_priv, n_entries);
*n_entries = skl_buf_trans_num_entries(port, *n_entries);
@ -1014,6 +1042,22 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return tgl_combo_phy_ddi_translations_dp_hbr;
}
static const struct tgl_dkl_phy_ddi_buf_trans *
tgl_get_dkl_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
*n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
return tgl_dkl_phy_hdmi_ddi_trans;
} else if (rate > 270000) {
*n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2);
return tgl_dkl_phy_dp_ddi_trans_hbr2;
}
*n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
return tgl_dkl_phy_dp_ddi_trans;
}
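
/*
 * The 270000 cutoff is the HBR link rate in kHz, so HBR2 (540000)
 * and HBR3 (810000) links pick the new tgl_dkl_phy_dp_ddi_trans_hbr2
 * table, while RBR (162000) and HBR (270000) keep the base table.
 */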
static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -1025,7 +1069,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
tgl_get_dkl_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
&n_entries);
default_entry = n_entries - 1;
} else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
@ -1608,7 +1653,6 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
if (INTEL_GEN(dev_priv) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
@ -1626,10 +1670,9 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
}
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
intel_ddi_transcoder_func_reg_val_get(encoder,
crtc_state));
}
/*
@ -2095,10 +2138,10 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
ddi_translations[level].deemphasis);
}
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
@ -2108,7 +2151,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
tgl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
tgl_get_dkl_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
} else if (INTEL_GEN(dev_priv) == 11) {
if (IS_ELKHARTLAKE(dev_priv))
ehl_get_combo_buf_trans(dev_priv, encoder->type,
@ -2151,19 +2195,9 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
* used on all DDI platforms. Should that change we need to
* rethink this code.
*/
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
{
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
@ -2585,15 +2619,17 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
int rate = 0;
if (type == INTEL_OUTPUT_HDMI) {
n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
} else {
n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
ddi_translations = tgl_dkl_phy_dp_ddi_trans;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
rate = intel_dp->link_rate;
}
ddi_translations = tgl_get_dkl_buf_trans(dev_priv, encoder->type, rate,
&n_entries);
if (level >= n_entries)
level = n_entries - 1;
@ -3344,11 +3380,10 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int level = intel_ddi_hdmi_level(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, crtc_state);
@ -3375,9 +3410,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
intel_ddi_enable_pipe_clock(encoder, crtc_state);
intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
@ -4155,11 +4190,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
if (INTEL_GEN(dev_priv) >= 12) {
intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
}
intel_dsc_get_config(encoder, pipe_config);
temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@ -4261,6 +4291,16 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
if (INTEL_GEN(dev_priv) >= 12) {
enum transcoder transcoder =
intel_dp_mst_is_slave_trans(pipe_config) ?
pipe_config->mst_master_transcoder :
pipe_config->cpu_transcoder;
intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
}
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@ -4523,6 +4563,9 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
else
intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
intel_dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
intel_dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
if (INTEL_GEN(dev_priv) < 12) {
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);


@ -42,9 +42,6 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
u32 ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);


@ -35,6 +35,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@ -3822,6 +3823,17 @@ skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
return true;
}
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
int x = 0, y = 0;
intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
plane_state->color_plane[0].offset, 0);
return y;
}
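
/*
 * The new helper recovers the fence Y offset by re-adjusting (0, 0)
 * against the plane's main-surface offset and returning the resulting
 * y, i.e. the number of lines between the start of the FB and
 * color_plane[0].offset.
 */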
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
@ -4812,11 +4824,18 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
switch (plane_state->hw.color_encoding) {
case DRM_COLOR_YCBCR_BT709:
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
break;
case DRM_COLOR_YCBCR_BT2020:
plane_color_ctl |=
PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
break;
default:
plane_color_ctl |=
PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
}
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
@ -4879,7 +4898,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
if (!i915_modparams.force_reset_modeset_test &&
if (!dev_priv->params.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
@ -6424,8 +6443,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* We can't read out IPS on broadwell, assume the worst and
* forcibly enable IPS on the first fastset.
*/
if (new_crtc_state->update_pipe &&
old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
if (new_crtc_state->update_pipe && old_crtc_state->inherited)
return true;
return !old_crtc_state->ips_enabled;
@ -7212,30 +7230,33 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
if (IS_ELKHARTLAKE(dev_priv))
else if (IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
else if (IS_ELKHARTLAKE(dev_priv))
return phy <= PHY_C;
if (INTEL_GEN(dev_priv) >= 11)
else if (INTEL_GEN(dev_priv) >= 11)
return phy <= PHY_B;
return false;
else
return false;
}
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
if (INTEL_GEN(dev_priv) >= 12)
if (IS_ROCKETLAKE(dev_priv))
return false;
else if (INTEL_GEN(dev_priv) >= 12)
return phy >= PHY_D && phy <= PHY_I;
if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
return false;
else
return false;
}
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
if (IS_ELKHARTLAKE(i915) && port == PORT_D)
if (IS_ROCKETLAKE(i915) && port >= PORT_D)
return (enum phy)port - 1;
else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
return PHY_A;
return (enum phy)port;
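
/*
 * Worked example of the new RKL branch: PORT_D and PORT_E map to
 * PHY_C and PHY_D (port - 1), so all RKL outputs land on combo PHYs
 * A-D; together with the intel_phy_is_tc() change above, this means
 * RKL reports no Type-C PHYs at all.
 */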
@ -7506,6 +7527,10 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
/* prevents spurious underruns */
if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
@ -7579,6 +7604,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
to_intel_bw_state(dev_priv->bw_obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(dev_priv->cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
@ -7652,6 +7679,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
cdclk_state->min_voltage_level[pipe] = 0;
cdclk_state->active_pipes &= ~BIT(pipe);
dbuf_state->active_pipes &= ~BIT(pipe);
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
}
@ -7869,7 +7898,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
if (!hsw_crtc_supports_ips(crtc))
return false;
if (!i915_modparams.enable_ips)
if (!dev_priv->params.enable_ips)
return false;
if (crtc_state->pipe_bpp > 24)
@ -8140,8 +8169,8 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
return i915_modparams.panel_use_ssc != 0;
if (dev_priv->params.panel_use_ssc >= 0)
return dev_priv->params.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@ -10882,7 +10911,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
unsigned long panel_transcoder_mask = 0;
unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
intel_wakeref_t wf;
@ -10892,9 +10921,6 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
panel_transcoder_mask |= BIT(TRANSCODER_EDP);
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
* and DSI transcoders handled below.
@ -10905,9 +10931,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
for_each_set_bit(panel_transcoder,
&panel_transcoder_mask,
ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
panel_transcoder_mask) {
bool force_thru = false;
enum pipe trans_pipe;
@ -12499,7 +12524,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
continue;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
if (!icl_is_nv12_y_plane(linked->id))
if (!icl_is_nv12_y_plane(dev_priv, linked->id))
continue;
if (crtc_state->active_planes & BIT(linked->id))
@ -12545,6 +12570,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
else if (linked->id == PLANE_SPRITE4)
plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
else if (linked->id == PLANE_SPRITE3)
plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
else if (linked->id == PLANE_SPRITE2)
plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
else
MISSING_CASE(linked->id);
}
@ -12568,12 +12597,15 @@ static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int linetime_wm;
if (!crtc_state->hw.enable)
return 0;
return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
adjusted_mode->crtc_clock);
linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
adjusted_mode->crtc_clock);
return min(linetime_wm, 0x1ff);
}
static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
@ -12581,12 +12613,15 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int linetime_wm;
if (!crtc_state->hw.enable)
return 0;
return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
cdclk_state->logical.cdclk);
linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
cdclk_state->logical.cdclk);
return min(linetime_wm, 0x1ff);
}
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
@ -12595,7 +12630,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
u16 linetime_wm;
int linetime_wm;
if (!crtc_state->hw.enable)
return 0;
@ -12607,7 +12642,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
linetime_wm /= 2;
return linetime_wm;
return min(linetime_wm, 0x1ff);
}
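
/*
 * Worked check of the formula and the new 0x1ff clamp (illustrative
 * numbers): htotal = 2200 at a 148500 kHz pixel clock gives
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, a ~14.8 us line
 * time in 0.125 us units, well under the 511 register maximum.
 */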
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
@ -13570,8 +13605,8 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
static bool fastboot_enabled(struct drm_i915_private *dev_priv)
{
if (i915_modparams.fastboot != -1)
return i915_modparams.fastboot;
if (dev_priv->params.fastboot != -1)
return dev_priv->params.fastboot;
/* Enable fastboot by default on Skylake and newer */
if (INTEL_GEN(dev_priv) >= 9)
@ -13595,8 +13630,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bool ret = true;
u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
(current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
current_config->inherited && !pipe_config->inherited;
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
@ -14007,10 +14041,10 @@ static void verify_wm_state(struct intel_crtc *crtc,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
hw_enabled_slices != dev_priv->dbuf.enabled_slices)
drm_err(&dev_priv->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
dev_priv->enabled_dbuf_slices_mask,
dev_priv->dbuf.enabled_slices,
hw_enabled_slices);
/* planes */
@ -14404,6 +14438,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
crtc->mode_flags = crtc_state->mode_flags;
/*
* The scanline counter increments at the leading edge of hsync.
*
@ -14551,20 +14587,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
state->modeset = true;
state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
if (state->active_pipe_changes) {
if (state->active_pipes != dev_priv->active_pipes) {
ret = _intel_atomic_lock_global_state(state);
if (ret)
return ret;
}
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
intel_modeset_clear_plls(state);
if (IS_HASWELL(dev_priv))
return hsw_mode_set_planes_workaround(state);
@ -14641,11 +14669,10 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
/* See {hsw,vlv,ivb}_plane_ratio() */
return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_IVYBRIDGE(dev_priv);
IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
}
static int intel_atomic_check_planes(struct intel_atomic_state *state,
bool *need_cdclk_calc)
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@ -14687,7 +14714,13 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
if (hweight8(old_active_planes) == hweight8(new_active_planes))
/*
* Not only the number of planes, but a change in the plane
* configuration itself may already require recomputing the min CDCLK,
* because different planes can consume different amounts of DBuf
* bandwidth, per the formula:
* BW per plane = pixel rate * bpp * pipe/plane scale factor
*/
if (old_active_planes == new_active_planes)
continue;
ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
@ -14695,6 +14728,21 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
return ret;
}
return 0;
}
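/*
 * A minimal sketch (hypothetical helper, not part of the patch) of the
 * bandwidth formula quoted in the comment above; the real computation
 * lives in intel_bw.c and accounts for more than this:
 */
static unsigned int plane_dbuf_bw(unsigned int pixel_rate_khz,
				  unsigned int bpp,
				  unsigned int scale_x1000)
{
	/* BW per plane = pixel rate * bpp * pipe/plane scale factor */
	return pixel_rate_khz * bpp * scale_x1000 / 1000;
}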
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_cdclk_state *new_cdclk_state;
struct intel_plane_state *plane_state;
struct intel_bw_state *new_bw_state;
struct intel_plane *plane;
int min_cdclk = 0;
enum pipe pipe;
int ret;
int i;
/*
* active_planes bitmask has been updated, and potentially
* affected planes are part of the state. We can now
@ -14706,6 +14754,30 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
return ret;
}
new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
*need_cdclk_calc = true;
ret = dev_priv->display.bw_calc_min_cdclk(state);
if (ret)
return ret;
new_bw_state = intel_atomic_get_new_bw_state(state);
if (!new_cdclk_state || !new_bw_state)
return 0;
for_each_pipe(dev_priv, pipe) {
min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
/*
* Currently force a CDCLK recomputation only when it needs to increase.
*/
if (new_bw_state->min_cdclk > min_cdclk)
*need_cdclk_calc = true;
}
return 0;
}
@ -14757,16 +14829,13 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_cdclk_state *new_cdclk_state;
struct intel_crtc *crtc;
int ret, i;
bool any_ms = false;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (new_crtc_state->uapi.mode.private_flags !=
old_crtc_state->uapi.mode.private_flags)
if (new_crtc_state->inherited != old_crtc_state->inherited)
new_crtc_state->uapi.mode_changed = true;
}
@ -14868,14 +14937,10 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
ret = intel_atomic_check_planes(state, &any_ms);
ret = intel_atomic_check_planes(state);
if (ret)
goto fail;
new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
any_ms = true;
/*
* distrust_bios_wm will force a full dbuf recomputation
* but the hardware state will only get updated accordingly
@ -14896,10 +14961,6 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
}
ret = intel_atomic_check_crtcs(state);
if (ret)
goto fail;
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@ -14909,6 +14970,22 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
ret = intel_atomic_check_cdclk(state, &any_ms);
if (ret)
goto fail;
if (any_ms) {
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
intel_modeset_clear_plls(state);
}
ret = intel_atomic_check_crtcs(state);
if (ret)
goto fail;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state) &&
@ -14939,8 +15016,24 @@ static int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
return drm_atomic_helper_prepare_planes(state->base.dev,
&state->base);
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i, ret;
ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
if (ret < 0)
return ret;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
bool mode_changed = needs_modeset(crtc_state);
if (mode_changed || crtc_state->update_pipe ||
crtc_state->uapi.color_mgmt_changed) {
intel_dsb_prepare(crtc_state);
}
}
return 0;
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@ -15112,7 +15205,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
* of enabling them on the CRTC's first fastset.
*/
if (new_crtc_state->update_pipe && !modeset &&
old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
old_crtc_state->inherited)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@ -15204,29 +15297,6 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
}
}
static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
u8 required_slices = state->enabled_dbuf_slices_mask;
u8 slices_union = hw_enabled_slices | required_slices;
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
icl_dbuf_slices_update(dev_priv, slices_union);
}
static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
u8 required_slices = state->enabled_dbuf_slices_mask;
/* If the 2nd DBuf slice is no longer required, disable it */
if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
icl_dbuf_slices_update(dev_priv, required_slices);
}
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@ -15393,15 +15463,27 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
&wait_reset);
}
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i)
intel_dsb_cleanup(old_crtc_state);
}
static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct drm_atomic_state *state =
container_of(work, struct drm_atomic_state, commit_work);
struct drm_i915_private *i915 = to_i915(state->dev);
struct intel_atomic_state *state =
container_of(work, struct intel_atomic_state, base.commit_work);
struct drm_i915_private *i915 = to_i915(state->base.dev);
drm_atomic_helper_cleanup_planes(&i915->drm, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
intel_cleanup_dsbs(state);
drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
intel_atomic_helper_free_state(i915);
}
@ -15467,9 +15549,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_encoders_update_prepare(state);
/* Enable all new slices, we might need */
if (state->modeset)
icl_dbuf_slice_pre_update(state);
intel_dbuf_pre_plane_update(state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.commit_modeset_enables(state);
@ -15524,9 +15604,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
dev_priv->display.optimize_watermarks(state, crtc);
}
/* Disable all slices, we don't need */
if (state->modeset)
icl_dbuf_slice_post_update(state);
intel_dbuf_post_plane_update(state);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@ -15535,6 +15613,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
modeset_put_power_domains(dev_priv, put_domains[i]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
/*
* DSB cleanup is done in cleanup_work, in line with framebuffer
* cleanup. So copy and reset the dsb structure here to stay in sync
* with commit_done, and do the actual DSB cleanup later in
* cleanup_work.
*/
old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
}
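/*
 * For reference, fetch_and_zero() is i915's read-then-clear helper from
 * i915_utils.h (reproduced from memory, not part of this patch):
 *
 *	#define fetch_and_zero(ptr) ({			\
 *		typeof(*ptr) __T = *(ptr);		\
 *		*(ptr) = (typeof(*ptr))0;		\
 *		__T;					\
 *	})
 *
 * Ownership of the dsb pointer thus moves from the new state to the old
 * state in a single step, and cleanup_work frees it later.
 */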
/* Underruns don't always raise interrupts, so check manually */
@ -15684,8 +15769,15 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_swap_global_state(state);
if (ret) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
i915_sw_fence_commit(&state->commit_ready);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_dsb_cleanup(new_crtc_state);
drm_atomic_helper_cleanup_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@ -16405,6 +16497,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
if (INTEL_GEN(dev_priv) >= 12)
drm_plane_enable_fb_damage_clips(&cursor->base);
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
@ -16745,7 +16840,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return;
if (INTEL_GEN(dev_priv) >= 12) {
if (IS_ROCKETLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_D); /* DDI TC1 */
intel_ddi_init(dev_priv, PORT_E); /* DDI TC2 */
} else if (INTEL_GEN(dev_priv) >= 12) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_D);
@ -17420,23 +17520,35 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->dbuf.obj.state);
intel_update_cdclk(i915);
intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_crtc *crtc;
struct intel_crtc *crtc;
drm_for_each_crtc(crtc, state->dev) {
struct drm_crtc_state *crtc_state;
for_each_intel_crtc(state->dev, crtc) {
struct intel_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
crtc_state = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
if (crtc_state->hw.active) {
/*
* Preserve the inherited flag to avoid
* taking the full modeset path.
*/
crtc_state->inherited = true;
}
}
drm_for_each_plane(plane, state->dev) {
@ -17578,6 +17690,15 @@ retry:
}
if (crtc_state->hw.active) {
/*
* We've not yet detected sink capabilities
* (audio, infoframes, etc.) and thus we don't want to
* force a full state recomputation yet. We want that to
* happen only for the first real commit from userspace.
* So preserve the inherited flag for the time being.
*/
crtc_state->inherited = true;
ret = drm_atomic_add_affected_planes(state, &crtc->base);
if (ret)
goto out;
@ -17665,7 +17786,8 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
if (IS_I845G(i915) || IS_I865G(i915)) {
mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
mode_config->cursor_height = 1023;
} else if (IS_GEN(i915, 2)) {
} else if (IS_I830(i915) || IS_I85X(i915) ||
IS_I915G(i915) || IS_I915GM(i915)) {
mode_config->cursor_width = 64;
mode_config->cursor_height = 64;
} else {
@ -17711,6 +17833,10 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (ret)
return ret;
ret = intel_dbuf_init(i915);
if (ret)
return ret;
ret = intel_bw_init(i915);
if (ret)
return ret;
@ -18227,6 +18353,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(dev_priv->cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(dev_priv->dbuf.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@ -18257,7 +18385,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
enableddisabled(crtc_state->hw.active));
}
dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
dev_priv->active_pipes = cdclk_state->active_pipes =
dbuf_state->active_pipes = active_pipes;
readout_plane_state(dev_priv);
@ -18348,7 +18477,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
mode->private_flags = I915_MODE_FLAG_INHERITED;
crtc_state->inherited = true;
intel_crtc_compute_pixel_rate(crtc_state);

View File

@ -187,6 +187,13 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
#define for_each_dbuf_slice_in_mask(__slice, __mask) \
for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
for_each_if((BIT(__slice)) & (__mask))
#define for_each_dbuf_slice(__slice) \
for_each_dbuf_slice_in_mask(__slice, BIT(I915_MAX_DBUF_SLICES) - 1)
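/*
 * Example usage of the new iterators (illustrative only, not from the
 * patch):
 *
 *	enum dbuf_slice slice;
 *
 *	for_each_dbuf_slice_in_mask(slice, dev_priv->dbuf.enabled_slices)
 *		drm_dbg_kms(&dev_priv->drm,
 *			    "DBuf slice %d is enabled\n", slice);
 */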
enum port {
PORT_NONE = -1,
@ -608,6 +615,7 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);

View File

@ -125,7 +125,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915_modparams.enable_ips));
yesno(dev_priv->params.enable_ips));
if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
@ -2218,7 +2218,8 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
}
if (INTEL_GEN(dev_priv) >= 10 &&
(connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
!to_intel_connector(connector)->mst_port) ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP))
debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
connector, &i915_dsc_fec_support_fops);

View File

@ -1161,7 +1161,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
drm_WARN(&dev_priv->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
@ -1943,22 +1943,29 @@ static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
return !WARN_ON(power_domains->async_put_domains[0] &
power_domains->async_put_domains[1]);
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
power_domains->async_put_domains[1]);
}
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
err |= WARN_ON(!!power_domains->async_put_wakeref !=
!!__async_put_domains_mask(power_domains));
err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
!!__async_put_domains_mask(power_domains));
for_each_power_domain(domain, __async_put_domains_mask(power_domains))
err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
err |= drm_WARN_ON(&i915->drm,
power_domains->domain_use_count[domain] != 1);
return !err;
}
@ -2200,11 +2207,14 @@ static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref)
{
WARN_ON(power_domains->async_put_wakeref);
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
WARN_ON(!queue_delayed_work(system_unbound_wq,
&power_domains->async_put_work,
msecs_to_jiffies(100)));
drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
&power_domains->async_put_work,
msecs_to_jiffies(100)));
}
static void
@ -2913,6 +2923,53 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
#define RKL_PW_4_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define RKL_PW_3_POWER_DOMAINS ( \
RKL_PW_4_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
* There is no PW_2/PG_2 on RKL.
*
* RKL PW_1/PG_1 domains (under HW/DMC control):
* - DBUF function (note: registers are in PW0)
* - PIPE_A and its planes and VDSC/joining, except VGA
* - transcoder A
* - DDI_A and DDI_B
* - FBC
*
* RKL PW_0/PG_0 domains (under HW/DMC control):
* - PCI
* - clocks except port PLL
* - shared functions:
* * interrupts except pipe interrupts
* * MBus except PIPE_MBUS_DBOX_CTL
* * DBUF registers
* - central power except FBC
* - top-level GTC (DDI-level GTC is in the well associated with the DDI)
*/
#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
RKL_PW_3_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@ -4283,6 +4340,140 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
};
static const struct i915_power_well_desc rkl_power_wells[] = {
{
.name = "always-on",
.always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_1,
.hsw.has_fuses = true,
},
},
{
.name = "DC off",
.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.id = SKL_DISP_DC_OFF,
},
{
.name = "power well 3",
.domains = RKL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = ICL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
.hsw.irq_pipe_mask = BIT(PIPE_B),
.hsw.has_vga = true,
.hsw.has_fuses = true,
},
},
{
.name = "power well 4",
.domains = RKL_PW_4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_4,
.hsw.has_fuses = true,
.hsw.irq_pipe_mask = BIT(PIPE_C),
}
},
{
.name = "DDI A IO",
.domains = ICL_DDI_IO_A_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
}
},
{
.name = "DDI B IO",
.domains = ICL_DDI_IO_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
}
},
{
.name = "DDI D TC1 IO",
.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
},
},
{
.name = "DDI E TC2 IO",
.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
},
},
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
},
},
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
},
},
{
.name = "AUX D TC1",
.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
},
},
{
.name = "AUX E TC2",
.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
.ops = &icl_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
},
},
};
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@ -4322,7 +4513,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
mask = 0;
}
if (!i915_modparams.disable_power_well)
if (!dev_priv->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@ -4365,6 +4556,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
const struct i915_power_well_desc *power_well_descs,
int power_well_count)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
u64 power_well_ids = 0;
int i;
@ -4384,8 +4578,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
if (id == DISP_PW_ID_NONE)
continue;
WARN_ON(id >= sizeof(power_well_ids) * 8);
WARN_ON(power_well_ids & BIT_ULL(id));
drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
power_well_ids |= BIT_ULL(id);
}
@ -4408,11 +4602,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int err;
i915_modparams.disable_power_well =
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
i915_modparams.disable_power_well);
dev_priv->params.disable_power_well);
dev_priv->csr.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
dev_priv->csr.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
@ -4428,7 +4622,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
if (IS_GEN(dev_priv, 12)) {
if (IS_ROCKETLAKE(dev_priv)) {
err = set_power_wells(power_domains, rkl_power_wells);
} else if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
@ -4491,45 +4687,38 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
i915_reg_t reg, bool enable)
static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
enum dbuf_slice slice, bool enable)
{
u32 val, status;
i915_reg_t reg = DBUF_CTL_S(slice);
bool state;
u32 val;
val = intel_de_read(dev_priv, reg);
val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
if (enable)
val |= DBUF_POWER_REQUEST;
else
val &= ~DBUF_POWER_REQUEST;
intel_de_write(dev_priv, reg, val);
intel_de_posting_read(dev_priv, reg);
udelay(10);
status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
if ((enable && !status) || (!enable && status)) {
drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
enable ? "enable" : "disable");
return false;
}
return true;
state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
drm_WARN(&dev_priv->drm, enable != state,
"DBuf slice %d power %s timeout!\n",
slice, enable ? "enable" : "disable");
}
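/*
 * Note on the rework above: the old intel_dbuf_slice_set() returned a
 * bool that its only caller ignored, so the new helper just warns on a
 * timeout instead. The 10 usec settle delay is carried over unchanged
 * from the old code.
 */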
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
icl_dbuf_slices_update(dev_priv, 0);
}
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
int i;
int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
enum dbuf_slice slice;
drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
"Invalid number of dbuf slices requested\n");
drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
"Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
req_slices, num_slices);
drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
@ -4543,36 +4732,36 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
*/
mutex_lock(&power_domains->lock);
for (i = 0; i < max_slices; i++) {
intel_dbuf_slice_set(dev_priv,
DBUF_CTL_S(i),
(req_slices & BIT(i)) != 0);
}
for (slice = DBUF_S1; slice < num_slices; slice++)
gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
dev_priv->enabled_dbuf_slices_mask = req_slices;
dev_priv->dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
skl_ddb_get_hw_state(dev_priv);
dev_priv->dbuf.enabled_slices =
intel_enabled_dbuf_slices_mask(dev_priv);
/*
* Just power up at least one slice; we will figure out later which
* slices we have and which we need.
*/
icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
BIT(DBUF_S1));
gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
dev_priv->dbuf.enabled_slices);
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
icl_dbuf_slices_update(dev_priv, 0);
gen9_dbuf_slices_update(dev_priv, 0);
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
u32 mask, val;
unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
u32 mask, val, i;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
MBUS_ABOX_BT_CREDIT_POOL2_MASK |
@ -4583,11 +4772,16 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
MBUS_ABOX_B_CREDIT(1) |
MBUS_ABOX_BW_CREDIT(1);
intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
if (INTEL_GEN(dev_priv) >= 12) {
intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
}
/*
* gen12 platforms that use abox1 and abox2 for pixel data reads still
* expect us to program the abox_ctl0 register as well, even though
* we don't have to program other instance-0 registers like BW_BUDDY.
*/
if (IS_GEN(dev_priv, 12))
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
@ -5066,7 +5260,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
int i;
unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
int config, i;
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
@ -5074,29 +5269,27 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
else
table = tgl_buddy_page_masks;
for (i = 0; table[i].page_mask != 0; i++)
if (table[i].num_channels == num_channels &&
table[i].type == type)
for (config = 0; table[config].page_mask != 0; config++)
if (table[config].num_channels == num_channels &&
table[config].type == type)
break;
if (table[i].page_mask == 0) {
if (table[config].page_mask == 0) {
drm_dbg(&dev_priv->drm,
"Unknown memory configuration; disabling address buddy logic.\n");
intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
intel_de_write(dev_priv, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
table[i].page_mask);
intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
table[i].page_mask);
for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);
/* Wa_22010178259:tgl */
intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
BW_BUDDY_TLB_REQ_TIMER_MASK,
REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
BW_BUDDY_TLB_REQ_TIMER_MASK,
REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
/* Wa_22010178259:tgl,rkl */
intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
BW_BUDDY_TLB_REQ_TIMER_MASK,
BW_BUDDY_TLB_REQ_TIMER(0x8));
}
}
}
@ -5105,6 +5298,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@ -5127,7 +5321,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_cdclk_init_hw(dev_priv);
/* 5. Enable DBUF. */
icl_dbuf_enable(dev_priv);
gen9_dbuf_enable(dev_priv);
/* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
@ -5138,6 +5332,13 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
/* Wa_14011508470 */
if (IS_GEN(dev_priv, 12)) {
val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
}
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
@ -5150,7 +5351,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
icl_dbuf_disable(dev_priv);
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
@ -5375,7 +5576,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
if (!i915_modparams.disable_power_well)
if (!i915->params.disable_power_well)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(i915);
@ -5399,7 +5600,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
fetch_and_zero(&i915->power_domains.wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915_modparams.disable_power_well)
if (!i915->params.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_display_power_flush_work_sync(i915);
@ -5488,7 +5689,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915_modparams.disable_power_well)
if (!i915->params.disable_power_well)
intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
intel_display_power_flush_work(i915);

View File

@ -314,15 +314,16 @@ intel_display_power_put_async(struct drm_i915_private *i915,
enum dbuf_slice {
DBUF_S1,
DBUF_S2,
I915_MAX_DBUF_SLICES
};
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,

View File

@ -479,16 +479,6 @@ struct intel_atomic_state {
bool dpll_set, modeset;
/*
* Does this transaction change the pipes that are active? This mask
* tracks which CRTCs have changed their active state at the end of
* the transaction (not counting the temporary disable during modesets).
* This mask should only be non-zero when intel_state->modeset is true,
* but the converse is not necessarily true; simply changing a mode may
* not flip the final active status of any CRTC.
*/
u8 active_pipe_changes;
u8 active_pipes;
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@ -506,9 +496,6 @@ struct intel_atomic_state {
*/
bool global_state_changed;
/* Number of enabled DBuf slices */
u8 enabled_dbuf_slices_mask;
struct i915_sw_fence commit_ready;
struct llist_node freed;
@ -643,8 +630,7 @@ struct intel_crtc_scaler_state {
int scaler_id;
};
/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED (1<<0)
/* {crtc,crtc_state}->mode_flags */
/* Flag to get scanline using frame time stamps */
#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
/* Flag to use the scanline counter instead of the pixel counter */
@ -841,6 +827,7 @@ struct intel_crtc_state {
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fifo_changed; /* FIFO split is changed */
bool preload_luts;
bool inherited; /* state inherited from BIOS? */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@ -956,6 +943,9 @@ struct intel_crtc_state {
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
/* I915_MODE_FLAG_* */
u8 mode_flags;
u8 lane_count;
/*
@ -1080,6 +1070,9 @@ struct intel_crtc_state {
/* Only valid on TGL+ */
enum transcoder mst_master_transcoder;
/* For DSB related info */
struct intel_dsb *dsb;
};
enum intel_pipe_crc_source {
@ -1118,6 +1111,10 @@ struct intel_crtc {
*/
bool active;
u8 plane_ids_mask;
/* I915_MODE_FLAG_* */
u8 mode_flags;
unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
@ -1149,9 +1146,6 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
/* per pipe DSB related info */
struct intel_dsb dsb;
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc;
#endif
@ -1373,6 +1367,9 @@ struct intel_dp {
void (*set_idle_link_train)(struct intel_dp *intel_dp);
void (*set_signal_levels)(struct intel_dp *intel_dp);
u8 (*preemph_max)(struct intel_dp *intel_dp);
u8 (*voltage_max)(struct intel_dp *intel_dp);
/* Displayport compliance testing */
struct intel_dp_compliance compliance;

View File

@ -137,6 +137,8 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4};
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
*
* This function is not safe to use prior to encoder type being set.
*/
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
@ -409,7 +411,10 @@ static int intel_dp_rate_index(const int *rates, int len, int rate)
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
drm_WARN_ON(&i915->drm,
!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
intel_dp->num_source_rates,
@ -418,7 +423,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates);
/* Paranoia: there should always be something in common. */
if (WARN_ON(intel_dp->num_common_rates == 0)) {
if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
@ -465,6 +470,15 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int index;
/*
* TODO: Enable fallback on MST links once MST link compute can handle
* the fallback params.
*/
if (intel_dp->is_mst) {
drm_err(&i915->drm, "Link Training Unsuccessful\n");
return -1;
}
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@ -1555,6 +1569,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
@ -1568,10 +1583,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 2; /* 0 or 1 data bytes */
if (WARN_ON(txsize > 20))
if (drm_WARN_ON(&i915->drm, txsize > 20))
return -E2BIG;
WARN_ON(!msg->buffer != !msg->size);
drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
@ -1596,7 +1611,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
if (WARN_ON(rxsize > 20))
if (drm_WARN_ON(&i915->drm, rxsize > 20))
return -E2BIG;
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
@ -1871,10 +1886,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int len;
len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
if (WARN_ON(len <= 0))
if (drm_WARN_ON(&i915->drm, len <= 0))
return 162000;
return intel_dp->common_rates[len - 1];
@ -1882,10 +1898,11 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp)
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i = intel_dp_rate_index(intel_dp->sink_rates,
intel_dp->num_sink_rates, rate);
if (WARN_ON(i < 0))
if (drm_WARN_ON(&i915->drm, i < 0))
i = 0;
return i;
@ -3984,70 +4001,24 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATU
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
/* These are source-specific values. */
u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum port port = encoder->port;
if (HAS_DDI(dev_priv))
return intel_ddi_dp_voltage_max(encoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum port port = encoder->port;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
if (HAS_DDI(dev_priv)) {
return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
}
static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
return DP_TRAIN_PRE_EMPH_LEVEL_2;
}
static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
@ -4330,6 +4301,7 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set)
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return EDP_LINK_TRAIN_400MV_6DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@ -4746,7 +4718,9 @@ intel_dp_sink_can_mst(struct intel_dp *intel_dp)
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
return i915_modparams.enable_dp_mst &&
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return i915->params.enable_dp_mst &&
intel_dp->can_mst &&
intel_dp_sink_can_mst(intel_dp);
}
@ -4763,13 +4737,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
yesno(intel_dp->can_mst), yesno(sink_can_mst),
yesno(i915_modparams.enable_dp_mst));
yesno(i915->params.enable_dp_mst));
if (!intel_dp->can_mst)
return;
intel_dp->is_mst = sink_can_mst &&
i915_modparams.enable_dp_mst;
i915->params.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@ -5595,35 +5569,46 @@ update_status:
"Could not write test response to sink\n");
}
static int
/**
* intel_dp_check_mst_status - service any pending MST interrupts, check link status
* @intel_dp: Intel DP struct
*
* Read any pending MST interrupts, call MST core to handle these and ack the
* interrupts. Check if the main and AUX link state is ok.
*
* Returns:
* - %true if pending interrupts were serviced (or no interrupts were
* pending) w/o detecting an error condition.
* - %false if an error condition - like AUX failure or a loss of link - is
* detected, which needs servicing from the hotplug work.
*/
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool need_retrain = false;
bool link_ok = true;
if (!intel_dp->is_mst)
return -EINVAL;
WARN_ON_ONCE(intel_dp->active_mst_links < 0);
drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[DP_DPRX_ESI_LEN] = {};
bool bret, handled;
bool handled;
int retry;
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (!bret) {
if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
drm_dbg_kms(&i915->drm,
"failed to get ESI - device may have failed\n");
return -EINVAL;
link_ok = false;
break;
}
/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links > 0 && !need_retrain &&
if (intel_dp->active_mst_links > 0 && link_ok &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
drm_dbg_kms(&i915->drm,
"channel EQ not ok, retraining\n");
need_retrain = true;
link_ok = false;
}
drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
@ -5643,7 +5628,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
}
}
return need_retrain;
return link_ok;
}
static bool
@ -5966,7 +5951,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
u8 *dpcd = intel_dp->dpcd;
u8 type;
if (WARN_ON(intel_dp_is_edp(intel_dp)))
if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
if (lspcon->active)
@ -6191,7 +6176,17 @@ intel_dp_detect(struct drm_connector *connector,
goto out;
}
if (intel_dp->reset_link_params) {
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
if (INTEL_GEN(dev_priv) >= 11)
intel_dp_get_dsc_sink_cap(intel_dp);
intel_dp_configure_mst(intel_dp);
/*
* TODO: Reset link params when switching to MST mode, until MST
* supports link training fallback params.
*/
if (intel_dp->reset_link_params || intel_dp->is_mst) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@ -6203,12 +6198,6 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_print_rates(intel_dp);
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
if (INTEL_GEN(dev_priv) >= 11)
intel_dp_get_dsc_sink_cap(intel_dp);
intel_dp_configure_mst(intel_dp);
if (intel_dp->is_mst) {
/*
* If we are in MST mode then this connector
@ -7294,35 +7283,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}
if (intel_dp->is_mst) {
switch (intel_dp_check_mst_status(intel_dp)) {
case -EINVAL:
/*
* If we were in MST mode, and device is not
* there, get out of MST mode
*/
drm_dbg_kms(&i915->drm,
"MST device may have disappeared %d vs %d\n",
intel_dp->is_mst,
intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
return IRQ_NONE;
case 1:
return IRQ_NONE;
default:
break;
}
}
if (!intel_dp->is_mst) {
bool handled;
handled = intel_dp_short_pulse(intel_dp);
if (!handled)
if (!intel_dp_check_mst_status(intel_dp))
return IRQ_NONE;
} else if (!intel_dp_short_pulse(intel_dp)) {
return IRQ_NONE;
}
return IRQ_HANDLED;
@ -8195,8 +8159,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
intel_dp->reset_link_params = true;
intel_dp->pps_pipe = INVALID_PIPE;
intel_dp->active_pipe = INVALID_PIPE;
@ -8212,28 +8174,22 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
*/
drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) &&
port != PORT_B && port != PORT_C))
return false;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
}
intel_dp_set_source_rates(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
* for DP the encoder type can be set by the caller to
* INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
*/
if (type == DRM_MODE_CONNECTOR_eDP)
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) &&
intel_dp_is_edp(intel_dp) &&
port != PORT_B && port != PORT_C))
return false;
drm_dbg_kms(&dev_priv->drm,
"Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
@ -8366,6 +8322,15 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
else
intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
(HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
intel_dig_port->dp.preemph_max = intel_dp_preemph_max_3;
intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3;
} else {
intel_dig_port->dp.preemph_max = intel_dp_preemph_max_2;
intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2;
}
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);

View File

@ -92,10 +92,6 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
u8
intel_dp_voltage_max(struct intel_dp *intel_dp);
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);

View File

@ -348,7 +348,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (i915_modparams.enable_dpcd_backlight == 0 ||
if (i915->params.enable_dpcd_backlight == 0 ||
!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
@ -358,7 +358,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
*/
if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
i915_modparams.enable_dpcd_backlight != 1 &&
i915->params.enable_dpcd_backlight != 1 &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
drm_info(&i915->drm,

View File

@ -34,6 +34,21 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
static u8 dp_voltage_max(u8 preemph)
{
switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
case DP_TRAIN_PRE_EMPH_LEVEL_1:
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
default:
return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
}
}
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
@ -44,23 +59,20 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
u8 preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
}
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
preemph_max = intel_dp->preemph_max(intel_dp);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
v = min(v, dp_voltage_max(p));
voltage_max = intel_dp->voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
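/*
 * Illustrative walk-through (numbers invented): suppose the sink
 * requests voltage-swing level 3 and pre-emphasis level 2, on a
 * platform whose vfuncs report preemph_max = level 2 and
 * voltage_max = level 3. Then p clamps to level 2 (with
 * DP_TRAIN_MAX_PRE_EMPHASIS_REACHED set), dp_voltage_max(level 2)
 * yields swing level 1, so v = min(3, 1) = 1, and every lane is
 * programmed with swing 1 + pre-emphasis 2. That matches the usual DP
 * rule that the combined swing + pre-emphasis level must not exceed 3.
 */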

View File

@ -317,6 +317,25 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
return ret;
}
static void clear_act_sent(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
intel_de_write(i915, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT);
}
static void wait_for_act_sent(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (intel_de_wait_for_set(i915, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
}
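/*
 * The pairing of the two helpers above matters: DP_TP_STATUS_ACT_SENT
 * looks to be a write-1-to-clear status bit (which is what
 * clear_act_sent() relies on), so it is cleared before the payload
 * table update is triggered and only polled afterwards; otherwise a
 * stale bit from a previous update could satisfy the wait immediately.
 */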
static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@ -370,6 +389,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
clear_act_sent(intel_dp);
val = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
@ -377,11 +398,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
val);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
drm_err(&dev_priv->drm,
"Timed out waiting for ACT sent when disabling\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
wait_for_act_sent(intel_dp);
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@ -452,7 +469,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
u32 temp;
bool first_mst_stream;
/* MST encoders are bound to a crtc, not to a connector,
@ -485,8 +501,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
intel_dp->active_mst_links++;
temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@ -514,19 +528,25 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
clear_act_sent(intel_dp);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
val = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
intel_de_write(dev_priv,
TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder),
val);
drm_dbg_kms(&dev_priv->drm, "active links %d\n",
intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
wait_for_act_sent(intel_dp);
drm_dp_update_payload_part2(&intel_dp->mst_mgr);

View File

@ -2934,6 +2934,15 @@ static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
.dco_integer = 0x43, .dco_fraction = 0x4000,
/* the following params are unused */
};
/*
* Display WA #22010492432: tgl
* Divide the nominal .dco_fraction value by 2.
*/
static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = {
.dco_integer = 0x54, .dco_fraction = 0x1800,
/* the following params are unused */
.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
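/*
 * I.e. the nominal 38.4 MHz .dco_fraction of 0x3000 is halved to the
 * 0x1800 above (the nominal value is inferred from the workaround
 * comment; it does not appear in this patch).
 */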
@ -2970,12 +2979,14 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
*pll_params = tgl_tbt_pll_19_2MHz_values;
break;
case 24000:
*pll_params = tgl_tbt_pll_24MHz_values;
break;
case 38400:
*pll_params = tgl_tbt_pll_38_4MHz_values;
break;
}
} else {
switch (dev_priv->dpll.ref_clks.nssc) {
@ -3038,49 +3049,26 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
icl_wrpll_ref_clock(i915));
}
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
static void icl_calc_dpll_state(struct drm_i915_private *i915,
const struct skl_wrpll_params *pll_params,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
encoder->port)))
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
ret = icl_calc_wrpll(crtc_state, &pll_params);
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
if (!ret)
return false;
cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
pll_params.dco_integer;
cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params.kdiv) |
DPLL_CFGCR1_PDIV(pll_params.pdiv);
if (INTEL_GEN(dev_priv) >= 12)
cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
memset(pll_state, 0, sizeof(*pll_state));
pll_state->cfgcr0 = cfgcr0;
pll_state->cfgcr1 = cfgcr1;
pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) |
pll_params->dco_integer;
return true;
pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params->kdiv) |
DPLL_CFGCR1_PDIV(pll_params->pdiv);
if (INTEL_GEN(i915) >= 12)
pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
else
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1;
@ -3493,19 +3481,29 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
unsigned long dpll_mask;
int ret;
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
ret = icl_calc_wrpll(crtc_state, &pll_params);
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
if (!ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate combo PHY PLL state.\n");
return false;
}
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
@ -3539,16 +3537,19 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll;
enum intel_dpll_id dpll_id;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate TBT PLL state.\n");
return false;
}
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
BIT(DPLL_ID_ICL_TBTPLL));

View File

@ -34,152 +34,52 @@
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
static bool is_dsb_busy(struct intel_dsb *dsb)
static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
enum dsb_id id)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id));
}
static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
static bool intel_dsb_enable_engine(struct drm_i915_private *i915,
enum pipe pipe, enum dsb_id id)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
if (DSB_STATUS & dsb_ctrl) {
drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl |= DSB_ENABLE;
intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
intel_de_posting_read(i915, DSB_CTRL(pipe, id));
return true;
}
static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
static bool intel_dsb_disable_engine(struct drm_i915_private *i915,
enum pipe pipe, enum dsb_id id)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
if (DSB_STATUS & dsb_ctrl) {
drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl &= ~DSB_ENABLE;
intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
intel_de_posting_read(i915, DSB_CTRL(pipe, id));
return true;
}
/**
* intel_dsb_get() - Allocate DSB context and return a DSB instance.
* @crtc: intel_crtc structure to get pipe info.
*
* This function provides a handle to a DSB instance for further DSB
* operations.
*
* Returns: address of the intel_dsb instance requested.
* Failure: Returns the same DSB instance, but without a command buffer.
*/
struct intel_dsb *
intel_dsb_get(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_dsb *dsb = &crtc->dsb;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *buf;
intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
return dsb;
if (dsb->refcount++ != 0)
return dsb;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
drm_err(&i915->drm, "Gem object creation failed\n");
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
goto out;
}
buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
drm_err(&i915->drm, "Command buffer creation failed\n");
goto out;
}
dsb->id = DSB1;
dsb->vma = vma;
dsb->cmd_buf = buf;
out:
/*
* On error dsb->cmd_buf will continue to be NULL, making the writes
* pass-through. Leave the dangling ref to be removed later by the
* corresponding intel_dsb_put(): the important error message will
* already be logged above.
*/
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return dsb;
}
/**
* intel_dsb_put() - To destroy DSB context.
* @dsb: intel_dsb structure.
*
* This function destroys the DSB context allocated by a dsb_get(), by
* unpinning and releasing the VMA object associated with it.
*/
void intel_dsb_put(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (!HAS_DSB(i915))
return;
if (drm_WARN_ON(&i915->drm, dsb->refcount == 0))
return;
if (--dsb->refcount == 0) {
i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
dsb->cmd_buf = NULL;
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
}
}
/**
* intel_dsb_indexed_reg_write() - Write to the DSB context for an
* auto-increment register.
* @dsb: intel_dsb structure.
* @crtc_state: intel_crtc_state structure
* @reg: register address.
* @val: value.
*
@ -189,19 +89,20 @@ void intel_dsb_put(struct intel_dsb *dsb)
* is done through mmio write.
*/
void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
u32 val)
void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
i915_reg_t reg, u32 val)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct intel_dsb *dsb = crtc_state->dsb;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 *buf = dsb->cmd_buf;
u32 *buf;
u32 reg_val;
if (!buf) {
if (!dsb) {
intel_de_write(dev_priv, reg, val);
return;
}
buf = dsb->cmd_buf;
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
@ -256,7 +157,7 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
/**
* intel_dsb_reg_write() - Write to the DSB context for a normal
* register.
* @dsb: intel_dsb structure.
* @crtc_state: intel_crtc_state structure
* @reg: register address.
* @val: value.
*
@ -265,17 +166,21 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
* and in all other error conditions the register programming falls
* back to an mmio write.
*/
void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
i915_reg_t reg, u32 val)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 *buf = dsb->cmd_buf;
struct intel_dsb *dsb;
u32 *buf;
if (!buf) {
dsb = crtc_state->dsb;
if (!dsb) {
intel_de_write(dev_priv, reg, val);
return;
}
buf = dsb->cmd_buf;
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
@ -290,26 +195,27 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
* @dsb: intel_dsb structure.
* @crtc_state: intel_crtc_state structure
*
* This function triggers the actual write to hardware using DSB.
* On errors it falls back to MMIO. It also resets the context.
*/
void intel_dsb_commit(struct intel_dsb *dsb)
void intel_dsb_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct intel_dsb *dsb = crtc_state->dsb;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
u32 tail;
if (!dsb->free_pos)
if (!(dsb && dsb->free_pos))
return;
if (!intel_dsb_enable_engine(dsb))
if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
goto reset;
if (is_dsb_busy(dsb)) {
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
drm_err(&dev_priv->drm,
"HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
@ -322,7 +228,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
memset(&dsb->cmd_buf[dsb->free_pos], 0,
(tail - dsb->free_pos * 4));
if (is_dsb_busy(dsb)) {
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
drm_err(&dev_priv->drm,
"TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
@ -332,7 +238,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
i915_ggtt_offset(dsb->vma), tail);
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
if (wait_for(!is_dsb_busy(dsb), 1)) {
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
drm_err(&dev_priv->drm,
"Timed out waiting for DSB workload completion.\n");
goto reset;
@ -341,5 +247,83 @@ void intel_dsb_commit(struct intel_dsb *dsb)
reset:
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
intel_dsb_disable_engine(dsb);
intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
}
/**
* intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
* @crtc_state: intel_crtc_state structure to prepare the associated dsb instance for.
*
* This function prepares the command buffer which is used to store dsb
* instructions along with data.
*/
void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_dsb *dsb;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *buf;
intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
return;
dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb) {
drm_err(&i915->drm, "DSB object creation failed\n");
return;
}
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
drm_err(&i915->drm, "Gem object creation failed\n");
kfree(dsb);
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
kfree(dsb);
goto out;
}
buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
drm_err(&i915->drm, "Command buffer creation failed\n");
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
kfree(dsb);
goto out;
}
dsb->id = DSB1;
dsb->vma = vma;
dsb->cmd_buf = buf;
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
crtc_state->dsb = dsb;
out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
/**
* intel_dsb_cleanup() - To cleanup DSB context.
* @crtc_state: intel_crtc_state structure whose associated dsb instance to clean up.
*
* This function cleans up the DSB context by unpinning and releasing
* the VMA object associated with it.
*/
void intel_dsb_cleanup(struct intel_crtc_state *crtc_state)
{
if (!crtc_state->dsb)
return;
i915_vma_unpin_and_release(&crtc_state->dsb->vma, I915_VMA_RELEASE_MAP);
kfree(crtc_state->dsb);
crtc_state->dsb = NULL;
}
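Taken together, the new entry points replace the refcounted intel_dsb_get()/intel_dsb_put() pair with an explicit prepare/write/commit/cleanup lifecycle hung off the crtc_state. A minimal sketch of a caller (hypothetical helper; the gamma LUT register and loop are illustrative and not part of this diff):
static void example_load_gamma_lut(struct intel_crtc_state *crtc_state,
				   const u32 *lut, int n)
{
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	int i;

	intel_dsb_prepare(crtc_state);		/* alloc/pin the cmd buffer */

	for (i = 0; i < n; i++)			/* auto-increment data port */
		intel_dsb_indexed_reg_write(crtc_state,
					    PREC_PAL_DATA(pipe), lut[i]);

	intel_dsb_commit(crtc_state);		/* execute, or mmio fallback */
	intel_dsb_cleanup(crtc_state);		/* unpin the vma, free the dsb */
}
In the driver proper, prepare and cleanup would presumably bracket an atomic commit rather than sit in a single helper; the sketch only compresses the lifecycle into one place.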

View File

@ -10,7 +10,7 @@
#include "i915_reg.h"
struct intel_crtc;
struct intel_crtc_state;
struct i915_vma;
enum dsb_id {
@ -22,7 +22,6 @@ enum dsb_id {
};
struct intel_dsb {
long refcount;
enum dsb_id id;
u32 *cmd_buf;
struct i915_vma *vma;
@ -41,12 +40,12 @@ struct intel_dsb {
u32 ins_start_offset;
};
struct intel_dsb *
intel_dsb_get(struct intel_crtc *crtc);
void intel_dsb_put(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
u32 val);
void intel_dsb_commit(struct intel_dsb *dsb);
void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
i915_reg_t reg, u32 val);
void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
i915_reg_t reg, u32 val);
void intel_dsb_commit(const struct intel_crtc_state *crtc_state);
#endif

View File

@ -47,19 +47,6 @@
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
/*
* In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
* origin so the x and y offsets can actually fit the registers. As a
* consequence, the fence doesn't really start exactly at the display plane
* address we program because it starts at the real start of the buffer, so we
* have to take this into consideration here.
*/
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}
/*
* For SKL+, the plane source size used by the hardware is based on the value we
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
@ -141,18 +128,17 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
intel_de_write(dev_priv, FBC_FENCE_OFF,
params->crtc.fence_y_offset);
params->fence_y_offset);
}
/* enable it... */
fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl = FBC_CTL_INTERVAL(params->interval);
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff);
if (params->fence_id >= 0)
fbc_ctl |= params->fence_id;
fbc_ctl |= FBC_CTL_FENCENO(params->fence_id);
intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
}
@ -175,7 +161,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
intel_de_write(dev_priv, DPFC_FENCE_YOFF,
params->crtc.fence_y_offset);
params->fence_y_offset);
} else {
intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
}
@ -243,7 +229,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
params->fence_y_offset);
}
} else {
if (IS_GEN(dev_priv, 6)) {
@ -253,7 +239,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
params->crtc.fence_y_offset);
params->fence_y_offset);
/* enable it... */
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@ -320,7 +306,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
params->fence_y_offset);
} else if (dev_priv->ggtt.num_fences) {
intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
@ -631,8 +617,8 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv,
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
* the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
* variables instead of just looking at the pipe/plane size.
* the X and Y offset registers. That's why we include the src x/y offsets
* instead of just looking at the plane size.
*/
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
@ -705,7 +691,6 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
cache->plane.adjusted_x = plane_state->color_plane[0].x;
cache->plane.adjusted_y = plane_state->color_plane[0].y;
cache->plane.y = plane_state->uapi.src.y1 >> 16;
cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
@ -713,6 +698,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.stride = fb->pitches[0];
cache->fb.modifier = fb->modifier;
/* FBC1 compression interval: arbitrary choice of 1 second */
cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
!plane_state->vma->fence);
@ -740,7 +730,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
return false;
}
if (!i915_modparams.enable_fbc) {
if (!dev_priv->params.enable_fbc) {
fbc->no_fbc_reason = "disabled per module param or by default";
return false;
}
@ -883,10 +873,12 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
memset(params, 0, sizeof(*params));
params->fence_id = cache->fence_id;
params->fence_y_offset = cache->fence_y_offset;
params->interval = cache->interval;
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride;
@ -1017,7 +1009,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
fbc->flip_pending = false;
if (!i915_modparams.enable_fbc) {
if (!dev_priv->params.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
__intel_fbc_disable(dev_priv);
@ -1090,11 +1082,19 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (!HAS_FBC(dev_priv))
return;
/*
* GTT tracking does not nuke the entire cfb
* so don't clear busy_bits set for some other
* reason.
*/
if (origin == ORIGIN_GTT)
return;
mutex_lock(&fbc->lock);
fbc->busy_bits &= ~frontbuffer_bits;
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
if (origin == ORIGIN_FLIP)
goto out;
if (!fbc->busy_bits && fbc->crtc &&
@ -1370,8 +1370,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
if (i915_modparams.enable_fbc >= 0)
return !!i915_modparams.enable_fbc;
if (dev_priv->params.enable_fbc >= 0)
return !!dev_priv->params.enable_fbc;
if (!HAS_FBC(dev_priv))
return 0;
@ -1415,20 +1415,15 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
i915_modparams.enable_fbc);
dev_priv->params.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
return;
}
/* This value was pulled out of someone's hat */
if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
intel_de_write(dev_priv, FBC_CONTROL,
500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
* matches the hardware state. */

View File

@ -1923,8 +1923,11 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
return (INTEL_GEN(dev_priv) >= 10 ||
IS_GEMINILAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv));
}
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)

View File

@ -1540,7 +1540,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
}
static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_connector *connector =
@ -1563,8 +1563,7 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
drm_err(&i915->drm,
"Ri' mismatch detected, link check failed (%x)\n",
drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n",
intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
port)));
return false;
@ -1572,6 +1571,20 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int retry;
for (retry = 0; retry < 3; retry++)
if (intel_hdmi_hdcp_check_link_once(intel_dig_port))
return true;
drm_err(&i915->drm, "Link check failed\n");
return false;
}
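/*
 * Hedged note: a single Ri' mismatch can be transient, so the link is
 * only reported broken, leaving re-authentication to the HDCP core,
 * after three consecutive failed checks.
 */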
struct hdcp2_hdmi_msg_timeout {
u8 msg_id;
u16 timeout;
@ -3082,6 +3095,24 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
enum phy phy = intel_port_to_phy(dev_priv, port);
WARN_ON(port == PORT_C);
/*
* Pin mapping for RKL depends on which PCH is present. With TGP, the
* final two outputs use type-c pins, even though they're actually
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
}
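/*
 * Concretely (hedged reading of the mapping above): with TGP,
 * PHY_C -> GMBUS_PIN_9_TC1_ICP and PHY_D -> GMBUS_PIN_10_TC2_ICP;
 * with CMP, PHY_A..PHY_D -> GMBUS_PIN_1_BXT + 0..3, i.e. the
 * traditional DDI A-D pins.
 */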
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@ -3119,7 +3150,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
return ddc_pin;
}
if (HAS_PCH_MCC(dev_priv))
if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);

View File

@ -89,6 +89,15 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
{
enum phy phy = intel_port_to_phy(dev_priv, port);
/*
* RKL + TGP PCH is a special case; we effectively choose the hpd_pin
* based on the DDI rather than the PHY (i.e., the last two outputs
* shold be HPD_PORT_{D,E} rather than {C,D}. Note that this differs
* from the behavior of both TGL+TGP and RKL+CMP.
*/
if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
return HPD_PORT_A + port - PORT_A;
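/*
 * Concretely (hedged): with TGP, RKL's PORT_D/PORT_E map straight to
 * HPD_PORT_D/HPD_PORT_E here, whereas the PHY-based switch below
 * would have yielded HPD_PORT_C/HPD_PORT_D for the same outputs.
 */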
switch (phy) {
case PHY_F:
return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;

View File

@ -784,8 +784,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
if (i915_modparams.lvds_channel_mode > 0)
return i915_modparams.lvds_channel_mode == 2;
if (dev_priv->params.lvds_channel_mode > 0)
return dev_priv->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)

View File

@ -801,7 +801,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
const struct firmware *fw = NULL;
const char *name = i915_modparams.vbt_firmware;
const char *name = dev_priv->params.vbt_firmware;
int ret;
if (!name || !*name)

View File

@ -100,12 +100,15 @@
#define CLK_RGB24_MASK 0x0
#define CLK_RGB16_MASK 0x070307
#define CLK_RGB15_MASK 0x070707
#define CLK_RGB8I_MASK 0xffffff
#define RGB30_TO_COLORKEY(c) \
((((c) & 0x3fc00000) >> 6) | (((c) & 0x000ff000) >> 4) | (((c) & 0x000003fc) >> 2))
#define RGB16_TO_COLORKEY(c) \
(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3))
#define RGB15_TO_COLORKEY(c) \
(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
((((c) & 0x7c00) << 9) | (((c) & 0x03e0) << 6) | (((c) & 0x001f) << 3))
#define RGB8I_TO_COLORKEY(c) \
((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0))
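/*
 * Hand-checked examples of the packing (hedged, worked by hand):
 *   RGB16_TO_COLORKEY(0xffff)     == 0x00f8fcf8
 *   RGB15_TO_COLORKEY(0x7fff)     == 0x00f8f8f8
 *   RGB30_TO_COLORKEY(0x3fffffff) == 0x00ffffff
 * Each source channel lands left-justified in its 8-bit lane; the
 * newly added parentheses also keep expression arguments such as
 * (a | b) from mis-expanding.
 */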
/* overlay flip addr flag */
#define OFC_UPDATE 0x1
@ -682,8 +685,8 @@ static void update_colorkey(struct intel_overlay *overlay,
switch (format) {
case DRM_FORMAT_C8:
key = 0;
flags |= CLK_RGB8I_MASK;
key = RGB8I_TO_COLORKEY(key);
flags |= CLK_RGB24_MASK;
break;
case DRM_FORMAT_XRGB1555:
key = RGB15_TO_COLORKEY(key);
@ -693,6 +696,11 @@ static void update_colorkey(struct intel_overlay *overlay,
key = RGB16_TO_COLORKEY(key);
flags |= CLK_RGB16_MASK;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
key = RGB30_TO_COLORKEY(key);
flags |= CLK_RGB24_MASK;
break;
default:
flags |= CLK_RGB24_MASK;
break;
@ -777,9 +785,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
u32 oconfig;
const struct intel_crtc_state *crtc_state =
overlay->crtc->config;
u32 oconfig = 0;
oconfig = OCONF_CC_OUT_8BIT;
if (crtc_state->gamma_enable &&
crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
oconfig |= OCONF_CC_OUT_8BIT;
if (crtc_state->gamma_enable)
oconfig |= OCONF_GAMMA2_ENABLE;
if (IS_GEN(dev_priv, 4))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?

View File

@ -521,10 +521,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
if (i915_modparams.invert_brightness < 0)
if (dev_priv->params.invert_brightness < 0)
return val;
if (i915_modparams.invert_brightness > 0 ||
if (dev_priv->params.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val + panel->backlight.min;
}

View File

@ -83,7 +83,7 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
{
switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
return i915_modparams.enable_psr;
return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@ -426,6 +426,12 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0US;
if (dev_priv->params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
val |= EDP_PSR_TP2_TP3_TIME_2500us;
goto check_tp3_sel;
}
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
@ -444,6 +450,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
check_tp3_sel:
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
@ -495,6 +502,27 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
if (dev_priv->params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
return val;
}
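/*
 * Example (hedged): a VBT psr2_tp2_tp3_wakeup_time_us of 80 selects
 * EDP_PSR2_TP2_TIME_100us, the first bucket not shorter than the
 * requested wakeup time, while psr_safest_params short-circuits to
 * the slowest 2500us setting.
 */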
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@ -507,16 +535,23 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_Y_COORDINATE_ENABLE;
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
val |= intel_psr2_get_tp_time(intel_dp);
if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
if (INTEL_GEN(dev_priv) >= 12) {
/*
* TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
* values from BSpec. To set optimal power consumption, modes below
* 4k resolution need to decrease IO_BUFFER_WAKE and FAST_WAKE, and
* modes above 4k resolution need to increase them.
*/
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
val |= TGL_EDP_PSR2_FAST_WAKE(7);
} else if (INTEL_GEN(dev_priv) >= 9) {
val |= EDP_PSR2_IO_BUFFER_WAKE(7);
val |= EDP_PSR2_FAST_WAKE(7);
}
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
@ -657,6 +692,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
if (crtc_state->crc_enabled) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled because it would inhibit pipe CRC calculation\n");
return false;
}
if (INTEL_GEN(dev_priv) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
@ -671,14 +712,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
max_bpp = 24;
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
return false;
}
if (crtc_state->pipe_bpp > max_bpp) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, pipe bpp %d > max supported %d\n",
@ -699,9 +732,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
if (crtc_state->crc_enabled) {
/*
* Some platforms lack PSR2 HW tracking and instead require manual
* tracking by software. In this case, the driver is required to track
* the areas that need updates and program hardware to send selective
* updates.
*
* So until the software tracking is implemented, PSR2 needs to be
* disabled for platforms without PSR2 HW tracking.
*/
if (!HAS_PSR_HW_TRACKING(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled because it would inhibit pipe CRC calculation\n");
"No PSR2 HW tracking in the platform\n");
return false;
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
return false;
}
@ -1450,9 +1500,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
if (i915_modparams.enable_psr == -1)
if (dev_priv->params.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
dev_priv->params.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))

View File

@ -411,6 +411,7 @@ static const char *sdvo_cmd_name(u8 cmd)
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
const char *cmd_name;
int i, pos = 0;
char buffer[64];
@ -431,7 +432,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
else
BUF_PRINT("(%02X)", cmd);
WARN_ON(pos >= sizeof(buffer) - 1);
drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
@ -533,6 +534,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
@ -597,7 +599,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
WARN_ON(pos >= sizeof(buffer) - 1);
drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
@ -1081,6 +1083,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@ -1106,7 +1109,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
HDMI_QUANTIZATION_RANGE_FULL);
ret = hdmi_avi_infoframe_check(frame);
if (WARN_ON(ret))
if (drm_WARN_ON(&dev_priv->drm, ret))
return false;
return true;
@ -1115,6 +1118,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
ssize_t len;
@ -1123,11 +1127,12 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
return true;
if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
if (drm_WARN_ON(&dev_priv->drm,
frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
return false;
len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
if (WARN_ON(len < 0))
if (drm_WARN_ON(&dev_priv->drm, len < 0))
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@ -1237,6 +1242,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(pipe_config->uapi.crtc->dev);
unsigned dotclock = pipe_config->port_clock;
struct dpll *clock = &pipe_config->dpll;
@ -1257,7 +1263,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
clock->m1 = 12;
clock->m2 = 8;
} else {
WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
drm_WARN(&dev_priv->drm, 1,
"SDVO TV clock out of range: %i\n", dotclock);
}
pipe_config->clock_set = true;
@ -2293,7 +2300,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
return 0;
}
WARN_ON(1);
drm_WARN_ON(connector->dev, 1);
*val = 0;
} else if (property == intel_sdvo_connector->top ||
property == intel_sdvo_connector->bottom)

View File

@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
@ -333,6 +334,21 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
{
if (IS_ROCKETLAKE(i915))
return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
else
return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
}
bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
return INTEL_GEN(dev_priv) >= 11 &&
icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
}
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
{
return INTEL_GEN(dev_priv) >= 11 &&
@ -3003,7 +3019,7 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
if (icl_is_hdr_plane(dev_priv, plane_id)) {
*num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
return icl_hdr_plane_formats;
} else if (icl_is_nv12_y_plane(plane_id)) {
} else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
*num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
return icl_sdr_y_plane_formats;
} else {
@ -3046,6 +3062,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
unsigned int supported_csc;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@ -3120,9 +3137,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0,
supported_rotations);
supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
drm_plane_create_color_properties(&plane->base,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
supported_csc,
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709,
@ -3136,6 +3157,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
if (INTEL_GEN(dev_priv) >= 12)
drm_plane_enable_fb_damage_clips(&plane->base);
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;

View File

@ -32,21 +32,14 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
static inline bool icl_is_nv12_y_plane(enum plane_id id)
{
/* Don't need to do a gen check, these planes are only available on gen11 */
if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
return true;
return false;
}
static inline u8 icl_hdr_plane_mask(void)
{
return BIT(PLANE_PRIMARY) |
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
enum plane_id plane_id);
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,

View File

@ -360,12 +360,12 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
}
if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
!WARN_ON(dig_port->tc_legacy_port))
!drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
if (dig_port->tc_legacy_port) {
WARN_ON(max_lanes != 4);
drm_WARN_ON(&i915->drm, max_lanes != 4);
dig_port->tc_mode = TC_PORT_LEGACY;
return;
@ -445,18 +445,20 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 live_status_mask = tc_port_live_status_mask(dig_port);
bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
enum tc_port_mode mode;
if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
if (in_safe_mode ||
drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
return TC_PORT_TBT_ALT;
mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
if (live_status_mask) {
enum tc_port_mode live_mode = fls(live_status_mask) - 1;
if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
mode = live_mode;
}
@ -505,7 +507,9 @@ static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
{
WARN_ON(dig_port->tc_link_refcount);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
dig_port->tc_link_refcount = refcount;
}

View File

@ -1158,7 +1158,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
/* pixel counter doesn't work on i965gm TV output */
if (IS_I965GM(dev_priv))
adjusted_mode->private_flags |=
pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
@ -1328,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
/* pixel counter doesn't work on i965gm TV output */
if (IS_I965GM(dev_priv))
adjusted_mode->private_flags |=
pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
return 0;

View File

@ -476,13 +476,13 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
* POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
*
* - ICL eDP/DSI transcoder
* - TGL pipe A
* - Gen12+ (except RKL) pipe A
*
* For any other pipe, VDSC/joining uses the power well associated with
* the pipe in use. Hence another reference on the pipe power domain
* will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A)
if (INTEL_GEN(i915) >= 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (is_pipe_dsc(crtc_state))
return POWER_DOMAIN_PIPE(pipe);

View File

@ -298,7 +298,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv)) {
/* Enable Frame time stamp based scanline reporting */
adjusted_mode->private_flags |=
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
/* Dual link goes to DSI transcoder A. */
@ -1097,8 +1097,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
/* Enable Frame time stamo based scanline reporting */
adjusted_mode->private_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =

View File

@ -112,8 +112,7 @@ static void lut_close(struct i915_gem_context *ctx)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
rcu_read_unlock();
i915_gem_object_lock(obj);
spin_lock(&obj->lut_lock);
list_for_each_entry(lut, &obj->lut_list, obj_link) {
if (lut->ctx != ctx)
continue;
@ -124,8 +123,7 @@ static void lut_close(struct i915_gem_context *ctx)
list_del(&lut->obj_link);
break;
}
i915_gem_object_unlock(obj);
rcu_read_lock();
spin_unlock(&obj->lut_lock);
if (&lut->obj_link != &obj->lut_list) {
i915_lut_handle_free(lut);
@ -650,7 +648,7 @@ static void context_close(struct i915_gem_context *ctx)
* context close.
*/
if (!i915_gem_context_is_persistent(ctx) ||
!i915_modparams.enable_hangcheck)
!ctx->i915->params.enable_hangcheck)
kill_context(ctx);
i915_gem_context_put(ctx);
@ -667,7 +665,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
* reset] are allowed to survive past termination. We require
* hangcheck to ensure that the persistent requests are healthy.
*/
if (!i915_modparams.enable_hangcheck)
if (!ctx->i915->params.enable_hangcheck)
return -EINVAL;
i915_gem_context_set_persistence(ctx);

View File

@ -217,6 +217,7 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
.name = "i915_gem_object_dmabuf",
.get_pages = i915_gem_object_get_pages_dmabuf,
.put_pages = i915_gem_object_put_pages_dmabuf,
};

View File

@ -45,13 +45,6 @@ struct eb_vma_array {
struct eb_vma vma[];
};
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
#define __EXEC_OBJECT_HAS_PIN BIT(31)
#define __EXEC_OBJECT_HAS_FENCE BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
@ -260,8 +253,6 @@ struct i915_execbuffer {
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
unsigned long vaddr; /** Current kmap address */
unsigned long page; /** Currently mapped page index */
unsigned int gen; /** Cached value of INTEL_GEN */
bool use_64bit_reloc : 1;
bool has_llc : 1;
@ -605,23 +596,6 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
const struct drm_i915_gem_object *obj)
{
if (!i915_gem_object_has_struct_page(obj))
return false;
if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
return true;
if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
return false;
return (cache->has_llc ||
obj->cache_dirty ||
obj->cache_level != I915_CACHE_NONE);
}
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
@ -815,14 +789,14 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
if (err == 0) { /* And nor has this handle */
struct drm_i915_gem_object *obj = vma->obj;
i915_gem_object_lock(obj);
spin_lock(&obj->lut_lock);
if (idr_find(&eb->file->object_idr, handle) == obj) {
list_add(&lut->obj_link, &obj->lut_list);
} else {
radix_tree_delete(&ctx->handles_vma, handle);
err = -ENOENT;
}
i915_gem_object_unlock(obj);
spin_unlock(&obj->lut_lock);
}
mutex_unlock(&ctx->mutex);
}
@ -945,8 +919,6 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
cache->page = -1;
cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->gen = INTEL_GEN(i915);
cache->has_llc = HAS_LLC(i915);
@ -958,25 +930,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->target = NULL;
}
static inline void *unmask_page(unsigned long p)
{
return (void *)(uintptr_t)(p & PAGE_MASK);
}
static inline unsigned int unmask_flags(unsigned long p)
{
return p & ~PAGE_MASK;
}
#define KMAP 0x4 /* after CLFLUSH_FLAGS */
static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
struct drm_i915_private *i915 =
container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
return &i915->ggtt;
}
#define RELOC_TAIL 4
static int reloc_gpu_chain(struct reloc_cache *cache)
@ -1089,181 +1042,6 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
return err;
}
static void reloc_cache_reset(struct reloc_cache *cache)
{
void *vaddr;
if (!cache->vaddr)
return;
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP) {
if (cache->vaddr & CLFLUSH_AFTER)
mb();
kunmap_atomic(vaddr);
i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
}
cache->vaddr = 0;
cache->page = -1;
}
static void *reloc_kmap(struct drm_i915_gem_object *obj,
struct reloc_cache *cache,
unsigned long page)
{
void *vaddr;
if (cache->vaddr) {
kunmap_atomic(unmask_page(cache->vaddr));
} else {
unsigned int flushes;
int err;
err = i915_gem_object_prepare_write(obj, &flushes);
if (err)
return ERR_PTR(err);
BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
cache->vaddr = flushes | KMAP;
cache->node.mm = (void *)obj;
if (flushes)
mb();
}
vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
cache->page = page;
return vaddr;
}
static void *reloc_iomap(struct drm_i915_gem_object *obj,
struct reloc_cache *cache,
unsigned long page)
{
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
unsigned long offset;
void *vaddr;
if (cache->vaddr) {
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int err;
if (i915_gem_object_is_tiled(obj))
return ERR_PTR(-EINVAL);
if (use_cpu_reloc(cache, obj))
return NULL;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
return ERR_PTR(err);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
cache->node.start = vma->node.start;
cache->node.mm = (void *)vma;
}
}
offset = cache->node.start;
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
} else {
offset += page << PAGE_SHIFT;
}
vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
return vaddr;
}
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
struct reloc_cache *cache,
unsigned long page)
{
void *vaddr;
if (cache->page == page) {
vaddr = unmask_page(cache->vaddr);
} else {
vaddr = NULL;
if ((cache->vaddr & KMAP) == 0)
vaddr = reloc_iomap(obj, cache, page);
if (!vaddr)
vaddr = reloc_kmap(obj, cache, page);
}
return vaddr;
}
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
if (flushes & CLFLUSH_BEFORE) {
clflushopt(addr);
mb();
}
*addr = value;
/*
* Writes to the same cacheline are serialised by the CPU
* (including clflush). On the write path, we only require
* that it hits memory in an orderly fashion and place
* mb barriers at the start and end of the relocation phase
* to ensure ordering of clflush wrt to the system.
*/
if (flushes & CLFLUSH_AFTER)
clflushopt(addr);
} else
*addr = value;
}
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@ -1429,17 +1207,6 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
static inline bool use_reloc_gpu(struct i915_vma *vma)
{
if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
return true;
if (DBG_FORCE_RELOC)
return false;
return !dma_resv_test_signaled_rcu(vma->resv, true);
}
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
struct page *page;
@ -1454,10 +1221,10 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
static int __reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
{
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
@ -1473,7 +1240,7 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
return false;
return PTR_ERR(batch);
addr = gen8_canonical_addr(vma->node.start + offset);
if (gen >= 8) {
@ -1522,55 +1289,21 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = target_addr;
}
return true;
}
static bool reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
{
if (eb->reloc_cache.vaddr)
return false;
if (!use_reloc_gpu(vma))
return false;
return __reloc_entry_gpu(eb, vma, offset, target_addr);
return 0;
}
static u64
relocate_entry(struct i915_vma *vma,
relocate_entry(struct i915_execbuffer *eb,
struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
struct i915_execbuffer *eb,
const struct i915_vma *target)
{
u64 target_addr = relocation_target(reloc, target);
u64 offset = reloc->offset;
int err;
if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
bool wide = eb->reloc_cache.use_64bit_reloc;
void *vaddr;
repeat:
vaddr = reloc_vaddr(vma->obj,
&eb->reloc_cache,
offset >> PAGE_SHIFT);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
clflush_write32(vaddr + offset_in_page(offset),
lower_32_bits(target_addr),
eb->reloc_cache.vaddr);
if (wide) {
offset += sizeof(u32);
target_addr >>= 32;
wide = false;
goto repeat;
}
}
err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
if (err)
return err;
return target->node.start | UPDATE;
}
@ -1626,8 +1359,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
err = i915_vma_bind(target->vma,
target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
if (err)
return err;
}
}
@ -1636,8 +1368,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* If the relocation already has the right value in it, no
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@ -1669,7 +1400,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
return relocate_entry(ev->vma, reloc, eb, target->vma);
return relocate_entry(eb, ev->vma, reloc, target->vma);
}
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
@ -1707,10 +1438,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* this is bad and so lockdep complains vehemently.
*/
copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
}
if (unlikely(copied))
return -EFAULT;
remain -= count;
do {
@ -1718,8 +1447,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
remain = (int)offset;
goto out;
return (int)offset;
} else {
/*
* Note that reporting an error now
@ -1749,9 +1477,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
out:
reloc_cache_reset(&eb->reloc_cache);
return remain;
return 0;
}
static int eb_relocate(struct i915_execbuffer *eb)
@ -1911,8 +1638,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@ -2659,7 +2386,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.i915 = i915;
eb.file = file;
eb.args = args;
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
if (!(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;

View File

@ -137,6 +137,7 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
.name = "i915_gem_object_internal",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = i915_gem_object_get_pages_internal,

View File

@ -9,6 +9,7 @@
#include "i915_drv.h"
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
.get_pages = i915_gem_object_get_pages_buddy,

View File

@ -216,12 +216,12 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case -ENXIO: /* unable to access backing store (on device) */
return VM_FAULT_SIGBUS;
case -ENOSPC: /* shmemfs allocation failure */
case -ENOMEM: /* our allocation failure */
return VM_FAULT_OOM;
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:

View File

@ -53,7 +53,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
struct lock_class_key *key)
{
__mutex_init(&obj->mm.lock, "obj->mm.lock", key);
__mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@ -61,6 +61,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->lut_lock);
spin_lock_init(&obj->mmo.lock);
obj->mmo.offsets = RB_ROOT;
@ -72,6 +73,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
&obj->mm.lock);
}
/**
@ -100,21 +105,29 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
struct i915_lut_handle bookmark = {};
struct i915_mmap_offset *mmo, *mn;
struct i915_lut_handle *lut, *ln;
LIST_HEAD(close);
i915_gem_object_lock(obj);
spin_lock(&obj->lut_lock);
list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
struct i915_gem_context *ctx = lut->ctx;
if (ctx->file_priv != fpriv)
continue;
if (ctx && ctx->file_priv == fpriv) {
i915_gem_context_get(ctx);
list_move(&lut->obj_link, &close);
}
i915_gem_context_get(ctx);
list_move(&lut->obj_link, &close);
/* Break long locks, and carefully continue on from this spot */
if (&ln->obj_link != &obj->lut_list) {
list_add_tail(&bookmark.obj_link, &ln->obj_link);
if (cond_resched_lock(&obj->lut_lock))
list_safe_reset_next(&bookmark, ln, obj_link);
__list_del_entry(&bookmark.obj_link);
}
}
i915_gem_object_unlock(obj);
spin_unlock(&obj->lut_lock);
spin_lock(&obj->mmo.lock);
rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
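The i915_gem_close_object() hunk above swaps the object mutex for obj->lut_lock and breaks long lock hold times with a bookmark: a dummy entry is parked in the list so the walk can drop the spinlock via cond_resched_lock() and resume where it left off. A minimal sketch of that idiom over a generic list; the item type and its is_bookmark flag are illustrative stand-ins (the driver instead marks bookmarks with a NULL ctx):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;
	bool is_bookmark;	/* illustrative discriminator, not in i915 */
};

static void walk_with_bookmark(struct list_head *head, spinlock_t *lock)
{
	struct item bookmark = { .is_bookmark = true };
	struct item *it, *n;

	spin_lock(lock);
	list_for_each_entry_safe(it, n, head, link) {
		if (it->is_bookmark)
			continue;	/* someone else's bookmark */

		/* ... process 'it' under the lock ... */

		/*
		 * Break long lock hold times: park the bookmark after
		 * the next entry, yield the lock if contended, then
		 * resume from wherever the bookmark now sits.
		 */
		if (&n->link != head) {
			list_add_tail(&bookmark.link, &n->link);
			if (cond_resched_lock(lock))
				list_safe_reset_next(&bookmark, n, link);
			__list_del_entry(&bookmark.link);
		}
	}
	spin_unlock(lock);
}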

View File

@ -126,6 +126,17 @@ void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
intel_engine_pm_put(ce->engine);
}
static int
move_obj_to_gpu(struct drm_i915_gem_object *obj,
struct i915_request *rq,
bool write)
{
if (obj->cache_dirty & ~obj->cache_coherent)
i915_gem_clflush_object(obj, 0);
return i915_request_await_object(rq, obj, write);
}
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
@ -143,12 +154,6 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (unlikely(err))
return err;
if (obj->cache_dirty & ~obj->cache_coherent) {
i915_gem_object_lock(obj);
i915_gem_clflush_object(obj, 0);
i915_gem_object_unlock(obj);
}
batch = intel_emit_vma_fill_blt(ce, vma, value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@ -165,27 +170,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (unlikely(err))
goto out_request;
err = i915_request_await_object(rq, obj, true);
if (unlikely(err))
goto out_request;
if (ce->engine->emit_init_breadcrumb) {
err = ce->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
goto out_request;
}
i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
err = move_obj_to_gpu(vma->obj, rq, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
err = ce->engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
0);
if (ce->engine->emit_init_breadcrumb)
err = ce->engine->emit_init_breadcrumb(rq);
if (likely(!err))
err = ce->engine->emit_bb_start(rq,
batch->node.start,
batch->node.size,
0);
out_request:
if (unlikely(err))
i915_request_set_error_once(rq, err);
@ -317,16 +317,6 @@ out_pm:
return ERR_PTR(err);
}
static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
{
struct drm_i915_gem_object *obj = vma->obj;
if (obj->cache_dirty & ~obj->cache_coherent)
i915_gem_clflush_object(obj, 0);
return i915_request_await_object(rq, obj, write);
}
int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct intel_context *ce)
@ -375,7 +365,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
goto out_request;
for (i = 0; i < ARRAY_SIZE(vma); i++) {
err = move_to_gpu(vma[i], rq, i);
err = move_obj_to_gpu(vma[i]->obj, rq, i);
if (unlikely(err))
goto out_unlock;
}

View File

@ -61,6 +61,8 @@ struct drm_i915_gem_object_ops {
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);
const char *name; /* friendly name for debug, e.g. lockdep classes */
};
enum i915_mmap_type {
@ -119,6 +121,7 @@ struct drm_i915_gem_object {
* this translation from object to context->handles_vma.
*/
struct list_head lut_list;
spinlock_t lut_lock; /* guards lut_list */
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;

View File

@ -27,7 +27,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
void *dst;
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
/*
@ -140,6 +140,7 @@ static void phys_release(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.name = "i915_gem_object_phys",
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,

View File

@ -147,8 +147,7 @@ rebuild_st:
last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works. */
drm_WARN_ON(&i915->drm,
(gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
}
if (sg) { /* loop terminated early; short sg table */
sg_page_sizes |= sg->length;
@ -430,6 +429,7 @@ static void shmem_release(struct drm_i915_gem_object *obj)
}
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.name = "i915_gem_object_shmem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,

View File

@ -566,6 +566,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.name = "i915_gem_object_stolen",
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
.release = i915_gem_object_release_stolen,

View File

@ -21,7 +21,7 @@ struct i915_mm_struct {
struct i915_mmu_notifier *mn;
struct hlist_node node;
struct kref kref;
struct work_struct work;
struct rcu_work work;
};
#if defined(CONFIG_MMU_NOTIFIER)
@ -189,40 +189,31 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
struct i915_mmu_notifier *mn;
int err = 0;
struct i915_mmu_notifier *mn, *old;
int err;
mn = mm->mn;
if (mn)
mn = READ_ONCE(mm->mn);
if (likely(mn))
return mn;
mn = i915_mmu_notifier_create(mm);
if (IS_ERR(mn))
err = PTR_ERR(mn);
return mn;
mmap_write_lock(mm->mm);
mutex_lock(&mm->i915->mm_lock);
if (mm->mn == NULL && !err) {
/* Protected by mmap_lock (write-lock) */
err = __mmu_notifier_register(&mn->mn, mm->mm);
if (!err) {
/* Protected by mm_lock */
mm->mn = fetch_and_zero(&mn);
}
} else if (mm->mn) {
/*
* Someone else raced and successfully installed the mmu
* notifier, we can cancel our own errors.
*/
err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
mmap_write_unlock(mm->mm);
if (mn && !IS_ERR(mn))
err = mmu_notifier_register(&mn->mn, mm->mm);
if (err) {
kfree(mn);
return ERR_PTR(err);
}
return err ? ERR_PTR(err) : mm->mn;
old = cmpxchg(&mm->mn, NULL, mn);
if (old) {
mmu_notifier_unregister(&mn->mn, mm->mm);
kfree(mn);
mn = old;
}
return mn;
}
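The rewritten i915_mmu_notifier_find() above replaces the mm_lock/mmap_lock dance with an optimistic install: build a candidate, register it, then publish it with cmpxchg() and back out cleanly if another thread won the race. A stripped-down sketch of that publish-or-discard idiom; the types and the create_notifier()/discard_notifier() helpers are assumptions, not driver API:

#include <linux/atomic.h>
#include <linux/err.h>

struct notifier;	/* stand-in for struct i915_mmu_notifier */
struct owner { struct notifier *mn; };

/* Assumed helpers, standing in for i915_mmu_notifier_create() and
 * mmu_notifier_unregister() + kfree() respectively.
 */
struct notifier *create_notifier(struct owner *o);
void discard_notifier(struct notifier *mn);

static struct notifier *find_or_install(struct owner *o)
{
	struct notifier *mn, *old;

	mn = READ_ONCE(o->mn);	/* fast path: already published */
	if (likely(mn))
		return mn;

	mn = create_notifier(o);
	if (IS_ERR(mn))
		return mn;

	/* Publish iff still unset; cmpxchg() returns the prior value. */
	old = cmpxchg(&o->mn, NULL, mn);
	if (old) {
		discard_notifier(mn);	/* lost the race; use the winner's */
		mn = old;
	}

	return mn;
}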
static int
@ -235,7 +226,7 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
if (WARN_ON(obj->userptr.mm == NULL))
if (GEM_WARN_ON(!obj->userptr.mm))
return -EINVAL;
mn = i915_mmu_notifier_find(obj->userptr.mm);
@ -301,23 +292,28 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real)
{
struct i915_mm_struct *mm;
struct i915_mm_struct *it, *mm = NULL;
/* Protected by dev_priv->mm_lock */
hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
if (mm->mm == real)
return mm;
rcu_read_lock();
hash_for_each_possible_rcu(i915->mm_structs,
it, node,
(unsigned long)real)
if (it->mm == real && kref_get_unless_zero(&it->kref)) {
mm = it;
break;
}
rcu_read_unlock();
return NULL;
return mm;
}
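With the lookup now under rcu_read_lock() rather than a mutex, kref_get_unless_zero() becomes essential: an entry can still be reachable through the RCU-protected hash while its final reference is being dropped, and a plain kref_get() there would resurrect a dying object. A generic sketch of the pattern, assuming a table declared with DEFINE_HASHTABLE():

#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>

struct entry {
	struct hlist_node node;
	unsigned long key;
	struct kref kref;
};

static DEFINE_HASHTABLE(entries, 7);	/* illustrative bucket count */

static struct entry *entry_lookup_get(unsigned long key)
{
	struct entry *it, *found = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(entries, it, node, key) {
		/*
		 * The entry stays visible to RCU readers while its last
		 * reference is dropped, so only claim it if the refcount
		 * has not yet hit zero.
		 */
		if (it->key == key && kref_get_unless_zero(&it->kref)) {
			found = it;
			break;
		}
	}
	rcu_read_unlock();

	return found;	/* caller owns a reference, or NULL */
}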
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_mm_struct *mm;
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_mm_struct *mm, *new;
int ret = 0;
/* During release of the GEM object we hold the struct_mutex. This
@ -330,39 +326,42 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
* struct_mutex, i.e. we need to schedule a worker to do the clean
* up.
*/
mutex_lock(&dev_priv->mm_lock);
mm = __i915_mm_struct_find(dev_priv, current->mm);
if (mm == NULL) {
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (mm == NULL) {
ret = -ENOMEM;
goto out;
}
mm = __i915_mm_struct_find(i915, current->mm);
if (mm)
goto out;
kref_init(&mm->kref);
mm->i915 = to_i915(obj->base.dev);
new = kmalloc(sizeof(*mm), GFP_KERNEL);
if (!new)
return -ENOMEM;
mm->mm = current->mm;
kref_init(&new->kref);
new->i915 = to_i915(obj->base.dev);
new->mm = current->mm;
new->mn = NULL;
spin_lock(&i915->mm_lock);
mm = __i915_mm_struct_find(i915, current->mm);
if (!mm) {
hash_add_rcu(i915->mm_structs,
&new->node,
(unsigned long)new->mm);
mmgrab(current->mm);
mm = new;
}
spin_unlock(&i915->mm_lock);
if (mm != new)
kfree(new);
mm->mn = NULL;
/* Protected by dev_priv->mm_lock */
hash_add(dev_priv->mm_structs,
&mm->node, (unsigned long)mm->mm);
} else
kref_get(&mm->kref);
obj->userptr.mm = mm;
out:
mutex_unlock(&dev_priv->mm_lock);
obj->userptr.mm = mm;
return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
struct i915_mm_struct *mm = container_of(work, typeof(*mm), work.work);
i915_mmu_notifier_free(mm->mn, mm->mm);
mmdrop(mm->mm);
kfree(mm);
@ -373,12 +372,12 @@ __i915_mm_struct_free(struct kref *kref)
{
struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
/* Protected by dev_priv->mm_lock */
hash_del(&mm->node);
mutex_unlock(&mm->i915->mm_lock);
spin_lock(&mm->i915->mm_lock);
hash_del_rcu(&mm->node);
spin_unlock(&mm->i915->mm_lock);
INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
queue_work(mm->i915->mm.userptr_wq, &mm->work);
INIT_RCU_WORK(&mm->work, __i915_mm_struct_free__worker);
queue_rcu_work(system_wq, &mm->work);
}
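The release side pairs with that RCU lookup: hash_del_rcu() unlinks the entry under the writer-side lock, but the memory must survive until every reader has left its rcu_read_lock() section, so the kfree() is deferred through an rcu_work, which only runs after a grace period. A sketch with assumed types; the driver's equivalent is __i915_mm_struct_free() above:

#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct entry2 {
	struct hlist_node node;
	struct kref kref;
	struct rcu_work work;
};

static DEFINE_SPINLOCK(entries_lock);	/* writer-side lock, illustrative */

static void entry2_free(struct work_struct *wrk)
{
	struct entry2 *e = container_of(wrk, typeof(*e), work.work);

	/* A grace period has elapsed; no RCU reader can still see us. */
	kfree(e);
}

static void entry2_release(struct kref *kref)
{
	struct entry2 *e = container_of(kref, typeof(*e), kref);

	spin_lock(&entries_lock);
	hash_del_rcu(&e->node);	/* unlink; readers may still hold e */
	spin_unlock(&entries_lock);

	INIT_RCU_WORK(&e->work, entry2_free);
	queue_rcu_work(system_wq, &e->work);	/* free after grace period */
}

A caller simply drops its reference with kref_put(&e->kref, entry2_release); no lock is needed at the put site any more, which is why the kref_put_mutex() above could go.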
static void
@ -387,9 +386,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
if (obj->userptr.mm == NULL)
return;
kref_put_mutex(&obj->userptr.mm->kref,
__i915_mm_struct_free,
&to_i915(obj->base.dev)->mm_lock);
kref_put(&obj->userptr.mm->kref, __i915_mm_struct_free);
obj->userptr.mm = NULL;
}
@ -712,6 +709,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.name = "i915_gem_object_userptr",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_NO_MMAP |
@ -850,7 +848,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->mm_lock);
spin_lock_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
dev_priv->mm.userptr_wq =

View File

@ -88,6 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_ops = {
.name = "huge-gem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = huge_get_pages,
.put_pages = huge_put_pages,

View File

@ -139,6 +139,7 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
.name = "huge-gem",
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = get_huge_pages,
@ -283,12 +284,14 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
}
static const struct drm_i915_gem_object_ops fake_ops = {
.name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_huge_pages,
.put_pages = fake_put_huge_pages,
};
static const struct drm_i915_gem_object_ops fake_ops_single = {
.name = "fake-gem",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
.get_pages = fake_get_huge_pages_single,
.put_pages = fake_put_huge_pages,
@ -1409,147 +1412,6 @@ out:
return err;
}
static int igt_ppgtt_pin_update(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_gem_engines_iter it;
struct i915_address_space *vm;
struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
unsigned int n;
int first, last;
int err = 0;
/*
* Make sure there's no funny business when doing a PIN_UPDATE -- in the
* past we had a subtle issue with being able to incorrectly do multiple
* alloc va ranges on the same object when doing a PIN_UPDATE, which
* resulted in some pretty nasty bugs, though only when using
* huge-gtt-pages.
*/
vm = i915_gem_context_get_vm_rcu(ctx);
if (!i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
goto out_vm;
}
first = ilog2(I915_GTT_PAGE_SIZE_64K);
last = ilog2(I915_GTT_PAGE_SIZE_2M);
for_each_set_bit_from(first, &supported, last + 1) {
unsigned int page_size = BIT(first);
obj = i915_gem_object_create_internal(dev_priv, page_size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_vm;
}
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
}
err = i915_vma_pin(vma, SZ_2M, 0, flags);
if (err)
goto out_put;
if (vma->page_sizes.sg < page_size) {
pr_info("Unable to allocate page-size %x, finishing test early\n",
page_size);
goto out_unpin;
}
err = igt_check_page_sizes(vma);
if (err)
goto out_unpin;
if (vma->page_sizes.gtt != page_size) {
dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
/*
* The only valid reason for this to ever fail would be
* if the dma-mapper screwed us over when we did the
* dma_map_sg(), since it has the final say over the dma
* address.
*/
if (IS_ALIGNED(addr, page_size)) {
pr_err("page_sizes.gtt=%u, expected=%u\n",
vma->page_sizes.gtt, page_size);
err = -EINVAL;
} else {
pr_info("dma address misaligned, finishing test early\n");
}
goto out_unpin;
}
err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
if (err)
goto out_unpin;
i915_vma_unpin(vma);
i915_gem_object_put(obj);
}
obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_vm;
}
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
}
err = i915_vma_pin(vma, 0, 0, flags);
if (err)
goto out_put;
/*
* Make sure we don't end up with something like where the pde is still
* pointing to the 2M page, and the pt we just filled-in is dangling --
* we can check this by writing to the first page where it would then
* land in the now stale 2M page.
*/
n = 0;
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
if (!intel_engine_can_store_dword(ce->engine))
continue;
err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
break;
}
i915_gem_context_unlock_engines(ctx);
if (err)
goto out_unpin;
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
goto out_unpin;
}
out_unpin:
i915_vma_unpin(vma);
out_put:
i915_gem_object_put(obj);
out_vm:
i915_vm_put(vm);
return err;
}
static int igt_tmpfs_fallback(void *arg)
{
struct i915_gem_context *ctx = arg;
@ -1760,7 +1622,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_shrink_thp),
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),

View File

@ -702,8 +702,5 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
return i915_live_subtests(tests, i915);
}

View File

@ -37,20 +37,14 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
if (!__reloc_entry_gpu(eb, vma,
offsets[0] * sizeof(u32),
0)) {
err = -EIO;
err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
if (err)
goto unpin_vma;
}
/* !8-Byte aligned */
if (!__reloc_entry_gpu(eb, vma,
offsets[1] * sizeof(u32),
1)) {
err = -EIO;
err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
if (err)
goto unpin_vma;
}
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@ -60,12 +54,9 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
if (!__reloc_entry_gpu(eb, vma,
offsets[2] * sizeof(u32),
2)) {
err = -EIO;
err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
if (err)
goto unpin_vma;
}
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);

View File

@ -193,7 +193,7 @@ err_src:
}
struct igt_thread_arg {
struct drm_i915_private *i915;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
struct file *file;
struct rnd_state prng;
@ -203,7 +203,7 @@ struct igt_thread_arg {
static int igt_fill_blt_thread(void *arg)
{
struct igt_thread_arg *thread = arg;
struct drm_i915_private *i915 = thread->i915;
struct intel_engine_cs *engine = thread->engine;
struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
@ -215,7 +215,7 @@ static int igt_fill_blt_thread(void *arg)
ctx = thread->ctx;
if (!ctx) {
ctx = live_context(i915, thread->file);
ctx = live_context_for_engine(engine, thread->file);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@ -223,7 +223,7 @@ static int igt_fill_blt_thread(void *arg)
ctx->sched.priority = I915_USER_PRIORITY(prio);
}
ce = i915_gem_context_get_engine(ctx, BCS0);
ce = i915_gem_context_get_engine(ctx, 0);
GEM_BUG_ON(IS_ERR(ce));
/*
@ -256,7 +256,7 @@ static int igt_fill_blt_thread(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
obj = huge_gem_object(i915, phys_sz, sz);
obj = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@ -321,7 +321,7 @@ err_flush:
static int igt_copy_blt_thread(void *arg)
{
struct igt_thread_arg *thread = arg;
struct drm_i915_private *i915 = thread->i915;
struct intel_engine_cs *engine = thread->engine;
struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *src, *dst;
struct i915_gem_context *ctx;
@ -333,7 +333,7 @@ static int igt_copy_blt_thread(void *arg)
ctx = thread->ctx;
if (!ctx) {
ctx = live_context(i915, thread->file);
ctx = live_context_for_engine(engine, thread->file);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@ -341,7 +341,7 @@ static int igt_copy_blt_thread(void *arg)
ctx->sched.priority = I915_USER_PRIORITY(prio);
}
ce = i915_gem_context_get_engine(ctx, BCS0);
ce = i915_gem_context_get_engine(ctx, 0);
GEM_BUG_ON(IS_ERR(ce));
/*
@ -374,7 +374,7 @@ static int igt_copy_blt_thread(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
src = huge_gem_object(i915, phys_sz, sz);
src = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(src)) {
err = PTR_ERR(src);
goto err_flush;
@ -394,7 +394,7 @@ static int igt_copy_blt_thread(void *arg)
if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
src->cache_dirty = true;
dst = huge_gem_object(i915, phys_sz, sz);
dst = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_put_src;
@ -456,7 +456,7 @@ err_flush:
return err;
}
static int igt_threaded_blt(struct drm_i915_private *i915,
static int igt_threaded_blt(struct intel_engine_cs *engine,
int (*blt_fn)(void *arg),
unsigned int flags)
#define SINGLE_CTX BIT(0)
@ -477,14 +477,14 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
if (!thread)
goto out_tsk;
thread[0].file = mock_file(i915);
thread[0].file = mock_file(engine->i915);
if (IS_ERR(thread[0].file)) {
err = PTR_ERR(thread[0].file);
goto out_thread;
}
if (flags & SINGLE_CTX) {
thread[0].ctx = live_context(i915, thread[0].file);
thread[0].ctx = live_context_for_engine(engine, thread[0].file);
if (IS_ERR(thread[0].ctx)) {
err = PTR_ERR(thread[0].ctx);
goto out_file;
@ -492,7 +492,7 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
}
for (i = 0; i < n_cpus; ++i) {
thread[i].i915 = i915;
thread[i].engine = engine;
thread[i].file = thread[0].file;
thread[i].ctx = thread[0].ctx;
thread[i].n_cpus = n_cpus;
@ -532,24 +532,40 @@ out_tsk:
return err;
}
static int test_copy_engines(struct drm_i915_private *i915,
int (*fn)(void *arg),
unsigned int flags)
{
struct intel_engine_cs *engine;
int ret;
for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_COPY, i915) {
ret = igt_threaded_blt(engine, fn, flags);
if (ret)
return ret;
}
return 0;
}
static int igt_fill_blt(void *arg)
{
return igt_threaded_blt(arg, igt_fill_blt_thread, 0);
return test_copy_engines(arg, igt_fill_blt_thread, 0);
}
static int igt_fill_blt_ctx0(void *arg)
{
return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX);
return test_copy_engines(arg, igt_fill_blt_thread, SINGLE_CTX);
}
static int igt_copy_blt(void *arg)
{
return igt_threaded_blt(arg, igt_copy_blt_thread, 0);
return test_copy_engines(arg, igt_copy_blt_thread, 0);
}
static int igt_copy_blt_ctx0(void *arg)
{
return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX);
return test_copy_engines(arg, igt_copy_blt_thread, SINGLE_CTX);
}
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
@ -564,9 +580,6 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
return i915_live_subtests(tests, i915);
}

View File

@ -99,6 +99,43 @@ err_ctx:
return ERR_PTR(err);
}
struct i915_gem_context *
live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
{
struct i915_gem_engines *engines;
struct i915_gem_context *ctx;
struct intel_context *ce;
engines = alloc_engines(1);
if (!engines)
return ERR_PTR(-ENOMEM);
ctx = live_context(engine->i915, file);
if (IS_ERR(ctx)) {
__free_engines(engines, 0);
return ctx;
}
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
__free_engines(engines, 0);
return ERR_CAST(ce);
}
intel_context_set_gem(ce, ctx);
engines->engines[0] = ce;
engines->num_engines = 1;
mutex_lock(&ctx->engines_mutex);
i915_gem_context_set_user_engines(ctx);
engines = rcu_replace_pointer(ctx->engines, engines, 1);
mutex_unlock(&ctx->engines_mutex);
engines_idle_release(ctx, engines);
return ctx;
}
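live_context_for_engine() pins the context's engine map down to exactly one intel_context, which is why the reworked blt selftests above fetch the engine with index 0 rather than BCS0. A hedged usage sketch (not from the patch): submit_some_work() is a placeholder, and 'file' would come from mock_file().

static int run_on_engine(struct intel_engine_cs *engine, struct file *file)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	int err;

	ctx = live_context_for_engine(engine, file);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* The lone engine sits at index 0 of the user engine map. */
	ce = i915_gem_context_get_engine(ctx, 0);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = submit_some_work(ce);	/* hypothetical */

	intel_context_put(ce);
	return err;
}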
struct i915_gem_context *
kernel_context(struct drm_i915_private *i915)
{

View File

@ -9,6 +9,7 @@
struct file;
struct drm_i915_private;
struct intel_engine_cs;
void mock_init_contexts(struct drm_i915_private *i915);
@ -21,6 +22,9 @@ void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct file *file);
struct i915_gem_context *
live_context_for_engine(struct intel_engine_cs *engine, struct file *file);
struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
void kernel_context_close(struct i915_gem_context *ctx);

View File

@ -0,0 +1,329 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "gen2_engine_cs.h"
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_ring.h"
int gen2_emit_flush(struct i915_request *rq, u32 mode)
{
unsigned int num_store_dw = 12;
u32 cmd, *cs;
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
while (num_store_dw--) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
*cs++ = 0;
*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
}
*cs++ = cmd;
intel_ring_advance(rq, cs);
return 0;
}
int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
int i;
/*
* read/write caches:
*
* I915_GEM_DOMAIN_RENDER is always invalidated, but is
* only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
* also flushed at 2d versus 3d pipeline switches.
*
* read-only caches:
*
* I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
* MI_READ_FLUSH is set, and is always flushed on 965.
*
* I915_GEM_DOMAIN_COMMAND may not exist?
*
* I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
* invalidated when MI_EXE_FLUSH is set.
*
* I915_GEM_DOMAIN_VERTEX, which exists on 965, is
* invalidated with every MI_FLUSH.
*
* TLBs:
*
* On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
* and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
* I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
* are flushed at any MI_FLUSH.
*/
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
cmd |= MI_INVALIDATE_ISP;
}
i = 2;
if (mode & EMIT_INVALIDATE)
i += 20;
cs = intel_ring_begin(rq, i);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
/*
* A random delay to let the CS invalidate take effect? Without this
* delay, the GPU relocation path fails as the CS does not see
* the updated contents. Just as important, if we apply the flushes
* to the EMIT_FLUSH branch (i.e. immediately after the relocation
* write and before the invalidate on the next batch), the relocations
* still fail. This implies that there is a delay following invalidation
* that is required to reset the caches as opposed to a delay to
* ensure the memory is written.
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
for (i = 0; i < 12; i++)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
*cs++ = cmd;
intel_ring_advance(rq, cs);
return 0;
}
int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
{
u32 *cs;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
int flush, int post)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
while (flush--) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SCRATCH * sizeof(u32);
*cs++ = rq->fence.seqno;
}
while (post--) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
}
*cs++ = MI_USER_INTERRUPT;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
return __gen2_emit_breadcrumb(rq, cs, 16, 8);
}
u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
return __gen2_emit_breadcrumb(rq, cs, 8, 8);
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES * SZ_4K, I830_BATCH_LIMIT)
int i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs, cs_offset =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Evict the invalid PTE TLBs */
*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
*cs++ = cs_offset;
*cs++ = 0xdeadbeef;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
cs = intel_ring_begin(rq, 6 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
/*
* Blit the batch (which now has all relocs applied) to the
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
*cs++ = cs_offset;
*cs++ = 4096;
*cs++ = offset;
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
/* ... and execute it. */
offset = cs_offset;
}
if (!(dispatch_flags & I915_DISPATCH_SECURE))
offset |= MI_BATCH_NON_SECURE;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset;
intel_ring_advance(rq, cs);
return 0;
}
int gen3_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
if (!(dispatch_flags & I915_DISPATCH_SECURE))
offset |= MI_BATCH_NON_SECURE;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset;
intel_ring_advance(rq, cs);
return 0;
}
int gen4_emit_bb_start(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags)
{
u32 security;
u32 *cs;
security = MI_BATCH_NON_SECURE_I965;
if (dispatch_flags & I915_DISPATCH_SECURE)
security = 0;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | security;
*cs++ = offset;
intel_ring_advance(rq, cs);
return 0;
}
void gen2_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
i915->irq_mask &= ~engine->irq_enable_mask;
intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
ENGINE_POSTING_READ16(engine, RING_IMR);
}
void gen2_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
i915->irq_mask |= engine->irq_enable_mask;
intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
}
void gen3_irq_enable(struct intel_engine_cs *engine)
{
engine->i915->irq_mask &= ~engine->irq_enable_mask;
intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
void gen3_irq_disable(struct intel_engine_cs *engine)
{
engine->i915->irq_mask |= engine->irq_enable_mask;
intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}
void gen5_irq_enable(struct intel_engine_cs *engine)
{
gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
void gen5_irq_disable(struct intel_engine_cs *engine)
{
gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}

View File

@ -0,0 +1,38 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2020 Intel Corporation
*/
#ifndef __GEN2_ENGINE_CS_H__
#define __GEN2_ENGINE_CS_H__
#include <linux/types.h>
struct i915_request;
struct intel_engine_cs;
int gen2_emit_flush(struct i915_request *rq, u32 mode);
int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs);
u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
int i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags);
int gen3_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags);
int gen4_emit_bb_start(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags);
void gen2_irq_enable(struct intel_engine_cs *engine);
void gen2_irq_disable(struct intel_engine_cs *engine);
void gen3_irq_enable(struct intel_engine_cs *engine);
void gen3_irq_disable(struct intel_engine_cs *engine);
void gen5_irq_enable(struct intel_engine_cs *engine);
void gen5_irq_disable(struct intel_engine_cs *engine);
#endif /* __GEN2_ENGINE_CS_H__ */

View File

@ -0,0 +1,455 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "gen6_engine_cs.h"
#include "intel_engine.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_ring.h"
#define HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
/*
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
*
* [DevSNB-C+{W/A}] Before any depth stall flush (including those
* produced by non-pipelined state commands), software needs to first
* send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
* 0.
*
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
* =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
*
* And the workaround for these two requires this workaround first:
*
* [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
* BEFORE the pipe-control with a post-sync op and no write-cache
* flushes.
*
* And this last workaround is tricky because of the requirements on
* that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
* volume 2 part 1:
*
* "1 of the following must also be set:
* - Render Target Cache Flush Enable ([12] of DW1)
* - Depth Cache Flush Enable ([0] of DW1)
* - Stall at Pixel Scoreboard ([1] of DW1)
* - Depth Stall ([13] of DW1)
* - Post-Sync Operation ([13] of DW1)
* - Notify Enable ([8] of DW1)"
*
* The cache flushes require the workaround flush that triggered this
* one, so we can't use it. Depth stall would trigger the same.
* Post-sync nonzero is what triggered this second workaround, so we
* can't use that one either. Notify enable is IRQs, which aren't
* really our business. That leaves only stall at scoreboard.
*/
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0; /* low dword */
*cs++ = 0; /* high dword */
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_QW_WRITE;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = gen6_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
/*
* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact. And when rearranging requests, the order of flushes is
* unknown.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/*
* Ensure that any following seqno writes only happen
* when the render cache is indeed flushed.
*/
flags |= PIPE_CONTROL_CS_STALL;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = flags;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_QW_WRITE;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
/* Finally we can flush and with it emit the breadcrumb */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_timeline(rq)->hwsp_offset |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
u32 cmd, *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
/*
* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
cmd |= flags;
*cs++ = cmd;
*cs++ = HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}
int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
{
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}
int gen6_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 security;
u32 *cs;
security = MI_BATCH_NON_SECURE_I965;
if (dispatch_flags & I915_DISPATCH_SECURE)
security = 0;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = __gen6_emit_bb_start(cs, offset, security);
intel_ring_advance(rq, cs);
return 0;
}
int
hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 security;
u32 *cs;
security = MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW;
if (dispatch_flags & I915_DISPATCH_SECURE)
security = 0;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = __gen6_emit_bb_start(cs, offset, security);
intel_ring_advance(rq, cs);
return 0;
}
static int gen7_stall_cs(struct i915_request *rq)
{
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
/*
* Ensure that any following seqno writes only happen when the render
* cache is indeed flushed.
*
* Workaround: 4th PIPE_CONTROL command (except the ones with only
* read-cache invalidate bits set) must have the CS_STALL bit set. We
* don't try to be clever and just set it unconditionally.
*/
flags |= PIPE_CONTROL_CS_STALL;
/*
* CS_STALL suggests at least a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/*
* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set.
*/
gen7_stall_cs(rq);
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = flags;
*cs++ = scratch_addr;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_timeline(rq)->hwsp_offset;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#define GEN7_XCS_WA 32
u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
int i;
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
}
*cs++ = MI_FLUSH_DW;
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#undef GEN7_XCS_WA
void gen6_irq_enable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR,
~(engine->irq_enable_mask | engine->irq_keep_mask));
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
void gen6_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
void hsw_irq_enable_vecs(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}
void hsw_irq_disable_vecs(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~0);
gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2020 Intel Corporation
*/
#ifndef __GEN6_ENGINE_CS_H__
#define __GEN6_ENGINE_CS_H__
#include <linux/types.h>
#include "intel_gpu_commands.h"
struct i915_request;
struct intel_engine_cs;
int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode);
int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode);
int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode);
u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode);
u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
int gen6_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags);
int hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags);
void gen6_irq_enable(struct intel_engine_cs *engine);
void gen6_irq_disable(struct intel_engine_cs *engine);
void hsw_irq_enable_vecs(struct intel_engine_cs *engine);
void hsw_irq_disable_vecs(struct intel_engine_cs *engine);
#endif /* __GEN6_ENGINE_CS_H__ */

View File

@ -30,7 +30,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
*cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
*cs++ = intel_sseu_make_rpcs(rq->engine->i915, &sseu);
intel_ring_advance(rq, cs);

View File

@ -187,7 +187,6 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
@ -335,7 +334,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
ktime_t *now);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

View File

@ -414,12 +414,12 @@ void intel_engines_release(struct intel_gt *gt)
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
intel_wakeref_wait_for_idle(&engine->wakeref);
GEM_BUG_ON(intel_engine_pm_is_awake(engine));
if (!engine->release)
continue;
intel_wakeref_wait_for_idle(&engine->wakeref);
GEM_BUG_ON(intel_engine_pm_is_awake(engine));
engine->release(engine);
engine->release = NULL;
@ -661,7 +661,6 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
if (!frame)
return -ENOMEM;
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
@ -1095,19 +1094,21 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
if (__tasklet_is_scheduled(t)) {
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
if (__tasklet_is_enabled(t))
t->func(t->data);
tasklet_unlock(t);
}
local_bh_enable();
}
if (!t->func)
return;
/* Otherwise flush the tasklet if it was running on another cpu */
tasklet_unlock_wait(t);
/* Synchronise and wait for the tasklet on another CPU */
tasklet_kill(t);
/* Having cancelled the tasklet, ensure that it is run */
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
if (__tasklet_is_enabled(t))
t->func(t->data);
tasklet_unlock(t);
}
local_bh_enable();
}
/**
@ -1194,8 +1195,7 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
static int print_sched_attr(struct drm_i915_private *i915,
const struct i915_sched_attr *attr,
static int print_sched_attr(const struct i915_sched_attr *attr,
char *buf, int x, int len)
{
if (attr->priority == I915_PRIORITY_INVALID)
@ -1215,7 +1215,7 @@ static void print_request(struct drm_printer *m,
char buf[80] = "";
int x = 0;
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
@ -1423,9 +1423,11 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
"\t\tActive[%d]: ccid:%08x, ",
"\t\tActive[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->active),
rq->context->lrc.ccid);
rq->context->lrc.ccid,
intel_context_is_closed(rq->context) ? "!" : "",
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
@ -1435,9 +1437,11 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
int len;
len = scnprintf(hdr, sizeof(hdr),
"\t\tPending[%d]: ccid:%08x, ",
"\t\tPending[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->pending),
rq->context->lrc.ccid);
rq->context->lrc.ccid,
intel_context_is_closed(rq->context) ? "!" : "",
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
@ -1506,6 +1510,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_request *rq;
intel_wakeref_t wakeref;
unsigned long flags;
ktime_t dummy;
if (header) {
va_list ap;
@ -1523,6 +1528,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
yesno(!llist_empty(&engine->barrier_tasks)));
drm_printf(m, "\tLatency: %luus\n",
ewma__engine_latency_read(&engine->latency));
if (intel_engine_supports_stats(engine))
drm_printf(m, "\tRuntime: %llums\n",
ktime_to_ms(intel_engine_get_busy_time(engine,
&dummy)));
drm_printf(m, "\tForcewake: %x domains, %d active\n",
engine->fw_domain, atomic_read(&engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@ -1589,7 +1600,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
ktime_t *now)
{
ktime_t total = engine->stats.total;
@ -1597,9 +1609,9 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
* If the engine is executing something at the moment
* add it to the total.
*/
*now = ktime_get();
if (atomic_read(&engine->stats.active))
total = ktime_add(total,
ktime_sub(ktime_get(), engine->stats.start));
total = ktime_add(total, ktime_sub(*now, engine->stats.start));
return total;
}
@ -1607,17 +1619,18 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
/**
* intel_engine_get_busy_time() - Return current accumulated engine busyness
* @engine: engine to report on
* @now: monotonic timestamp of sampling
*
* Returns accumulated time @engine was busy since engine stats were enabled.
*/
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
unsigned int seq;
ktime_t total;
do {
seq = read_seqbegin(&engine->stats.lock);
total = __intel_engine_get_busy_time(engine);
total = __intel_engine_get_busy_time(engine, now);
} while (read_seqretry(&engine->stats.lock, seq));
return total;
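intel_engine_get_busy_time() samples engine->stats under a seqlock, so the writer (the scheduling path) is never blocked by readers; a reader simply retries whenever the sequence count changed underneath it. A generic sketch of the reader/writer pairing, assuming a stats structure guarded by a seqlock_t (seqlock_init() at setup elided):

#include <linux/ktime.h>
#include <linux/seqlock.h>

struct busy_stats {
	seqlock_t lock;
	ktime_t total;
};

static ktime_t busy_stats_read(struct busy_stats *s)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&s->lock);	/* snapshot the sequence */
		total = s->total;		/* racy read, validated below */
	} while (read_seqretry(&s->lock, seq));	/* retry if a writer ran */

	return total;
}

static void busy_stats_add(struct busy_stats *s, ktime_t delta)
{
	write_seqlock(&s->lock);	/* brief exclusive writer section */
	s->total = ktime_add(s->total, delta);
	write_sequnlock(&s->lock);
}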

View File

@ -4,6 +4,7 @@
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_request.h"
#include "intel_context.h"
@ -31,7 +32,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
delay = msecs_to_jiffies_timeout(delay);
if (delay >= HZ)
delay = round_jiffies_up_relative(delay);
mod_delayed_work(system_wq, &engine->heartbeat.work, delay);
mod_delayed_work(system_highpri_wq, &engine->heartbeat.work, delay);
return true;
}
@ -48,8 +49,10 @@ static void show_heartbeat(const struct i915_request *rq,
struct drm_printer p = drm_debug_printer("heartbeat");
intel_engine_dump(engine, &p,
"%s heartbeat {prio:%d} not ticking\n",
"%s heartbeat {seqno:%llx:%lld, prio:%d} not ticking\n",
engine->name,
rq->fence.context,
rq->fence.seqno,
rq->sched.attr.priority);
}
@ -62,6 +65,10 @@ static void heartbeat(struct work_struct *wrk)
container_of(wrk, typeof(*engine), heartbeat.work.work);
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
unsigned long serial;
/* Just in case everything has gone horribly wrong, give it a kick */
intel_engine_flush_submission(engine);
rq = engine->heartbeat.systole;
if (rq && i915_request_completed(rq)) {
@ -76,8 +83,19 @@ static void heartbeat(struct work_struct *wrk)
goto out;
if (engine->heartbeat.systole) {
if (engine->schedule &&
rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
if (!i915_sw_fence_signaled(&rq->submit)) {
/*
* Not yet submitted, system is stalled.
*
* This more often happens for ring submission,
* where all contexts are funnelled into a common
* ringbuffer. If one context is blocked on an
* external fence, not only is it not submitted,
* but all other contexts, including the kernel
* context are stuck waiting for the signal.
*/
} else if (engine->schedule &&
rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
/*
* Gradually raise the priority of the heartbeat to
* give high priority work [which presumably desires
@ -105,10 +123,19 @@ static void heartbeat(struct work_struct *wrk)
goto out;
}
if (engine->wakeref_serial == engine->serial)
serial = READ_ONCE(engine->serial);
if (engine->wakeref_serial == serial)
goto out;
mutex_lock(&ce->timeline->mutex);
if (!mutex_trylock(&ce->timeline->mutex)) {
/* Unable to lock the kernel timeline, is the engine stuck? */
if (xchg(&engine->heartbeat.blocked, serial) == serial)
intel_gt_handle_error(engine->gt, engine->mask,
I915_ERROR_CAPTURE,
"no heartbeat on %s",
engine->name);
goto out;
}
intel_context_enter(ce);
rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
@ -117,7 +144,7 @@ static void heartbeat(struct work_struct *wrk)
goto unlock;
idle_pulse(engine, rq);
if (i915_modparams.enable_hangcheck)
if (engine->i915->params.enable_hangcheck)
engine->heartbeat.systole = i915_request_get(rq);
__i915_request_commit(rq);
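Two details in the heartbeat() hunk above are worth calling out: the kernel timeline mutex is only try-locked, and a stuck engine is declared via xchg(&engine->heartbeat.blocked, serial) == serial, i.e. only when the same serial is seen blocked on two consecutive heartbeats. A sketch of that two-strikes detector in isolation; the struct is illustrative and mirrors engine->heartbeat.blocked and engine->serial:

#include <linux/atomic.h>

static bool heartbeat_two_strikes(unsigned long *blocked,
				  unsigned long serial)
{
	/*
	 * xchg() stores the current serial and returns the one recorded
	 * on the previous failed attempt; only a repeat of the same
	 * serial counts as "stuck", a single miss is transient.
	 */
	return xchg(blocked, serial) == serial;
}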

View File

@ -24,6 +24,7 @@
#include "i915_selftest.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
@ -313,6 +314,16 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
/*
* Some w/a require forcewake to be held (which prevents RC6) while
* a particular engine is active. If so, we set fw_domain to which
* domains need to be held for the duration of request activity,
* and 0 if none. We try to limit the duration of the hold as much
* as possible.
*/
enum forcewake_domains fw_domain;
atomic_t fw_active;
unsigned long context_tag;
struct rb_node uabi_node;
@ -337,6 +348,7 @@ struct intel_engine_cs {
struct {
struct delayed_work work;
struct i915_request *systole;
unsigned long blocked;
} heartbeat;
unsigned long serial;
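The fw_domain/fw_active pair above implements a first-in/last-out hold: the forcewake domain is grabbed when the engine's first active request is scheduled in and released when the last one schedules out, as the execlists hunks later in this section show with atomic_fetch_inc()/atomic_dec_return(). The counting idiom in isolation, with assumed fw_get()/fw_put() helpers standing in for intel_uncore_forcewake_get()/_put():

#include <linux/atomic.h>

void fw_get(void);	/* assumed helpers */
void fw_put(void);

static atomic_t fw_active_count = ATOMIC_INIT(0);

static void fw_hold_get(void)
{
	/* atomic_fetch_inc() returns the old count: 0 means first in. */
	if (!atomic_fetch_inc(&fw_active_count))
		fw_get();
}

static void fw_hold_put(void)
{
	/* atomic_dec_return() returns the new count: 0 means last out. */
	if (!atomic_dec_return(&fw_active_count))
		fw_put();
}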

View File

@ -108,13 +108,32 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
struct i915_vma *vma, *vn;
int open;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
mutex_lock(&ggtt->vm.mutex);
/* Skip rewriting PTE on VMA unbind. */
open = atomic_xchg(&ggtt->vm.open, 0);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
if (i915_vma_is_pinned(vma))
continue;
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
__i915_vma_evict(vma);
drm_mm_remove_node(&vma->node);
}
}
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
ggtt->invalidate(ggtt);
atomic_set(&ggtt->vm.open, open);
mutex_unlock(&ggtt->vm.mutex);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
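i915_ggtt_suspend() above temporarily marks the address space closed, open = atomic_xchg(&ggtt->vm.open, 0), so that evicting a vma does not bother rewriting PTEs that clear_range() is about to wipe wholesale, then restores the previous open count. The save/zero/restore idiom in isolation; body() is a placeholder for the evict-plus-clear sequence:

#include <linux/atomic.h>

static void with_vm_closed(atomic_t *open, void (*body)(void))
{
	int saved = atomic_xchg(open, 0);	/* close the address space */

	body();

	atomic_set(open, saved);		/* restore the open count */
}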
@ -424,22 +443,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
return 0;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@ -1166,6 +1180,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
static unsigned int clear_bind(struct i915_vma *vma)
{
return atomic_fetch_and(~I915_VMA_BIND_MASK, &vma->flags);
}
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
@ -1183,14 +1202,11 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
unsigned int was_bound = clear_bind(vma);
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
PIN_GLOBAL, NULL));
was_bound, NULL));
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;


@ -616,6 +616,11 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
void intel_gt_driver_release(struct intel_gt *gt)
{
struct i915_address_space *vm;
intel_wakeref_t wakeref;
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
__intel_gt_reset(gt, ALL_ENGINES);
vm = fetch_and_zero(&gt->vm);
if (vm) /* FIXME being called twice on error paths :( */


@ -212,8 +212,9 @@ void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
if (cancel_delayed_work_sync(&pool->work))
do {
pool_free_imm(pool);
} while (cancel_delayed_work_sync(&pool->work));
}
void intel_gt_fini_buffer_pool(struct intel_gt *gt)


@ -214,8 +214,8 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
drm_err(&gt->i915->drm,
"Failed to initialize GPU, declaring it wedged!\n");
i915_probe_error(gt->i915,
"Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}


@ -446,6 +446,9 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
* we have to flip the index value to become priority.
*/
p = to_priolist(rb);
if (!I915_USER_PRIORITY_SHIFT)
return p->priority;
return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
}
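To make the packing arithmetic concrete, here is a standalone model of queue_prio() (the shift of 2 and the sample bitmasks are assumptions for illustration, not values taken from this patch). Each user priority gets SHIFT bits of internal sub-levels, and ffs() of the used mask selects the highest occupied sub-level:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static int packed_prio(int priority, unsigned int used, int shift)
{
	if (!shift)
		return priority;
	return ((priority + 1) << shift) - ffs(used);
}

int main(void)
{
	/* With a 2-bit shift, sub-level 0 of priority 0 packs above
	 * sub-level 1: (0+1)<<2 - 1 = 3 versus (0+1)<<2 - 2 = 2. */
	printf("%d %d\n", packed_prio(0, 0x1, 2), packed_prio(0, 0x2, 2));

	/* With a zero shift, as the new early return above handles,
	 * the user priority is passed through untouched. */
	printf("%d\n", packed_prio(5, 0x1, 0));
	return 0;
}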
@ -1377,6 +1380,8 @@ __execlists_schedule_in(struct i915_request *rq)
ce->lrc.ccid |= engine->execlists.ccid;
__intel_gt_pm_get(engine->gt);
if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@ -1409,8 +1414,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
struct i915_request *next = READ_ONCE(ve->request);
if (next && next->execution_mask & ~rq->execution_mask)
tasklet_schedule(&ve->base.execlists.tasklet);
if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
tasklet_hi_schedule(&ve->base.execlists.tasklet);
}
static inline void
@ -1445,6 +1450,8 @@ __execlists_schedule_out(struct i915_request *rq,
intel_context_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
intel_gt_pm_put_async(engine->gt);
/*
@ -1636,9 +1643,9 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
ccid = ce->lrc.ccid;
/*
* Sentinels are supposed to be lonely so they flush the
* current execution off the HW. Check that they are the
* only request in the pending submission.
* Sentinels are supposed to be the last request so they flush
* the current execution off the HW. Check that they are the only
* request in the pending submission.
*/
if (sentinel) {
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
@ -1647,15 +1654,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
port - execlists->pending);
return false;
}
sentinel = i915_request_has_sentinel(rq);
if (sentinel && port != execlists->pending) {
GEM_TRACE_ERR("%s: sentinel context:%llx not in prime position[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
@ -1967,7 +1966,7 @@ static int
switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
{
if (list_is_last(&rq->sched.link, &engine->active.requests))
return INT_MIN;
return engine->execlists.queue_priority_hint;
return rq_prio(list_next_entry(rq, sched.link));
}
@ -2445,6 +2444,7 @@ done:
set_preempt_timeout(engine, *active);
execlists_submit_ports(engine);
} else {
start_timeslice(engine, execlists->queue_priority_hint);
skip_submit:
ring_set_paused(engine, 0);
}
@ -3170,13 +3170,6 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
if (reset_in_progress(execlists))
return; /* defer until we restart the engine following reset */
/* Hopefully we clear execlists->pending[] to let us through */
if (READ_ONCE(execlists->pending[0]) &&
tasklet_trylock(&execlists->tasklet)) {
process_csb(engine);
tasklet_unlock(&execlists->tasklet);
}
__execlists_submission_tasklet(engine);
}
@ -3199,11 +3192,25 @@ static bool ancestor_on_hold(const struct intel_engine_cs *engine,
return !list_empty(&engine->active.hold) && hold_request(rq);
}
static void flush_csb(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *el = &engine->execlists;
if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) {
if (!reset_in_progress(el))
process_csb(engine);
tasklet_unlock(&el->tasklet);
}
}
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
/* Hopefully we clear execlists->pending[] to let us through */
flush_csb(engine);
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
@ -3536,7 +3543,7 @@ static int emit_pdps(struct i915_request *rq)
int err, i;
u32 *cs;
GEM_BUG_ON(intel_vgpu_active(rq->i915));
GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
/*
* Beware ye of the dragons, this sequence is magic!
@ -4515,11 +4522,11 @@ static int gen8_emit_flush_render(struct i915_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
if (IS_GEN(request->i915, 9))
if (IS_GEN(request->engine->i915, 9))
vf_flush_wa = true;
/* WaForGAMHang:kbl */
if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
if (IS_KBL_REVID(request->engine->i915, 0, KBL_REVID_B0))
dc_flush_wa = true;
}
@ -5598,7 +5605,7 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
list_move_tail(&rq->sched.link, virtual_queue(ve));
tasklet_schedule(&ve->base.execlists.tasklet);
tasklet_hi_schedule(&ve->base.execlists.tasklet);
}
spin_unlock_irqrestore(&ve->base.active.lock, flags);


@ -61,7 +61,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
#define OUT_BATCH(batch, i, val) \
do { \
if ((i) >= PAGE_SIZE / sizeof(u32)) \
goto err; \
goto out; \
(batch)[(i)++] = (val); \
} while (0)
@ -70,15 +70,12 @@ static int render_state_setup(struct intel_renderstate *so,
{
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
unsigned int needs_clflush;
int ret = -EINVAL;
u32 *d;
int ret;
ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
if (ret)
return ret;
d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
if (IS_ERR(d))
return PTR_ERR(d);
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@ -89,7 +86,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (HAS_64BIT_RELOC(i915)) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
goto err;
goto out;
d[i++] = s;
s = upper_32_bits(r);
@ -103,7 +100,7 @@ static int render_state_setup(struct intel_renderstate *so,
if (rodata->reloc[reloc_index] != -1) {
drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
goto err;
goto out;
}
so->batch_offset = i915_ggtt_offset(so->vma);
@ -150,19 +147,11 @@ static int render_state_setup(struct intel_renderstate *so,
*/
so->aux_size = ALIGN(so->aux_size, 8);
if (needs_clflush)
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
ret = 0;
out:
i915_gem_object_finish_access(so->vma->obj);
__i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
i915_gem_object_unpin_map(so->vma->obj);
return ret;
err:
kunmap_atomic(d);
ret = -EINVAL;
goto out;
}
#undef OUT_BATCH


@ -638,7 +638,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
bool intel_has_gpu_reset(const struct intel_gt *gt)
{
if (!i915_modparams.reset)
if (!gt->i915->params.reset)
return false;
return intel_get_gpu_reset(gt);
@ -646,7 +646,7 @@ bool intel_has_gpu_reset(const struct intel_gt *gt)
bool intel_has_reset_engine(const struct intel_gt *gt)
{
if (i915_modparams.reset < 2)
if (gt->i915->params.reset < 2)
return false;
return INTEL_INFO(gt->i915)->has_reset_engine;
@ -1038,7 +1038,7 @@ void intel_gt_reset(struct intel_gt *gt,
awake = reset_prepare(gt);
if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
if (gt->i915->params.reset)
drm_err(&gt->i915->drm, "GPU reset not supported\n");
else
drm_dbg(&gt->i915->drm, "GPU reset disabled\n");


@ -27,21 +27,15 @@
*
*/
#include <linux/log2.h>
#include "gem/i915_gem_context.h"
#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
#include "shmem_utils.h"
/* Rough estimate of the typical request size, performing a flush,
@ -49,436 +43,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
unsigned int num_store_dw;
u32 cmd, *cs;
cmd = MI_FLUSH;
num_store_dw = 0;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
if (mode & EMIT_FLUSH)
num_store_dw = 4;
cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
while (num_store_dw--) {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = 0;
}
*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
intel_ring_advance(rq, cs);
return 0;
}
static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
int i;
/*
* read/write caches:
*
* I915_GEM_DOMAIN_RENDER is always invalidated, but is
* only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
* also flushed at 2d versus 3d pipeline switches.
*
* read-only caches:
*
* I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
* MI_READ_FLUSH is set, and is always flushed on 965.
*
* I915_GEM_DOMAIN_COMMAND may not exist?
*
* I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
* invalidated when MI_EXE_FLUSH is set.
*
* I915_GEM_DOMAIN_VERTEX, which exists on 965, is
* invalidated with every MI_FLUSH.
*
* TLBs:
*
* On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
* and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
* I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
* are flushed at any MI_FLUSH.
*/
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
cmd |= MI_INVALIDATE_ISP;
}
i = 2;
if (mode & EMIT_INVALIDATE)
i += 20;
cs = intel_ring_begin(rq, i);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
/*
* A random delay to let the CS invalidate take effect? Without this
* delay, the GPU relocation path fails as the CS does not see
* the updated contents. Just as important, if we apply the flushes
* to the EMIT_FLUSH branch (i.e. immediately after the relocation
* write and before the invalidate on the next batch), the relocations
* still fail. This implies that there is a delay following invalidation
* that is required to reset the caches as opposed to a delay to
* ensure the memory is written.
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
for (i = 0; i < 12; i++)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
*cs++ = cmd;
intel_ring_advance(rq, cs);
return 0;
}
/*
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
*
* [DevSNB-C+{W/A}] Before any depth stall flush (including those
* produced by non-pipelined state commands), software needs to first
* send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
* 0.
*
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
* =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
*
* And the workaround for these two requires this workaround first:
*
* [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
* BEFORE the pipe-control with a post-sync op and no write-cache
* flushes.
*
* And this last workaround is tricky because of the requirements on
* that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
* volume 2 part 1:
*
* "1 of the following must also be set:
* - Render Target Cache Flush Enable ([12] of DW1)
* - Depth Cache Flush Enable ([0] of DW1)
* - Stall at Pixel Scoreboard ([1] of DW1)
* - Depth Stall ([13] of DW1)
* - Post-Sync Operation ([13] of DW1)
* - Notify Enable ([8] of DW1)"
*
* The cache flushes require the workaround flush that triggered this
* one, so we can't use it. Depth stall would trigger the same.
* Post-sync nonzero is what triggered this second workaround, so we
* can't use that one either. Notify enable is IRQs, which aren't
* really our business. That leaves only stall at scoreboard.
*/
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0; /* low dword */
*cs++ = 0; /* high dword */
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_QW_WRITE;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = gen6_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/*
* Ensure that any following seqno writes only happen
* when the render cache is indeed flushed.
*/
flags |= PIPE_CONTROL_CS_STALL;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = flags;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_QW_WRITE;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
/* Finally we can flush and with it emit the breadcrumb */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_timeline(rq)->hwsp_offset |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
/*
* Ensure that any following seqno writes only happen when the render
* cache is indeed flushed.
*
* Workaround: 4th PIPE_CONTROL command (except the ones with only
* read-cache invalidate bits set) must have the CS_STALL bit set. We
* don't try to be clever and just set it unconditionally.
*/
flags |= PIPE_CONTROL_CS_STALL;
/*
* CS_STALL suggests at least a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
gen7_render_ring_cs_stall_wa(rq);
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = flags;
*cs++ = scratch_addr;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_timeline(rq)->hwsp_offset;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#define GEN7_XCS_WA 32
static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
}
*cs++ = MI_FLUSH_DW;
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#undef GEN7_XCS_WA
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
/*
@ -865,32 +429,6 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
static int rcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
* image. As soon as it is loaded, it is executed and the stored
* address may no longer be valid, leading to a GPU hang.
*
* This imposes the requirement that userspace reload their
* CONSTANT_BUFFER on every batch, fortunately a requirement
* they are already accustomed to from before contexts were
* enabled.
*/
if (IS_GEN(i915, 4))
intel_uncore_write(uncore, ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
if (IS_GEN_RANGE(i915, 6, 7))
intel_uncore_write(uncore, INSTPM,
_MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return xcs_resume(engine);
}
static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
@ -918,255 +456,6 @@ static void i9xx_submit_request(struct i915_request *request)
intel_ring_set_tail(request->ring, request->tail));
}
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#define GEN5_WA_STORES 8 /* must be at least 1! */
static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
}
*cs++ = MI_USER_INTERRUPT;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#undef GEN5_WA_STORES
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
engine->i915->irq_mask &= ~engine->irq_enable_mask;
intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
engine->i915->irq_mask |= engine->irq_enable_mask;
intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}
static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
i915->irq_mask &= ~engine->irq_enable_mask;
intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
ENGINE_POSTING_READ16(engine, RING_IMR);
}
static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
i915->irq_mask |= engine->irq_enable_mask;
intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
}
static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
u32 *cs;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR,
~(engine->irq_enable_mask | engine->irq_keep_mask));
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~0);
gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}
static int
i965_emit_bb_start(struct i915_request *rq,
u64 offset, u32 length,
unsigned int dispatch_flags)
{
u32 *cs;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
*cs++ = offset;
intel_ring_advance(rq, cs);
return 0;
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs, cs_offset =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Evict the invalid PTE TLBs */
*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
*cs++ = cs_offset;
*cs++ = 0xdeadbeef;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
cs = intel_ring_begin(rq, 6 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Blit the batch (which has now all relocs applied) to the
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
*cs++ = cs_offset;
*cs++ = 4096;
*cs++ = offset;
*cs++ = MI_FLUSH;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
/* ... and execute it. */
offset = cs_offset;
}
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
intel_ring_advance(rq, cs);
return 0;
}
static int
i915_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
MI_BATCH_NON_SECURE);
intel_ring_advance(rq, cs);
return 0;
}
static void __ring_context_fini(struct intel_context *ce)
{
i915_vma_put(ce->state);
@ -1356,8 +645,8 @@ static inline int mi_set_context(struct i915_request *rq,
struct intel_context *ce,
u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
struct drm_i915_private *i915 = engine->i915;
enum intel_engine_id id;
const int num_engines =
IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
@ -1471,7 +760,7 @@ static inline int mi_set_context(struct i915_request *rq,
static int remap_l3_slice(struct i915_request *rq, int slice)
{
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
@ -1582,7 +871,7 @@ static int switch_context(struct i915_request *rq)
void **residuals = NULL;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
if (engine->wa_ctx.vma->private != ce) {
@ -1704,99 +993,6 @@ static void gen6_bsd_submit_request(struct i915_request *request)
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
u32 cmd, *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
/*
* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
cmd |= flags;
*cs++ = cmd;
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}
static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}
static int
hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
intel_ring_advance(rq, cs);
return 0;
}
static int
gen6_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 *cs;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
intel_ring_advance(rq, cs);
return 0;
}
/* Blitter support (SandyBridge+) */
static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
@ -1843,11 +1039,11 @@ static void setup_irq(struct intel_engine_cs *engine)
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
} else if (INTEL_GEN(i915) >= 3) {
engine->irq_enable = i9xx_irq_enable;
engine->irq_disable = i9xx_irq_disable;
engine->irq_enable = gen3_irq_enable;
engine->irq_disable = gen3_irq_disable;
} else {
engine->irq_enable = i8xx_irq_enable;
engine->irq_disable = i8xx_irq_disable;
engine->irq_enable = gen2_irq_enable;
engine->irq_disable = gen2_irq_disable;
}
}
@ -1874,7 +1070,7 @@ static void setup_common(struct intel_engine_cs *engine)
* equivalent to our next initial breadcrumb so we can elide
* engine->emit_init_breadcrumb().
*/
engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
if (IS_GEN(i915, 5))
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
@ -1883,11 +1079,11 @@ static void setup_common(struct intel_engine_cs *engine)
if (INTEL_GEN(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(i915) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
engine->emit_bb_start = gen4_emit_bb_start;
else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = i915_emit_bb_start;
engine->emit_bb_start = gen3_emit_bb_start;
}
static void setup_rcs(struct intel_engine_cs *engine)
@ -1900,25 +1096,23 @@ static void setup_rcs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (INTEL_GEN(i915) >= 7) {
engine->emit_flush = gen7_render_ring_flush;
engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
engine->emit_flush = gen7_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
} else if (IS_GEN(i915, 6)) {
engine->emit_flush = gen6_render_ring_flush;
engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
engine->emit_flush = gen6_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
} else if (IS_GEN(i915, 5)) {
engine->emit_flush = gen4_render_ring_flush;
engine->emit_flush = gen4_emit_flush_rcs;
} else {
if (INTEL_GEN(i915) < 4)
engine->emit_flush = gen2_render_ring_flush;
engine->emit_flush = gen2_emit_flush;
else
engine->emit_flush = gen4_render_ring_flush;
engine->emit_flush = gen4_emit_flush_rcs;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
engine->resume = rcs_resume;
}
static void setup_vcs(struct intel_engine_cs *engine)
@ -1929,15 +1123,15 @@ static void setup_vcs(struct intel_engine_cs *engine)
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN(i915, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
engine->emit_flush = gen6_emit_flush_vcs;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
} else {
engine->emit_flush = bsd_ring_flush;
engine->emit_flush = gen4_emit_flush_vcs;
if (IS_GEN(i915, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
@ -1949,13 +1143,13 @@ static void setup_bcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
engine->emit_flush = gen6_ring_flush;
engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static void setup_vecs(struct intel_engine_cs *engine)
@ -1964,12 +1158,12 @@ static void setup_vecs(struct intel_engine_cs *engine)
GEM_BUG_ON(INTEL_GEN(i915) < 7);
engine->emit_flush = gen6_ring_flush;
engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
engine->irq_enable = hsw_vebox_irq_enable;
engine->irq_disable = hsw_vebox_irq_disable;
engine->irq_enable = hsw_irq_enable_vecs;
engine->irq_disable = hsw_irq_disable_vecs;
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,


@ -51,15 +51,16 @@ static void rps_timer(struct timer_list *t)
{
struct intel_rps *rps = from_timer(rps, t, timer);
struct intel_engine_cs *engine;
ktime_t dt, last, timestamp;
enum intel_engine_id id;
s64 max_busy[3] = {};
ktime_t dt, last;
timestamp = 0;
for_each_engine(engine, rps_to_gt(rps), id) {
s64 busy;
int i;
dt = intel_engine_get_busy_time(engine);
dt = intel_engine_get_busy_time(engine, &timestamp);
last = engine->stats.rps;
engine->stats.rps = dt;
@ -69,16 +70,14 @@ static void rps_timer(struct timer_list *t)
swap(busy, max_busy[i]);
}
}
dt = ktime_get();
last = rps->pm_timestamp;
rps->pm_timestamp = dt;
rps->pm_timestamp = timestamp;
if (intel_rps_is_active(rps)) {
s64 busy;
int i;
dt = ktime_sub(dt, last);
dt = ktime_sub(timestamp, last);
/*
* Our goal is to evaluate each engine independently, so we run


@ -205,6 +205,18 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
#define WA_SET_FIELD_MASKED(addr, mask, value) \
wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
@ -355,7 +367,10 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@ -600,11 +615,14 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_1604555607:gen12 and Wa_1608008084:gen12
* FF_MODE2 register will return the wrong value when read. The default
* value for this register is zero for all fields and there are no bit
* masks. So instead of doing a RMW we should just write the TDS timer
* value for Wa_1604555607.
* masks. So instead of doing a RMW we should just write the GS Timer
* and TDS timer values for Wa_1604555607 and Wa_16011163337.
*/
wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128, 0);
wa_add(wal,
FF_MODE2,
FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
0);
/* WaDisableGPGPUMidThreadPreemption:tgl */
WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
@ -630,7 +648,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);
else if (IS_COFFEELAKE(i915))
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_ctx_workarounds_init(engine, wal);
else if (IS_GEMINILAKE(i915))
glk_ctx_workarounds_init(engine, wal);
@ -644,6 +662,10 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
bdw_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 7))
gen7_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 6))
gen6_ctx_workarounds_init(engine, wal);
else if (INTEL_GEN(i915) < 8)
return;
else
@ -917,7 +939,7 @@ static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915))
if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
GAM_ECOCHK,
ECOCHK_DIS_TLB);
@ -1180,7 +1202,7 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
else if (IS_COFFEELAKE(i915))
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_gt_workarounds_init(i915, wal);
else if (IS_GEMINILAKE(i915))
glk_gt_workarounds_init(i915, wal);
@ -1428,6 +1450,18 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
RING_FORCE_TO_NONPRIV_RANGE_4);
}
static void cml_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
if (engine->class != RENDER_CLASS)
whitelist_reg_ext(w,
RING_CTX_TIMESTAMP(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
cfl_whitelist_build(engine);
}
static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
@ -1478,9 +1512,15 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
whitelist_reg_ext(w,
RING_CTX_TIMESTAMP(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
whitelist_reg_ext(w,
RING_CTX_TIMESTAMP(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@ -1512,6 +1552,9 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg(w, HIZ_CHICKEN);
break;
default:
whitelist_reg_ext(w,
RING_CTX_TIMESTAMP(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
}
}
@ -1529,6 +1572,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);
else if (IS_COMETLAKE(i915))
cml_whitelist_build(engine);
else if (IS_COFFEELAKE(i915))
cfl_whitelist_build(engine);
else if (IS_GEMINILAKE(i915))
@ -1725,6 +1770,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
/* Wa_22010271021:ehl */
if (IS_ELKHARTLAKE(i915))
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN_RANGE(i915, 9, 12)) {
@ -1734,7 +1785,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
}
if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915)) {
/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
wa_write_or(wal,
GEN8_GARBCNTL,
@ -1818,6 +1872,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
if (IS_GEN(i915, 4))
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
* image. As soon as it is loaded, it is executed and the stored
* address may no longer be valid, leading to a GPU hang.
*
* This imposes the requirement that userspace reload their
* CONSTANT_BUFFER on every batch, fortunately a requirement
* they are already accustomed to from before contexts were
* enabled.
*/
wa_add(wal, ECOSKPD,
0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
0 /* XXX bit doesn't stick on Broadwater */);
}
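For context on the _MASKED_BIT_ENABLE() encoding used in the ECOSKPD workaround above: masked registers carry a write-enable mask in their top 16 bits, so a single write updates chosen bits without a read-modify-write. A hedged userspace model (macro names simplified from the i915 originals):

#include <stdio.h>

#define MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define MASKED_BIT_ENABLE(a)		MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)		MASKED_FIELD((a), 0)

int main(void)
{
	unsigned int bit = 1u << 2;	/* an arbitrary example bit */

	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(bit));	/* 0x00040004 */
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit));	/* 0x00040000 */
	return 0;
}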
static void
@ -1932,7 +2001,7 @@ wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
struct drm_i915_private *i915 = rq->i915;
struct drm_i915_private *i915 = rq->engine->i915;
unsigned int i, count = 0;
const struct i915_wa *wa;
u32 srm, *cs;
@ -2021,7 +2090,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
err = 0;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
continue;
if (!wa_verify(wa, results[i], wal->name, from))


@ -49,7 +49,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
if (INTEL_GEN(rq->i915) >= 8)
if (INTEL_GEN(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));


@ -10,6 +10,7 @@
#include "intel_gt_requests.h"
#include "i915_selftest.h"
#include "selftest_engine_heartbeat.h"
static int timeline_sync(struct intel_timeline *tl)
{
@ -142,24 +143,6 @@ out:
return err;
}
static void engine_heartbeat_disable(struct intel_engine_cs *engine,
unsigned long *saved)
{
*saved = engine->props.heartbeat_interval_ms;
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
static void engine_heartbeat_enable(struct intel_engine_cs *engine,
unsigned long saved)
{
intel_engine_pm_put(engine);
engine->props.heartbeat_interval_ms = saved;
}
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@ -170,11 +153,9 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
unsigned long heartbeat;
engine_heartbeat_disable(engine, &heartbeat);
st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
engine_heartbeat_enable(engine, heartbeat);
st_engine_heartbeat_enable(engine);
if (err)
break;
}
@ -192,11 +173,9 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
unsigned long heartbeat;
engine_heartbeat_disable(engine, &heartbeat);
st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_pulse);
engine_heartbeat_enable(engine, heartbeat);
st_engine_heartbeat_enable(engine);
if (err && err != -ENODEV)
break;
@ -386,11 +365,27 @@ int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
saved_hangcheck = i915_modparams.enable_hangcheck;
i915_modparams.enable_hangcheck = INT_MAX;
saved_hangcheck = i915->params.enable_hangcheck;
i915->params.enable_hangcheck = INT_MAX;
err = intel_gt_live_subtests(tests, &i915->gt);
i915_modparams.enable_hangcheck = saved_hangcheck;
i915->params.enable_hangcheck = saved_hangcheck;
return err;
}
void st_engine_heartbeat_disable(struct intel_engine_cs *engine)
{
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
void st_engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
engine->props.heartbeat_interval_ms =
engine->defaults.heartbeat_interval_ms;
}


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2020 Intel Corporation
*/
#ifndef SELFTEST_ENGINE_HEARTBEAT_H
#define SELFTEST_ENGINE_HEARTBEAT_H
struct intel_engine_cs;
void st_engine_heartbeat_disable(struct intel_engine_cs *engine);
void st_engine_heartbeat_enable(struct intel_engine_cs *engine);
#endif /* SELFTEST_ENGINE_HEARTBEAT_H */


@ -6,7 +6,107 @@
#include "i915_selftest.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
int err = 0;
/*
* Check that if an engine supports busy-stats, they tell the truth.
*/
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
for_each_engine(engine, gt, id) {
struct i915_request *rq;
ktime_t de, dt;
ktime_t t[2];
if (!intel_engine_supports_stats(engine))
continue;
if (!intel_engine_can_store_dword(engine))
continue;
if (intel_gt_pm_wait_for_idle(gt)) {
err = -EBUSY;
break;
}
st_engine_heartbeat_disable(engine);
ENGINE_TRACE(engine, "measuring idle time\n");
preempt_disable();
de = intel_engine_get_busy_time(engine, &t[0]);
udelay(100);
de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
preempt_enable();
dt = ktime_sub(t[1], t[0]);
if (de < 0 || de > 10) {
pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
engine->name,
de, (int)div64_u64(100 * de, dt), dt);
GEM_TRACE_DUMP();
err = -EINVAL;
goto end;
}
/* 100% busy */
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto end;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(engine->gt);
err = -ETIME;
goto end;
}
ENGINE_TRACE(engine, "measuring busy time\n");
preempt_disable();
de = intel_engine_get_busy_time(engine, &t[0]);
udelay(100);
de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
preempt_enable();
dt = ktime_sub(t[1], t[0]);
if (100 * de < 95 * dt || 95 * de > 100 * dt) {
pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
engine->name,
de, (int)div64_u64(100 * de, dt), dt);
GEM_TRACE_DUMP();
err = -EINVAL;
goto end;
}
end:
st_engine_heartbeat_enable(engine);
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
break;
}
igt_spinner_fini(&spin);
if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
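The 100*/95* cross-multiplication in the busyness check above encodes a roughly ±5% acceptance window without any division on the hot path. A small worked model with invented numbers:

#include <stdio.h>

int main(void)
{
	long long dt = 100000;		/* sampled walltime, ns */
	long long lo = 95 * dt / 100;	/* lowest accepted busy time:  95000 */
	long long hi = 100 * dt / 95;	/* highest accepted busy time: 105263 */

	printf("accepted busyness window: [%lld, %lld] ns\n", lo, hi);
	return 0;
}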
static int live_engine_pm(void *arg)
{
@ -77,6 +177,7 @@ static int live_engine_pm(void *arg)
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};


@ -5,10 +5,141 @@
* Copyright © 2019 Intel Corporation
*/
#include <linux/sort.h>
#include "intel_gt_clock_utils.h"
#include "selftest_llc.h"
#include "selftest_rc6.h"
#include "selftest_rps.h"
static int cmp_u64(const void *A, const void *B)
{
const u64 *a = A, *b = B;
if (*a < *b)
return -1;
else if (*a > *b)
return 1;
else
return 0;
}
static int cmp_u32(const void *A, const void *B)
{
const u32 *a = A, *b = B;
if (*a < *b)
return -1;
else if (*a > *b)
return 1;
else
return 0;
}
static void measure_clocks(struct intel_engine_cs *engine,
u32 *out_cycles, ktime_t *out_dt)
{
ktime_t dt[5];
u32 cycles[5];
int i;
for (i = 0; i < 5; i++) {
preempt_disable();
cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
dt[i] = ktime_get();
udelay(1000);
dt[i] = ktime_sub(ktime_get(), dt[i]);
cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
preempt_enable();
}
/* Use the median of both cycle/dt; close enough */
sort(cycles, 5, sizeof(*cycles), cmp_u32, NULL);
*out_cycles = (cycles[1] + 2 * cycles[2] + cycles[3]) / 4;
sort(dt, 5, sizeof(*dt), cmp_u64, NULL);
*out_dt = div_u64(dt[1] + 2 * dt[2] + dt[3], 4);
}
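The "median" taken above is really a 1-2-1 weighted blend of the middle three of five sorted samples, which discards both outliers. A toy example with invented numbers:

#include <stdio.h>

int main(void)
{
	/* Five already-sorted samples; 90 and 240 are outliers. */
	unsigned int s[5] = { 90, 98, 100, 103, 240 };
	unsigned int m = (s[1] + 2 * s[2] + s[3]) / 4;

	printf("filtered estimate: %u\n", m);	/* prints 100 */
	return 0;
}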
static int live_gt_clocks(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
pr_info("CS_TIMESTAMP frequency unknown\n");
return 0;
}
if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
if (IS_GEN(gt->i915, 5))
/*
* XXX CS_TIMESTAMP low dword is dysfunctional?
*
* Ville's experiments indicate the high dword still works,
* but at a correspondingly reduced frequency.
*/
return 0;
if (IS_GEN(gt->i915, 4))
/*
* XXX CS_TIMESTAMP appears gibberish
*
* Ville's experiments indicate that it mostly appears 'stuck'
* in that we see the register report the same cycle count
* for a couple of reads.
*/
return 0;
intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for_each_engine(engine, gt, id) {
u32 cycles;
u32 expected;
u64 time;
u64 dt;
if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0)
continue;
measure_clocks(engine, &cycles, &dt);
time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
engine->name, cycles, time, dt, expected,
RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
if (9 * time < 8 * dt || 8 * time > 9 * dt) {
pr_err("%s: CS ticks did not match walltime!\n",
engine->name);
err = -EINVAL;
break;
}
if (9 * expected < 8 * cycles || 8 * expected > 9 * cycles) {
pr_err("%s: walltime did not match CS ticks!\n",
engine->name);
err = -EINVAL;
break;
}
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
return err;
}
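The 9*/8* comparisons in live_gt_clocks() above are the same divisionless-window trick with a wider, roughly ±12.5% tolerance; with invented numbers:

#include <stdio.h>

int main(void)
{
	long long dt = 1000000;		/* 1ms of walltime, ns */
	long long lo = 8 * dt / 9;	/* ~888889 ns */
	long long hi = 9 * dt / 8;	/* 1125000 ns */

	printf("converted CS ticks must land in [%lld, %lld] ns\n", lo, hi);
	return 0;
}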
static int live_gt_resume(void *arg)
{
struct intel_gt *gt = arg;
@ -52,6 +183,7 @@ static int live_gt_resume(void *arg)
int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_gt_clocks),
SUBTEST(live_rc6_manual),
SUBTEST(live_rps_clock_interval),
SUBTEST(live_rps_control),


@ -29,6 +29,7 @@
#include "intel_gt.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "selftest_engine_heartbeat.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@ -203,12 +204,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
@ -217,12 +218,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(gt->i915) >= 4) {
@ -230,24 +231,24 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_ARB_CHECK;
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
}
@ -310,22 +311,6 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
1000));
}
static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
engine->props.heartbeat_interval_ms =
engine->defaults.heartbeat_interval_ms;
}
static int igt_hang_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@ -482,7 +467,7 @@ static int igt_reset_nop_engine(void *arg)
reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@ -499,6 +484,20 @@ static int igt_reset_nop_engine(void *arg)
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
intel_engine_dump(engine, &p,
"%s(%s): failed to submit request\n",
__func__,
engine->name);
GEM_TRACE("%s(%s): failed to submit request\n",
__func__,
engine->name);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = PTR_ERR(rq);
break;
}
@ -526,7 +525,7 @@ static int igt_reset_nop_engine(void *arg)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@ -576,7 +575,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
@ -628,7 +627,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
if (err)
break;
@ -805,10 +804,10 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].resets =
i915_reset_engine_count(global, other);
if (!(flags & TEST_OTHERS))
if (other == engine && !(flags & TEST_SELF))
continue;
if (other == engine && !(flags & TEST_SELF))
if (other != engine && !(flags & TEST_OTHERS))
continue;
threads[tmp].engine = other;
@ -827,7 +826,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
@ -866,13 +865,29 @@ static int __igt_reset_engines(struct intel_gt *gt,
count++;
if (rq) {
if (rq->fence.error != -EIO) {
pr_err("i915_reset_engine(%s:%s):"
" failed to reset request %llx:%lld\n",
engine->name, test_name,
rq->fence.context,
rq->fence.seqno);
i915_request_put(rq);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to complete request after reset\n",
engine->name, test_name);
" failed to complete request %llx:%lld after reset\n",
engine->name, test_name,
rq->fence.context,
rq->fence.seqno);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
@ -901,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
@ -983,7 +998,7 @@ static int igt_reset_engines(void *arg)
},
{
"self-priority",
TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
},
{ }
};

File diff suppressed because it is too large

View File

@ -157,7 +157,7 @@ static int read_mocs_table(struct i915_request *rq,
{
u32 addr;
if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
addr = global_mocs_offset();
else
addr = mocs_offset(rq->engine);

View File

@ -132,7 +132,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
if (INTEL_GEN(rq->i915) >= 8)
if (INTEL_GEN(rq->engine->i915) >= 8)
cmd++;
*cs++ = cmd;
@ -197,10 +197,10 @@ int live_rc6_ctx_wa(void *arg)
int pass;
for (pass = 0; pass < 2; pass++) {
struct i915_gpu_error *error = &gt->i915->gpu_error;
struct intel_context *ce;
unsigned int resets =
i915_reset_engine_count(&gt->i915->gpu_error,
engine);
i915_reset_engine_count(error, engine);
const u32 *res;
/* Use a sacrificial context */
@ -230,8 +230,7 @@ int live_rc6_ctx_wa(void *arg)
engine->name, READ_ONCE(*res));
if (resets !=
i915_reset_engine_count(&gt->i915->gpu_error,
engine)) {
i915_reset_engine_count(error, engine)) {
pr_err("%s: GPU reset required\n",
engine->name);
add_taint_for_CI(TAINT_WARN);

View File

@ -12,6 +12,7 @@
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "selftest_engine_heartbeat.h"
#include "selftest_rps.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
@ -20,22 +21,6 @@
/* Try to isolate the impact of cstates from determining frequency response */
#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
engine->props.heartbeat_interval_ms =
engine->defaults.heartbeat_interval_ms;
}
static void dummy_rps_work(struct work_struct *wrk)
{
}
@ -249,13 +234,13 @@ int live_rps_clock_interval(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@ -266,7 +251,7 @@ int live_rps_clock_interval(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@ -322,7 +307,7 @@ int live_rps_clock_interval(void *arg)
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
if (err == 0) {
u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
@ -408,7 +393,7 @@ int live_rps_control(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
@ -424,7 +409,7 @@ int live_rps_control(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@ -434,7 +419,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not set minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@ -451,7 +436,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@ -466,7 +451,7 @@ int live_rps_control(void *arg)
min_dt = ktime_sub(ktime_get(), min_dt);
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
engine->name,
@ -637,14 +622,14 @@ int live_rps_frequency_cs(void *arg)
int freq;
} min, max;
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, false,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
break;
}
@ -725,7 +710,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@ -779,14 +764,14 @@ int live_rps_frequency_srm(void *arg)
int freq;
} min, max;
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, true,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
break;
}
@ -866,7 +851,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@ -1061,11 +1046,11 @@ int live_rps_interrupt(void *arg)
intel_gt_pm_wait_for_idle(engine->gt);
GEM_BUG_ON(intel_rps_is_active(rps));
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
err = __rps_up_interrupt(rps, engine, &spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
if (err)
goto out;
@ -1074,13 +1059,13 @@ int live_rps_interrupt(void *arg)
/* Keep the engine awake but idle and check for DOWN */
if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
intel_rc6_disable(&gt->rc6);
err = __rps_down_interrupt(rps, engine);
intel_rc6_enable(&gt->rc6);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
if (err)
goto out;
}
@ -1165,13 +1150,13 @@ int live_rps_power(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@ -1182,7 +1167,7 @@ int live_rps_power(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@ -1195,7 +1180,7 @@ int live_rps_power(void *arg)
min.power = measure_power_at(rps, &min.freq);
igt_spinner_end(&spin);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
engine->name,
@ -1252,6 +1237,11 @@ int live_rps_dynamic(void *arg)
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
if (intel_rps_has_interrupts(rps))
pr_info("RPS has interrupt support\n");
if (intel_rps_uses_timer(rps))
pr_info("RPS has timer support\n");
for_each_engine(engine, gt, id) {
struct i915_request *rq;
struct {

View File

@ -12,6 +12,7 @@
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_engine_heartbeat.h"
#include "../selftests/i915_random.h"
#include "../i915_selftest.h"
@ -426,12 +427,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
if (IS_ERR(cs))
return PTR_ERR(cs);
if (INTEL_GEN(rq->i915) >= 8) {
if (INTEL_GEN(rq->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
} else if (INTEL_GEN(rq->i915) >= 4) {
} else if (INTEL_GEN(rq->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;
@ -751,22 +752,6 @@ out_free:
return err;
}
static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
engine->props.heartbeat_interval_ms =
engine->defaults.heartbeat_interval_ms;
}
static int live_hwsp_rollover_kernel(void *arg)
{
struct intel_gt *gt = arg;
@ -785,7 +770,7 @@ static int live_hwsp_rollover_kernel(void *arg)
struct i915_request *rq[3] = {};
int i;
engine_heartbeat_disable(engine);
st_engine_heartbeat_disable(engine);
if (intel_gt_wait_for_idle(gt, HZ / 2)) {
err = -EIO;
goto out;
@ -836,7 +821,7 @@ static int live_hwsp_rollover_kernel(void *arg)
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
engine_heartbeat_enable(engine);
st_engine_heartbeat_enable(engine);
if (err)
break;
}

View File

@ -417,6 +417,20 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
return false;
}
static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
switch (reg) {
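	/* likely RING_TIMESTAMP (0x358), its upper dword (0x35c) and CTX_TIMESTAMP (0x3a8) */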
case 0x358:
case 0x35c:
case 0x3a8:
return true;
default:
return false;
}
}
static bool ro_register(u32 reg)
{
if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
@ -497,6 +511,9 @@ static int check_dirty_whitelist(struct intel_context *ce)
if (wo_register(engine, reg))
continue;
if (timestamp(engine, reg))
continue; /* timestamps are expected to autoincrement */
ro_reg = ro_register(reg);
/* Clear non priv flags */

View File

@ -0,0 +1,46 @@
ASM sources for auto-generated shaders
======================================
The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
pre-compiled batch chunks that will clear any residual render cache during
context switch.
They are generated from their respective platform ASM files present in the
i915/gt/shaders/clear_kernel directory.
The generated .c files should never be modified directly. Instead, any modification
needs to be done in their respective ASM files, and the build instructions below
need to be followed.
Building
========
Environment
-----------
The IGT GPU tools scripts and Mesa's i965 instruction assembler tool are used
for building.
Please make sure Mesa is compiled with "-Dtools=intel" and
"-Ddri-drivers=i965", and run the script from the IGT source root directory.
The instructions below assume:
* IGT GPU tools source code is located in your home directory (~) as ~/igt
* Mesa source code is located in your home directory (~) as ~/mesa
and built under the ~/mesa/build directory
* Linux kernel source code is under your home directory (~) as ~/linux
Instructions
------------
~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
~/igt/lib/i915/shaders/clear_kernel/ivb.asm
~ $ cd ~/igt
igt $ ./scripts/generate_clear_kernel.sh -g ivb \
-m ~/mesa/build/src/intel/tools/i965_asm
~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
~/igt/lib/i915/shaders/clear_kernel/hsw.asm
~ $ cd ~/igt
igt $ ./scripts/generate_clear_kernel.sh -g hsw \
-m ~/mesa/build/src/intel/tools/i965_asm
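The script presumably regenerates hsw_clear_kernel.c / ivb_clear_kernel.c, which can
then be copied back into the kernel tree under i915/gt/.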

View File

@ -0,0 +1,119 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
/*
* Kernel for PAVP buffer clear.
*
* 1. Clear all 64 GRF registers assigned to the kernel with designated value;
* 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
* 512 bytes of Render Cache.
*/
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/**
* Curbe Format
*
* DW 1.0 - Block Offset to write Render Cache
* DW 1.1 [15:0] - Clear Word
* DW 1.2 - Delay iterations
* DW 1.3 - Enable Instrumentation (only for debug)
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
* DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
* BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
* BTI 1: Wait/Instrumentation Buffer
* Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
* Expected to be initialized to 0 by driver/another kernel
* Layout:
* RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
* Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
*/
add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
(+f0.0) jmpi(1) 352D { align1 WE_all 1N };
/**
* State Register has info on where this thread is running
* IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
* HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
*/
mov(8) g3<1>UD 0x00000000UD { align1 1Q };
shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
mov(8) g5<1>UD 0x00000000UD { align1 1Q };
and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
/* Media block read to fetch current value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
/* Media block write for updated value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
/* Delay thread for specified parameter */
add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
(+f0.0) jmpi(1) -32D { align1 WE_all 1N };
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/* Initialize looping parameters */
mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
/* Write 32x16 all "0" block */
mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
mov(16) g3<1>UD 0x00000000UD { align1 1H };
mov(16) g4<1>UD 0x00000000UD { align1 1H };
mov(16) g5<1>UD 0x00000000UD { align1 1H };
mov(16) g6<1>UD 0x00000000UD { align1 1H };
mov(16) g7<1>UD 0x00000000UD { align1 1H };
mov(16) g8<1>UD 0x00000000UD { align1 1H };
mov(16) g9<1>UD 0x00000000UD { align1 1H };
mov(16) g10<1>UD 0x00000000UD { align1 1H };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
/* Now, clear all GRF registers */
add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
(+f0.0) jmpi(1) -64D { align1 WE_all 1N };
/* Terminate the thread */
sendc(8) null<1>UD g127<8,8,1>F 0x82000010
thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };

View File

@ -0,0 +1,117 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
/*
* Kernel for PAVP buffer clear.
*
* 1. Clear all 64 GRF registers assigned to the kernel with designated value;
* 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
* 512 bytes of Render Cache.
*/
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/**
* Curbe Format
*
* DW 1.0 - Block Offset to write Render Cache
* DW 1.1 [15:0] - Clear Word
* DW 1.2 - Delay iterations
* DW 1.3 - Enable Instrumentation (only for debug)
* DW 1.4 - Rsvd (intended for context ID)
* DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
* DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
* DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
*
* Binding Table
*
* BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
* BTI 1: Wait/Instrumentation Buffer
* Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
* Expected to be initialized to 0 by driver/another kernel
* Layout :
* RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
* Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
*/
add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
(+f0.0) jmpi(1) 44D { align1 WE_all 1N };
/**
* State Register has info on where this thread is running
* IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
* HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
*/
mov(8) g3<1>UD 0x00000000UD { align1 1Q };
shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
mov(8) g5<1>UD 0x00000000UD { align1 1Q };
and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
/* Media block read to fetch current value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
/* Media block write for updated value at specified location in instrumentation buffer */
sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
/* Delay thread for specified parameter */
add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
(+f0.0) jmpi(1) -4D { align1 WE_all 1N };
/* Store designated "clear GRF" value */
mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
/* Initialize looping parameters */
mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
/* Write 32x16 all "0" block */
mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
mov(16) g3<1>UD 0x00000000UD { align1 1H };
mov(16) g4<1>UD 0x00000000UD { align1 1H };
mov(16) g5<1>UD 0x00000000UD { align1 1H };
mov(16) g6<1>UD 0x00000000UD { align1 1H };
mov(16) g7<1>UD 0x00000000UD { align1 1H };
mov(16) g8<1>UD 0x00000000UD { align1 1H };
mov(16) g9<1>UD 0x00000000UD { align1 1H };
mov(16) g10<1>UD 0x00000000UD { align1 1H };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
/* Now, clear all GRF registers */
add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
(+f0.0) jmpi(1) -8D { align1 WE_all 1N };
/* Terminate the thread */
sendc(8) null<1>UD g127<8,8,1>F 0x82000010
thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };

Some files were not shown because too many files have changed in this diff